mirror of https://github.com/apache/cloudstack.git
Merge branch '4.22'
This commit is contained in:
commit
2d6280b9da
|
|
@ -460,3 +460,16 @@ iscsi.session.cleanup.enabled=false
|
|||
|
||||
# Time, in seconds, to wait before retrying to rebase during the incremental snapshot process.
|
||||
# incremental.snapshot.retry.rebase.wait=60
|
||||
|
||||
# Path to the VDDK library directory for VMware to KVM conversion via VDDK,
|
||||
# passed to virt-v2v as -io vddk-libdir=<path>
|
||||
#vddk.lib.dir=
|
||||
|
||||
# Ordered VDDK transport preference for VMware to KVM conversion via VDDK, passed as
|
||||
# -io vddk-transports=<value> to virt-v2v. Example: nbd:nbdssl
|
||||
#vddk.transports=
|
||||
|
||||
# Optional vCenter SHA1 thumbprint for VMware to KVM conversion via VDDK, passed as
|
||||
# -io vddk-thumbprint=<value>. If unset, CloudStack computes it on the KVM host via openssl.
|
||||
#vddk.thumbprint=
|
||||
|
||||
|
|
|
|||
|
|
@ -808,6 +808,30 @@ public class AgentProperties{
|
|||
*/
|
||||
public static final Property<String> CONVERT_ENV_VIRTV2V_TMPDIR = new Property<>("convert.instance.env.virtv2v.tmpdir", null, String.class);
|
||||
|
||||
/**
|
||||
* Path to the VDDK library directory on the KVM conversion host, used when converting VMs from VMware to KVM via VDDK.
|
||||
* This directory is passed to virt-v2v as <code>-io vddk-libdir=<path></code>.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_LIB_DIR = new Property<>("vddk.lib.dir", null, String.class);
|
||||
|
||||
/**
|
||||
* Ordered list of VDDK transports for virt-v2v, passed as <code>-io vddk-transports=<value></code>.
|
||||
* Example: <code>nbd:nbdssl</code>.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_TRANSPORTS = new Property<>("vddk.transports", null, String.class);
|
||||
|
||||
/**
|
||||
* vCenter TLS certificate thumbprint used by virt-v2v VDDK mode, passed as <code>-io vddk-thumbprint=<value></code>.
|
||||
* If unset, the KVM host computes it at runtime from the vCenter endpoint.
|
||||
* Data type: String.<br>
|
||||
* Default value: <code>null</code>
|
||||
*/
|
||||
public static final Property<String> VDDK_THUMBPRINT = new Property<>("vddk.thumbprint", null, String.class);
|
||||
|
||||
/**
|
||||
* BGP controll CIDR
|
||||
* Data type: String.<br>
|
||||
|
|
|
|||
|
|
@ -36,13 +36,17 @@ public class RemoteInstanceTO implements Serializable {
|
|||
private String vcenterPassword;
|
||||
private String vcenterHost;
|
||||
private String datacenterName;
|
||||
private String clusterName;
|
||||
private String hostName;
|
||||
|
||||
public RemoteInstanceTO() {
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName) {
|
||||
public RemoteInstanceTO(String instanceName, String clusterName, String hostName) {
|
||||
this.hypervisorType = Hypervisor.HypervisorType.VMware;
|
||||
this.instanceName = instanceName;
|
||||
this.clusterName = clusterName;
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName) {
|
||||
|
|
@ -55,6 +59,12 @@ public class RemoteInstanceTO implements Serializable {
|
|||
this.datacenterName = datacenterName;
|
||||
}
|
||||
|
||||
public RemoteInstanceTO(String instanceName, String instancePath, String vcenterHost, String vcenterUsername, String vcenterPassword, String datacenterName, String clusterName, String hostName) {
|
||||
this(instanceName, instancePath, vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
|
||||
this.clusterName = clusterName;
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public Hypervisor.HypervisorType getHypervisorType() {
|
||||
return this.hypervisorType;
|
||||
}
|
||||
|
|
@ -82,4 +92,12 @@ public class RemoteInstanceTO implements Serializable {
|
|||
public String getDatacenterName() {
|
||||
return datacenterName;
|
||||
}
|
||||
|
||||
public String getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
||||
public String getHostName() {
|
||||
return hostName;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -57,6 +57,9 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
|
|||
String HOST_UEFI_ENABLE = "host.uefi.enable";
|
||||
String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
|
||||
String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
|
||||
String HOST_VDDK_SUPPORT = "host.vddk.support";
|
||||
String HOST_VDDK_LIB_DIR = "vddk.lib.dir";
|
||||
String HOST_VDDK_VERSION = "host.vddk.version";
|
||||
String HOST_OVFTOOL_VERSION = "host.ovftool.version";
|
||||
String HOST_VIRTV2V_VERSION = "host.virtv2v.version";
|
||||
String HOST_SSH_PORT = "host.ssh.port";
|
||||
|
|
|
|||
|
|
@ -526,7 +526,6 @@ public class ApiConstants {
|
|||
public static final String SCHEDULE = "schedule";
|
||||
public static final String SCHEDULE_ID = "scheduleid";
|
||||
public static final String SCOPE = "scope";
|
||||
public static final String USER_SECRET_KEY = "usersecretkey";
|
||||
public static final String SEARCH_BASE = "searchbase";
|
||||
public static final String SECONDARY_IP = "secondaryip";
|
||||
public static final String SECURITY_GROUP_IDS = "securitygroupids";
|
||||
|
|
@ -630,6 +629,8 @@ public class ApiConstants {
|
|||
public static final String USERNAME = "username";
|
||||
public static final String USER_CONFIGURABLE = "userconfigurable";
|
||||
public static final String USER_SECURITY_GROUP_LIST = "usersecuritygrouplist";
|
||||
public static final String USER_SECRET_KEY = "usersecretkey";
|
||||
public static final String USE_VDDK = "usevddk";
|
||||
public static final String USE_VIRTUAL_NETWORK = "usevirtualnetwork";
|
||||
public static final String USE_VIRTUAL_ROUTER_IP_RESOLVER = "userouteripresolver";
|
||||
public static final String UPDATE_IN_SEQUENCE = "updateinsequence";
|
||||
|
|
|
|||
|
|
@ -179,6 +179,14 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
|
|||
description = "(only for importing VMs from VMware to KVM) optional - the ID of the guest OS for the imported VM.")
|
||||
private Long guestOsId;
|
||||
|
||||
@Parameter(name = ApiConstants.USE_VDDK,
|
||||
type = CommandType.BOOLEAN,
|
||||
since = "4.22.1",
|
||||
description = "(only for importing VMs from VMware to KVM) optional - if true, uses VDDK on the KVM conversion host for converting the VM. " +
|
||||
"This parameter is mutually exclusive with " + ApiConstants.FORCE_MS_TO_IMPORT_VM_FILES + ".")
|
||||
private Boolean useVddk;
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
/////////////////// Accessors ///////////////////////
|
||||
/////////////////////////////////////////////////////
|
||||
|
|
@ -255,6 +263,10 @@ public class ImportVmCmd extends ImportUnmanagedInstanceCmd {
|
|||
return storagePoolId;
|
||||
}
|
||||
|
||||
public boolean getUseVddk() {
|
||||
return BooleanUtils.toBooleanDefaultIfNull(useVddk, true);
|
||||
}
|
||||
|
||||
public String getTmpPath() {
|
||||
return tmpPath;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@
|
|||
package org.apache.cloudstack.api.command.user.bucket;
|
||||
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import org.apache.cloudstack.acl.RoleType;
|
||||
import org.apache.cloudstack.storage.object.Bucket;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -82,7 +83,7 @@ public class DeleteBucketCmd extends BaseCmd {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void execute() throws ConcurrentOperationException {
|
||||
public void execute() throws ConcurrentOperationException, ResourceAllocationException {
|
||||
CallContext.current().setEventDetails("Bucket ID: " + getResourceUuid(ApiConstants.ID));
|
||||
boolean result = _bucketService.deleteBucket(id, CallContext.current().getCallingAccount());
|
||||
SuccessResponse response = new SuccessResponse(getCommandName());
|
||||
|
|
|
|||
|
|
@ -235,7 +235,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer
|
|||
* @param forced Indicates if backup will be force removed or not
|
||||
* @return returns operation success
|
||||
*/
|
||||
boolean deleteBackup(final Long backupId, final Boolean forced);
|
||||
boolean deleteBackup(final Long backupId, final Boolean forced) throws ResourceAllocationException;
|
||||
|
||||
void validateBackupForZone(Long zoneId);
|
||||
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ public interface BucketApiService {
|
|||
*/
|
||||
Bucket createBucket(CreateBucketCmd cmd);
|
||||
|
||||
boolean deleteBucket(long bucketId, Account caller);
|
||||
boolean deleteBucket(long bucketId, Account caller) throws ResourceAllocationException;
|
||||
|
||||
boolean updateBucket(UpdateBucketCmd cmd, Account caller) throws ResourceAllocationException;
|
||||
|
||||
|
|
|
|||
|
|
@ -18,6 +18,8 @@ package com.cloud.agent.api;
|
|||
|
||||
public class CheckConvertInstanceCommand extends Command {
|
||||
boolean checkWindowsGuestConversionSupport = false;
|
||||
boolean useVddk = false;
|
||||
String vddkLibDir;
|
||||
|
||||
public CheckConvertInstanceCommand() {
|
||||
}
|
||||
|
|
@ -26,6 +28,11 @@ public class CheckConvertInstanceCommand extends Command {
|
|||
this.checkWindowsGuestConversionSupport = checkWindowsGuestConversionSupport;
|
||||
}
|
||||
|
||||
public CheckConvertInstanceCommand(boolean checkWindowsGuestConversionSupport, boolean useVddk) {
|
||||
this.checkWindowsGuestConversionSupport = checkWindowsGuestConversionSupport;
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
|
|
@ -34,4 +41,20 @@ public class CheckConvertInstanceCommand extends Command {
|
|||
public boolean getCheckWindowsGuestConversionSupport() {
|
||||
return checkWindowsGuestConversionSupport;
|
||||
}
|
||||
|
||||
public boolean isUseVddk() {
|
||||
return useVddk;
|
||||
}
|
||||
|
||||
public void setUseVddk(boolean useVddk) {
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public void setVddkLibDir(String vddkLibDir) {
|
||||
this.vddkLibDir = vddkLibDir;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,6 +31,10 @@ public class ConvertInstanceCommand extends Command {
|
|||
private boolean exportOvfToConversionLocation;
|
||||
private int threadsCountToExportOvf = 0;
|
||||
private String extraParams;
|
||||
private boolean useVddk;
|
||||
private String vddkLibDir;
|
||||
private String vddkTransports;
|
||||
private String vddkThumbprint;
|
||||
|
||||
public ConvertInstanceCommand() {
|
||||
}
|
||||
|
|
@ -90,6 +94,38 @@ public class ConvertInstanceCommand extends Command {
|
|||
this.extraParams = extraParams;
|
||||
}
|
||||
|
||||
public boolean isUseVddk() {
|
||||
return useVddk;
|
||||
}
|
||||
|
||||
public void setUseVddk(boolean useVddk) {
|
||||
this.useVddk = useVddk;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public void setVddkLibDir(String vddkLibDir) {
|
||||
this.vddkLibDir = vddkLibDir;
|
||||
}
|
||||
|
||||
public String getVddkTransports() {
|
||||
return vddkTransports;
|
||||
}
|
||||
|
||||
public void setVddkTransports(String vddkTransports) {
|
||||
this.vddkTransports = vddkTransports;
|
||||
}
|
||||
|
||||
public String getVddkThumbprint() {
|
||||
return vddkThumbprint;
|
||||
}
|
||||
|
||||
public void setVddkThumbprint(String vddkThumbprint) {
|
||||
this.vddkThumbprint = vddkThumbprint;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ import com.cloud.resource.ResourceState;
|
|||
public class PropagateResourceEventCommand extends Command {
|
||||
long hostId;
|
||||
ResourceState.Event event;
|
||||
boolean forced;
|
||||
boolean forceDeleteStorage;
|
||||
|
||||
protected PropagateResourceEventCommand() {
|
||||
|
||||
|
|
@ -34,6 +36,13 @@ public class PropagateResourceEventCommand extends Command {
|
|||
this.event = event;
|
||||
}
|
||||
|
||||
public PropagateResourceEventCommand(long hostId, ResourceState.Event event, boolean forced, boolean forceDeleteStorage) {
|
||||
this.hostId = hostId;
|
||||
this.event = event;
|
||||
this.forced = forced;
|
||||
this.forceDeleteStorage = forceDeleteStorage;
|
||||
}
|
||||
|
||||
public long getHostId() {
|
||||
return hostId;
|
||||
}
|
||||
|
|
@ -42,6 +51,14 @@ public class PropagateResourceEventCommand extends Command {
|
|||
return event;
|
||||
}
|
||||
|
||||
public boolean isForced() {
|
||||
return forced;
|
||||
}
|
||||
|
||||
public boolean isForceDeleteStorage() {
|
||||
return forceDeleteStorage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
// TODO Auto-generated method stub
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
public String lbStatsAuth = "admin1:AdMiN123";
|
||||
public String lbStatsUri = "/admin?stats";
|
||||
public String maxconn = "";
|
||||
public Long idleTimeout = 50000L; /* 0=infinite, >0 = timeout in milliseconds */
|
||||
public String lbProtocol;
|
||||
public boolean keepAliveEnabled = false;
|
||||
NicTO nic;
|
||||
|
|
@ -50,7 +51,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
}
|
||||
|
||||
public LoadBalancerConfigCommand(LoadBalancerTO[] loadBalancers, String publicIp, String guestIp, String privateIp, NicTO nic, Long vpcId, String maxconn,
|
||||
boolean keepAliveEnabled) {
|
||||
boolean keepAliveEnabled, Long idleTimeout) {
|
||||
this.loadBalancers = loadBalancers;
|
||||
this.lbStatsPublicIP = publicIp;
|
||||
this.lbStatsPrivateIP = privateIp;
|
||||
|
|
@ -59,6 +60,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand {
|
|||
this.vpcId = vpcId;
|
||||
this.maxconn = maxconn;
|
||||
this.keepAliveEnabled = keepAliveEnabled;
|
||||
this.idleTimeout = idleTimeout;
|
||||
}
|
||||
|
||||
public NicTO getNic() {
|
||||
|
|
|
|||
|
|
@ -635,6 +635,19 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator {
|
|||
if (lbCmd.keepAliveEnabled) {
|
||||
dSection.set(7, "\tno option httpclose");
|
||||
}
|
||||
if (lbCmd.idleTimeout > 0) {
|
||||
dSection.set(9, "\ttimeout client " + Long.toString(lbCmd.idleTimeout));
|
||||
dSection.set(10, "\ttimeout server " + Long.toString(lbCmd.idleTimeout));
|
||||
} else if (lbCmd.idleTimeout == 0) {
|
||||
// .remove() is not allowed, only .set() operations are allowed as the list
|
||||
// is a fixed size. So lets just mark the entry as blank.
|
||||
dSection.set(9, "");
|
||||
dSection.set(10, "");
|
||||
} else {
|
||||
// Negative idleTimeout values are considered invalid; retain the
|
||||
// default HAProxy timeout values from defaultsSection for predictability.
|
||||
logger.warn("Negative idleTimeout ({}) configured; retaining default HAProxy timeouts.", lbCmd.idleTimeout);
|
||||
}
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
for (final String s : dSection) {
|
||||
|
|
|
|||
|
|
@ -235,7 +235,7 @@ public class ConfigHelperTest {
|
|||
lbs.toArray(arrayLbs);
|
||||
|
||||
final NicTO nic = new NicTO();
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false, 0L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
|
||||
|
|
|
|||
|
|
@ -779,7 +779,7 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
|
|||
final LoadBalancerTO[] arrayLbs = new LoadBalancerTO[lbs.size()];
|
||||
lbs.toArray(arrayLbs);
|
||||
final NicTO nic = new NicTO();
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, null, "1000", false, 50000L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
return cmd;
|
||||
|
|
@ -795,7 +795,7 @@ public class VirtualRoutingResourceTest implements VirtualRouterDeployer {
|
|||
lbs.toArray(arrayLbs);
|
||||
final NicTO nic = new NicTO();
|
||||
nic.setIp("10.1.10.2");
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, Long.valueOf(1), "1000", false);
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(arrayLbs, "64.10.2.10", "10.1.10.2", "192.168.1.2", nic, Long.valueOf(1), "1000", false, 50000L);
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, "10.1.10.2");
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, ROUTERNAME);
|
||||
return cmd;
|
||||
|
|
|
|||
|
|
@ -79,13 +79,14 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("keepalive disabled should result in 'option httpclose' in the resulting haproxy config", result.contains("\toption httpclose"));
|
||||
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true);
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 0L);
|
||||
result = genConfig(hpg, cmd);
|
||||
assertTrue("keepalive enabled should result in 'no option httpclose' in the resulting haproxy config", result.contains("\tno option httpclose"));
|
||||
|
||||
// TODO
|
||||
// create lb command
|
||||
// setup tests for
|
||||
|
|
@ -93,6 +94,27 @@ public class HAProxyConfiguratorTest {
|
|||
// httpmode
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link com.cloud.network.HAProxyConfigurator#generateConfiguration(com.cloud.agent.api.routing.LoadBalancerConfigCommand)}.
|
||||
*/
|
||||
@Test
|
||||
public void testGenerateConfigurationLoadBalancerIdleTimeoutConfigCommand() {
|
||||
LoadBalancerTO lb = new LoadBalancerTO("1", "10.2.0.1", 80, "http", "bla", false, false, false, null);
|
||||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("idleTimeout of 0 should not generate 'timeout server' in the resulting haproxy config", !result.contains("\ttimeout server"));
|
||||
assertTrue("idleTimeout of 0 should not generate 'timeout client' in the resulting haproxy config", !result.contains("\ttimeout client"));
|
||||
|
||||
cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true, 1234L);
|
||||
result = genConfig(hpg, cmd);
|
||||
assertTrue("idleTimeout of 1234 should result in 'timeout server 1234' in the resulting haproxy config", result.contains("\ttimeout server 1234"));
|
||||
assertTrue("idleTimeout of 1234 should result in 'timeout client 1234' in the resulting haproxy config", result.contains("\ttimeout client 1234"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Test method for {@link com.cloud.network.HAProxyConfigurator#generateConfiguration(com.cloud.agent.api.routing.LoadBalancerConfigCommand)}.
|
||||
*/
|
||||
|
|
@ -106,7 +128,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
assertTrue("'send-proxy' should result if protocol is 'tcp-proxy'", result.contains("send-proxy"));
|
||||
}
|
||||
|
|
@ -118,7 +140,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
Assert.assertTrue(result.contains("acl network_allowed src 1.1.1.1 2.2.2.2/24 \n\ttcp-request connection reject if !network_allowed"));
|
||||
}
|
||||
|
|
@ -131,7 +153,7 @@ public class HAProxyConfiguratorTest {
|
|||
LoadBalancerTO[] lba = new LoadBalancerTO[1];
|
||||
lba[0] = lb;
|
||||
HAProxyConfigurator hpg = new HAProxyConfigurator();
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false);
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false, 0L);
|
||||
String result = genConfig(hpg, cmd);
|
||||
Assert.assertTrue(result.contains("bind 10.2.0.1:443 ssl crt /etc/cloudstack/ssl/10_2_0_1-443.pem"));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -122,6 +122,14 @@ public interface NetworkOrchestrationService {
|
|||
"Load Balancer(haproxy) maximum number of concurrent connections(global max)",
|
||||
true,
|
||||
Scope.Global);
|
||||
ConfigKey<Long> NETWORK_LB_HAPROXY_IDLE_TIMEOUT = new ConfigKey<>(
|
||||
"Network",
|
||||
Long.class,
|
||||
"network.loadbalancer.haproxy.idle.timeout",
|
||||
"50000",
|
||||
"Load Balancer(haproxy) idle timeout in milliseconds. Use 0 for infinite.",
|
||||
true,
|
||||
Scope.Global);
|
||||
|
||||
List<? extends Network> setupNetwork(Account owner, NetworkOffering offering, DeploymentPlan plan, String name, String displayText, boolean isDefault)
|
||||
throws ConcurrentOperationException;
|
||||
|
|
|
|||
|
|
@ -122,6 +122,8 @@ public interface ResourceManager extends ResourceService, Configurable {
|
|||
|
||||
public boolean executeUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException;
|
||||
|
||||
boolean executeUserRequest(long hostId, ResourceState.Event event, boolean isForced, boolean isForceDeleteStorage) throws AgentUnavailableException;
|
||||
|
||||
boolean resourceStateTransitTo(Host host, Event event, long msId) throws NoTransitionException;
|
||||
|
||||
boolean umanageHost(long hostId);
|
||||
|
|
|
|||
|
|
@ -805,8 +805,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
|
|||
String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
|
||||
String virtv2vVersion = detailsMap.get(Host.HOST_VIRTV2V_VERSION);
|
||||
String ovftoolVersion = detailsMap.get(Host.HOST_OVFTOOL_VERSION);
|
||||
String vddkSupport = detailsMap.get(Host.HOST_VDDK_SUPPORT);
|
||||
String vddkLibDir = detailsMap.get(Host.HOST_VDDK_LIB_DIR);
|
||||
String vddkVersion = detailsMap.get(Host.HOST_VDDK_VERSION);
|
||||
logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host);
|
||||
if (ObjectUtils.anyNotNull(uefiEnabled, virtv2vVersion, ovftoolVersion)) {
|
||||
if (ObjectUtils.anyNotNull(uefiEnabled, virtv2vVersion, ovftoolVersion, vddkSupport, vddkLibDir, vddkVersion)) {
|
||||
_hostDao.loadDetails(host);
|
||||
boolean updateNeeded = false;
|
||||
if (StringUtils.isNotBlank(uefiEnabled) && !uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
|
||||
|
|
@ -821,6 +824,26 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
|
|||
host.getDetails().put(Host.HOST_OVFTOOL_VERSION, ovftoolVersion);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkSupport) && !vddkSupport.equals(host.getDetails().get(Host.HOST_VDDK_SUPPORT))) {
|
||||
host.getDetails().put(Host.HOST_VDDK_SUPPORT, vddkSupport);
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (!StringUtils.defaultString(vddkLibDir).equals(StringUtils.defaultString(host.getDetails().get(Host.HOST_VDDK_LIB_DIR)))) {
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
host.getDetails().remove(Host.HOST_VDDK_LIB_DIR);
|
||||
} else {
|
||||
host.getDetails().put(Host.HOST_VDDK_LIB_DIR, vddkLibDir);
|
||||
}
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (!StringUtils.defaultString(vddkVersion).equals(StringUtils.defaultString(host.getDetails().get(Host.HOST_VDDK_VERSION)))) {
|
||||
if (StringUtils.isBlank(vddkVersion)) {
|
||||
host.getDetails().remove(Host.HOST_VDDK_VERSION);
|
||||
} else {
|
||||
host.getDetails().put(Host.HOST_VDDK_VERSION, vddkVersion);
|
||||
}
|
||||
updateNeeded = true;
|
||||
}
|
||||
if (updateNeeded) {
|
||||
_hostDao.saveDetails(host);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1306,11 +1306,20 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust
|
|||
|
||||
boolean result;
|
||||
try {
|
||||
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent());
|
||||
result = _resourceMgr.executeUserRequest(cmd.getHostId(), cmd.getEvent(), cmd.isForced(), cmd.isForceDeleteStorage());
|
||||
logger.debug("Result is {}", result);
|
||||
} catch (final AgentUnavailableException ex) {
|
||||
logger.warn("Agent is unavailable", ex);
|
||||
return null;
|
||||
} catch (final RuntimeException ex) {
|
||||
logger.error(String.format("Failed to execute propagated event %s for host %d", cmd.getEvent().name(), cmd.getHostId()), ex);
|
||||
final Answer[] answers = new Answer[1];
|
||||
String details = ex.getMessage();
|
||||
if (details == null || details.isEmpty()) {
|
||||
details = ex.toString();
|
||||
}
|
||||
answers[0] = new Answer(cmd, false, details);
|
||||
return _gson.toJson(answers);
|
||||
}
|
||||
|
||||
final Answer[] answers = new Answer[1];
|
||||
|
|
|
|||
|
|
@ -2776,219 +2776,218 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
boolean ipv6 = false;
|
||||
|
||||
try (CheckedReservation networkReservation = new CheckedReservation(owner, domainId, Resource.ResourceType.network, null, null, 1L, reservationDao, _resourceLimitMgr)) {
|
||||
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
ipv6 = true;
|
||||
}
|
||||
// Validate zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
// In Basic zone the network should have aclType=Domain, domainId=1, subdomainAccess=true
|
||||
if (aclType == null || aclType != ACLType.Domain) {
|
||||
throw new InvalidParameterValueException("Only AclType=Domain can be specified for network creation in Basic zone");
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
ipv6 = true;
|
||||
}
|
||||
// Validate zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic) {
|
||||
// In Basic zone the network should have aclType=Domain, domainId=1, subdomainAccess=true
|
||||
if (aclType == null || aclType != ACLType.Domain) {
|
||||
throw new InvalidParameterValueException("Only AclType=Domain can be specified for network creation in Basic zone");
|
||||
}
|
||||
|
||||
// Only one guest network is supported in Basic zone
|
||||
final List<NetworkVO> guestNetworks = _networksDao.listByZoneAndTrafficType(zone.getId(), TrafficType.Guest);
|
||||
if (!guestNetworks.isEmpty()) {
|
||||
throw new InvalidParameterValueException("Can't have more than one Guest network in zone with network type " + NetworkType.Basic);
|
||||
}
|
||||
// Only one guest network is supported in Basic zone
|
||||
final List<NetworkVO> guestNetworks = _networksDao.listByZoneAndTrafficType(zone.getId(), TrafficType.Guest);
|
||||
if (!guestNetworks.isEmpty()) {
|
||||
throw new InvalidParameterValueException("Can't have more than one Guest network in zone with network type " + NetworkType.Basic);
|
||||
}
|
||||
|
||||
// if zone is basic, only Shared network offerings w/o source nat service are allowed
|
||||
if (!(ntwkOff.getGuestType() == GuestType.Shared && !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat))) {
|
||||
throw new InvalidParameterValueException("For zone of type " + NetworkType.Basic + " only offerings of " + "guestType " + GuestType.Shared + " with disabled "
|
||||
+ Service.SourceNat.getName() + " service are allowed");
|
||||
}
|
||||
// if zone is basic, only Shared network offerings w/o source nat service are allowed
|
||||
if (!(ntwkOff.getGuestType() == GuestType.Shared && !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat))) {
|
||||
throw new InvalidParameterValueException("For zone of type " + NetworkType.Basic + " only offerings of " + "guestType " + GuestType.Shared + " with disabled "
|
||||
+ Service.SourceNat.getName() + " service are allowed");
|
||||
}
|
||||
|
||||
if (domainId == null || domainId != Domain.ROOT_DOMAIN) {
|
||||
throw new InvalidParameterValueException("Guest network in Basic zone should be dedicated to ROOT domain");
|
||||
}
|
||||
if (domainId == null || domainId != Domain.ROOT_DOMAIN) {
|
||||
throw new InvalidParameterValueException("Guest network in Basic zone should be dedicated to ROOT domain");
|
||||
}
|
||||
|
||||
if (subdomainAccess == null) {
|
||||
subdomainAccess = true;
|
||||
} else if (!subdomainAccess) {
|
||||
throw new InvalidParameterValueException("Subdomain access should be set to true for the" + " guest network in the Basic zone");
|
||||
}
|
||||
if (subdomainAccess == null) {
|
||||
subdomainAccess = true;
|
||||
} else if (!subdomainAccess) {
|
||||
throw new InvalidParameterValueException("Subdomain access should be set to true for the" + " guest network in the Basic zone");
|
||||
}
|
||||
|
||||
if (vlanId == null) {
|
||||
vlanId = Vlan.UNTAGGED;
|
||||
} else {
|
||||
if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
throw new InvalidParameterValueException("Only vlan " + Vlan.UNTAGGED + " can be created in " + "the zone of type " + NetworkType.Basic);
|
||||
if (vlanId == null) {
|
||||
vlanId = Vlan.UNTAGGED;
|
||||
} else {
|
||||
if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
throw new InvalidParameterValueException("Only vlan " + Vlan.UNTAGGED + " can be created in " + "the zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (zone.getNetworkType() == NetworkType.Advanced) {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
if (isolatedPvlan != null) {
|
||||
throw new InvalidParameterValueException("Isolated Private VLAN is not supported with security group!");
|
||||
}
|
||||
// Only Account specific Isolated network with sourceNat service disabled are allowed in security group
|
||||
// enabled zone
|
||||
if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
|
||||
throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
|
||||
}
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
|
||||
throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");
|
||||
}
|
||||
}
|
||||
|
||||
//don't allow eip/elb networks in Advance zone
|
||||
if (ntwkOff.isElasticIp() || ntwkOff.isElasticLb()) {
|
||||
throw new InvalidParameterValueException("Elastic IP and Elastic LB services are supported in zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (zone.getNetworkType() == NetworkType.Advanced) {
|
||||
if (zone.isSecurityGroupEnabled()) {
|
||||
if (isolatedPvlan != null) {
|
||||
throw new InvalidParameterValueException("Isolated Private VLAN is not supported with security group!");
|
||||
}
|
||||
// Only Account specific Isolated network with sourceNat service disabled are allowed in security group
|
||||
// enabled zone
|
||||
if ((ntwkOff.getGuestType() != GuestType.Shared) && (ntwkOff.getGuestType() != GuestType.L2)) {
|
||||
throw new InvalidParameterValueException("Only shared or L2 guest network can be created in security group enabled zone");
|
||||
}
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)) {
|
||||
throw new InvalidParameterValueException("Service SourceNat is not allowed in security group enabled zone");
|
||||
if (ipv6 && !GuestType.Shared.equals(ntwkOff.getGuestType())) {
|
||||
_networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr);
|
||||
}
|
||||
|
||||
//TODO(VXLAN): Support VNI specified
|
||||
// VlanId can be specified only when network offering supports it
|
||||
final boolean vlanSpecified = vlanId != null;
|
||||
if (vlanSpecified != ntwkOff.isSpecifyVlan()) {
|
||||
if (vlanSpecified) {
|
||||
if (!isSharedNetworkWithoutSpecifyVlan(ntwkOff) && !isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
throw new InvalidParameterValueException("Can't specify vlan; corresponding offering says specifyVlan=false");
|
||||
}
|
||||
} else {
|
||||
throw new InvalidParameterValueException("Vlan has to be specified; corresponding offering says specifyVlan=true");
|
||||
}
|
||||
}
|
||||
|
||||
//don't allow eip/elb networks in Advance zone
|
||||
if (ntwkOff.isElasticIp() || ntwkOff.isElasticLb()) {
|
||||
throw new InvalidParameterValueException("Elastic IP and Elastic LB services are supported in zone of type " + NetworkType.Basic);
|
||||
}
|
||||
}
|
||||
|
||||
if (ipv6 && !GuestType.Shared.equals(ntwkOff.getGuestType())) {
|
||||
_networkModel.checkIp6CidrSizeEqualTo64(ip6Cidr);
|
||||
}
|
||||
|
||||
//TODO(VXLAN): Support VNI specified
|
||||
// VlanId can be specified only when network offering supports it
|
||||
final boolean vlanSpecified = vlanId != null;
|
||||
if (vlanSpecified != ntwkOff.isSpecifyVlan()) {
|
||||
if (vlanSpecified) {
|
||||
if (!isSharedNetworkWithoutSpecifyVlan(ntwkOff) && !isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
throw new InvalidParameterValueException("Can't specify vlan; corresponding offering says specifyVlan=false");
|
||||
URI uri = encodeVlanIdIntoBroadcastUri(vlanId, pNtwk);
|
||||
// Aux: generate secondary URI for secondary VLAN ID (if provided) for performing checks
|
||||
URI secondaryUri = StringUtils.isNotBlank(isolatedPvlan) ? BroadcastDomainType.fromString(isolatedPvlan) : null;
|
||||
if (isSharedNetworkWithoutSpecifyVlan(ntwkOff) || isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
bypassVlanOverlapCheck = true;
|
||||
}
|
||||
} else {
|
||||
throw new InvalidParameterValueException("Vlan has to be specified; corresponding offering says specifyVlan=true");
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanSpecified) {
|
||||
URI uri = encodeVlanIdIntoBroadcastUri(vlanId, pNtwk);
|
||||
// Aux: generate secondary URI for secondary VLAN ID (if provided) for performing checks
|
||||
URI secondaryUri = StringUtils.isNotBlank(isolatedPvlan) ? BroadcastDomainType.fromString(isolatedPvlan) : null;
|
||||
if (isSharedNetworkWithoutSpecifyVlan(ntwkOff) || isPrivateGatewayWithoutSpecifyVlan(ntwkOff)) {
|
||||
bypassVlanOverlapCheck = true;
|
||||
}
|
||||
//don't allow to specify vlan tag used by physical network for dynamic vlan allocation
|
||||
if (!(bypassVlanOverlapCheck && (ntwkOff.getGuestType() == GuestType.Shared || isPrivateNetwork))
|
||||
&& _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(uri)).size() > 0) {
|
||||
throw new InvalidParameterValueException("The VLAN tag to use for new guest network, " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone "
|
||||
+ zone.getName());
|
||||
}
|
||||
if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) &&
|
||||
_dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
}
|
||||
if (!UuidUtils.isUuid(vlanId)) {
|
||||
// For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone
|
||||
if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) {
|
||||
if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
vlanId, zone));
|
||||
} else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
} else {
|
||||
final List<DataCenterVnetVO> dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri));
|
||||
//for the network that is created as part of private gateway,
|
||||
//the vnet is not coming from the data center vnet table, so the list can be empty
|
||||
if (!dcVnets.isEmpty()) {
|
||||
final DataCenterVnetVO dcVnet = dcVnets.get(0);
|
||||
// Fail network creation if specified vlan is dedicated to a different account
|
||||
if (dcVnet.getAccountGuestVlanMapId() != null) {
|
||||
final Long accountGuestVlanMapId = dcVnet.getAccountGuestVlanMapId();
|
||||
final AccountGuestVlanMapVO map = _accountGuestVlanMapDao.findById(accountGuestVlanMapId);
|
||||
if (map.getAccountId() != owner.getAccountId()) {
|
||||
throw new InvalidParameterValueException("Vlan " + vlanId + " is dedicated to a different account");
|
||||
}
|
||||
// Fail network creation if owner has a dedicated range of vlans but the specified vlan belongs to the system pool
|
||||
} else {
|
||||
final List<AccountGuestVlanMapVO> maps = _accountGuestVlanMapDao.listAccountGuestVlanMapsByAccount(owner.getAccountId());
|
||||
if (maps != null && !maps.isEmpty()) {
|
||||
final int vnetsAllocatedToAccount = _datacenterVnetDao.countVnetsAllocatedToAccount(zoneId, owner.getAccountId());
|
||||
final int vnetsDedicatedToAccount = _datacenterVnetDao.countVnetsDedicatedToAccount(zoneId, owner.getAccountId());
|
||||
if (vnetsAllocatedToAccount < vnetsDedicatedToAccount) {
|
||||
throw new InvalidParameterValueException("Specified vlan " + vlanId + " doesn't belong" + " to the vlan range dedicated to the owner "
|
||||
+ owner.getAccountName());
|
||||
//don't allow to specify vlan tag used by physical network for dynamic vlan allocation
|
||||
if (!(bypassVlanOverlapCheck && (ntwkOff.getGuestType() == GuestType.Shared || isPrivateNetwork))
|
||||
&& _dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(uri)).size() > 0) {
|
||||
throw new InvalidParameterValueException("The VLAN tag to use for new guest network, " + vlanId + " is already being used for dynamic vlan allocation for the guest network in zone "
|
||||
+ zone.getName());
|
||||
}
|
||||
if (secondaryUri != null && !(bypassVlanOverlapCheck && ntwkOff.getGuestType() == GuestType.Shared) &&
|
||||
_dcDao.findVnet(zoneId, pNtwk.getId(), BroadcastDomainType.getValue(secondaryUri)).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"The VLAN tag for isolated PVLAN %s is already being used for dynamic vlan allocation for the guest network in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
}
|
||||
if (!UuidUtils.isUuid(vlanId)) {
|
||||
// For Isolated and L2 networks, don't allow to create network with vlan that already exists in the zone
|
||||
if (!hasGuestBypassVlanOverlapCheck(bypassVlanOverlapCheck, ntwkOff, isPrivateNetwork)) {
|
||||
if (_networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
vlanId, zone));
|
||||
} else if (secondaryUri != null && _networksDao.listByZoneAndUriAndGuestType(zoneId, secondaryUri.toString(), null).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network vlans in zone %s",
|
||||
isolatedPvlan, zone));
|
||||
} else {
|
||||
final List<DataCenterVnetVO> dcVnets = _datacenterVnetDao.findVnet(zoneId, BroadcastDomainType.getValue(uri));
|
||||
//for the network that is created as part of private gateway,
|
||||
//the vnet is not coming from the data center vnet table, so the list can be empty
|
||||
if (!dcVnets.isEmpty()) {
|
||||
final DataCenterVnetVO dcVnet = dcVnets.get(0);
|
||||
// Fail network creation if specified vlan is dedicated to a different account
|
||||
if (dcVnet.getAccountGuestVlanMapId() != null) {
|
||||
final Long accountGuestVlanMapId = dcVnet.getAccountGuestVlanMapId();
|
||||
final AccountGuestVlanMapVO map = _accountGuestVlanMapDao.findById(accountGuestVlanMapId);
|
||||
if (map.getAccountId() != owner.getAccountId()) {
|
||||
throw new InvalidParameterValueException("Vlan " + vlanId + " is dedicated to a different account");
|
||||
}
|
||||
// Fail network creation if owner has a dedicated range of vlans but the specified vlan belongs to the system pool
|
||||
} else {
|
||||
final List<AccountGuestVlanMapVO> maps = _accountGuestVlanMapDao.listAccountGuestVlanMapsByAccount(owner.getAccountId());
|
||||
if (maps != null && !maps.isEmpty()) {
|
||||
final int vnetsAllocatedToAccount = _datacenterVnetDao.countVnetsAllocatedToAccount(zoneId, owner.getAccountId());
|
||||
final int vnetsDedicatedToAccount = _datacenterVnetDao.countVnetsDedicatedToAccount(zoneId, owner.getAccountId());
|
||||
if (vnetsAllocatedToAccount < vnetsDedicatedToAccount) {
|
||||
throw new InvalidParameterValueException("Specified vlan " + vlanId + " doesn't belong" + " to the vlan range dedicated to the owner "
|
||||
+ owner.getAccountName());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or
|
||||
// shared network with same Vlan ID in the zone
|
||||
if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// don't allow to creating shared network with given Vlan ID, if there already exists a isolated network or
|
||||
// shared network with same Vlan ID in the zone
|
||||
if (!bypassVlanOverlapCheck && _networksDao.listByZoneAndUriAndGuestType(zoneId, uri.toString(), GuestType.Isolated).size() > 0) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If networkDomain is not specified, take it from the global configuration
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.Dns)) {
|
||||
final Map<Network.Capability, String> dnsCapabilities = _networkModel.getNetworkOfferingServiceCapabilities(_entityMgr.findById(NetworkOffering.class, networkOfferingId),
|
||||
Service.Dns);
|
||||
final String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification);
|
||||
if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) {
|
||||
if (networkDomain != null) {
|
||||
// TBD: NetworkOfferingId and zoneId. Send uuids instead.
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"There is an existing isolated/shared network that overlaps with vlan id:%s in zone %s", vlanId, zone));
|
||||
"Domain name change is not supported by network offering id=%d in zone %s",
|
||||
networkOfferingId, zone));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// If networkDomain is not specified, take it from the global configuration
|
||||
if (_networkModel.areServicesSupportedByNetworkOffering(networkOfferingId, Service.Dns)) {
|
||||
final Map<Network.Capability, String> dnsCapabilities = _networkModel.getNetworkOfferingServiceCapabilities(_entityMgr.findById(NetworkOffering.class, networkOfferingId),
|
||||
Service.Dns);
|
||||
final String isUpdateDnsSupported = dnsCapabilities.get(Capability.AllowDnsSuffixModification);
|
||||
if (isUpdateDnsSupported == null || !Boolean.valueOf(isUpdateDnsSupported)) {
|
||||
if (networkDomain != null) {
|
||||
// TBD: NetworkOfferingId and zoneId. Send uuids instead.
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Domain name change is not supported by network offering id=%d in zone %s",
|
||||
networkOfferingId, zone));
|
||||
}
|
||||
} else {
|
||||
if (networkDomain == null) {
|
||||
// 1) Get networkDomain from the corresponding account/domain/zone
|
||||
if (aclType == ACLType.Domain) {
|
||||
networkDomain = _networkModel.getDomainNetworkDomain(domainId, zoneId);
|
||||
} else if (aclType == ACLType.Account) {
|
||||
networkDomain = _networkModel.getAccountNetworkDomain(owner.getId(), zoneId);
|
||||
}
|
||||
|
||||
// 2) If null, generate networkDomain using domain suffix from the global config variables
|
||||
if (networkDomain == null) {
|
||||
networkDomain = "cs" + Long.toHexString(owner.getId()) + GuestDomainSuffix.valueIn(zoneId);
|
||||
}
|
||||
|
||||
} else {
|
||||
// validate network domain
|
||||
if (!NetUtils.verifyDomainName(networkDomain)) {
|
||||
throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. Each domain "
|
||||
+ "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', the digits '0' through '9', "
|
||||
+ "and the hyphen ('-'); can't start or end with \"-\"");
|
||||
if (networkDomain == null) {
|
||||
// 1) Get networkDomain from the corresponding account/domain/zone
|
||||
if (aclType == ACLType.Domain) {
|
||||
networkDomain = _networkModel.getDomainNetworkDomain(domainId, zoneId);
|
||||
} else if (aclType == ACLType.Account) {
|
||||
networkDomain = _networkModel.getAccountNetworkDomain(owner.getId(), zoneId);
|
||||
}
|
||||
|
||||
// 2) If null, generate networkDomain using domain suffix from the global config variables
|
||||
if (networkDomain == null) {
|
||||
networkDomain = "cs" + Long.toHexString(owner.getId()) + GuestDomainSuffix.valueIn(zoneId);
|
||||
}
|
||||
|
||||
} else {
|
||||
// validate network domain
|
||||
if (!NetUtils.verifyDomainName(networkDomain)) {
|
||||
throw new InvalidParameterValueException("Invalid network domain. Total length shouldn't exceed 190 chars. Each domain "
|
||||
+ "label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', the digits '0' through '9', "
|
||||
+ "and the hyphen ('-'); can't start or end with \"-\"");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In Advance zone Cidr for Shared networks and Isolated networks w/o source nat service can't be NULL - 2.2.x
|
||||
// limitation, remove after we introduce support for multiple ip ranges
|
||||
// with different Cidrs for the same Shared network
|
||||
final boolean cidrRequired = zone.getNetworkType() == NetworkType.Advanced
|
||||
&& ntwkOff.getTrafficType() == TrafficType.Guest
|
||||
&& (ntwkOff.getGuestType() == GuestType.Shared || (ntwkOff.getGuestType() == GuestType.Isolated
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.Gateway)));
|
||||
if (cidr == null && ip6Cidr == null && cidrRequired) {
|
||||
if (ntwkOff.getGuestType() == GuestType.Shared) {
|
||||
throw new InvalidParameterValueException(String.format("Gateway/netmask are required when creating %s networks.", Network.GuestType.Shared));
|
||||
} else {
|
||||
throw new InvalidParameterValueException("gateway/netmask are required when create network of" + " type " + GuestType.Isolated + " with service " + Service.SourceNat.getName() + " disabled");
|
||||
// In Advance zone Cidr for Shared networks and Isolated networks w/o source nat service can't be NULL - 2.2.x
|
||||
// limitation, remove after we introduce support for multiple ip ranges
|
||||
// with different Cidrs for the same Shared network
|
||||
final boolean cidrRequired = zone.getNetworkType() == NetworkType.Advanced
|
||||
&& ntwkOff.getTrafficType() == TrafficType.Guest
|
||||
&& (ntwkOff.getGuestType() == GuestType.Shared || (ntwkOff.getGuestType() == GuestType.Isolated
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.SourceNat)
|
||||
&& !_networkModel.areServicesSupportedByNetworkOffering(ntwkOff.getId(), Service.Gateway)));
|
||||
if (cidr == null && ip6Cidr == null && cidrRequired) {
|
||||
if (ntwkOff.getGuestType() == GuestType.Shared) {
|
||||
throw new InvalidParameterValueException(String.format("Gateway/netmask are required when creating %s networks.", Network.GuestType.Shared));
|
||||
} else {
|
||||
throw new InvalidParameterValueException("gateway/netmask are required when create network of" + " type " + GuestType.Isolated + " with service " + Service.SourceNat.getName() + " disabled");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkL2OfferingServices(ntwkOff);
|
||||
checkL2OfferingServices(ntwkOff);
|
||||
|
||||
// No cidr can be specified in Basic zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic && cidr != null) {
|
||||
throw new InvalidParameterValueException("StartIp/endIp/gateway/netmask can't be specified for zone of type " + NetworkType.Basic);
|
||||
}
|
||||
// No cidr can be specified in Basic zone
|
||||
if (zone.getNetworkType() == NetworkType.Basic && cidr != null) {
|
||||
throw new InvalidParameterValueException("StartIp/endIp/gateway/netmask can't be specified for zone of type " + NetworkType.Basic);
|
||||
}
|
||||
|
||||
// Check if cidr is RFC1918 compliant if the network is Guest Isolated for IPv4
|
||||
if (cidr != null && (ntwkOff.getGuestType() == Network.GuestType.Isolated && ntwkOff.getTrafficType() == TrafficType.Guest) &&
|
||||
!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) {
|
||||
// Check if cidr is RFC1918 compliant if the network is Guest Isolated for IPv4
|
||||
if (cidr != null && (ntwkOff.getGuestType() == Network.GuestType.Isolated && ntwkOff.getTrafficType() == TrafficType.Guest) &&
|
||||
!NetUtils.validateGuestCidr(cidr, !ConfigurationManager.AllowNonRFC1918CompliantIPs.value())) {
|
||||
throw new InvalidParameterValueException("Virtual Guest Cidr " + cidr + " is not RFC 1918 or 6598 compliant");
|
||||
}
|
||||
}
|
||||
|
||||
final String networkDomainFinal = networkDomain;
|
||||
final String vlanIdFinal = vlanId;
|
||||
|
|
@ -3004,75 +3003,75 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
final NetworkVO userNetwork = new NetworkVO();
|
||||
userNetwork.setNetworkDomain(networkDomainFinal);
|
||||
|
||||
if (cidr != null && gateway != null) {
|
||||
userNetwork.setCidr(cidr);
|
||||
userNetwork.setGateway(gateway);
|
||||
}
|
||||
if (cidr != null && gateway != null) {
|
||||
userNetwork.setCidr(cidr);
|
||||
userNetwork.setGateway(gateway);
|
||||
}
|
||||
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
userNetwork.setIp6Cidr(ip6Cidr);
|
||||
userNetwork.setIp6Gateway(ip6Gateway);
|
||||
}
|
||||
if (StringUtils.isNoneBlank(ip6Gateway, ip6Cidr)) {
|
||||
userNetwork.setIp6Cidr(ip6Cidr);
|
||||
userNetwork.setIp6Gateway(ip6Gateway);
|
||||
}
|
||||
|
||||
if (externalId != null) {
|
||||
userNetwork.setExternalId(externalId);
|
||||
}
|
||||
if (externalId != null) {
|
||||
userNetwork.setExternalId(externalId);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(routerIp)) {
|
||||
userNetwork.setRouterIp(routerIp);
|
||||
}
|
||||
if (StringUtils.isNotBlank(routerIp)) {
|
||||
userNetwork.setRouterIp(routerIp);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(routerIpv6)) {
|
||||
userNetwork.setRouterIpv6(routerIpv6);
|
||||
}
|
||||
if (StringUtils.isNotBlank(routerIpv6)) {
|
||||
userNetwork.setRouterIpv6(routerIpv6);
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs != null) {
|
||||
if (vrIfaceMTUs.first() != null && vrIfaceMTUs.first() > 0) {
|
||||
userNetwork.setPublicMtu(vrIfaceMTUs.first());
|
||||
if (vrIfaceMTUs != null) {
|
||||
if (vrIfaceMTUs.first() != null && vrIfaceMTUs.first() > 0) {
|
||||
userNetwork.setPublicMtu(vrIfaceMTUs.first());
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs.second() != null && vrIfaceMTUs.second() > 0) {
|
||||
userNetwork.setPrivateMtu(vrIfaceMTUs.second());
|
||||
} else {
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (vrIfaceMTUs.second() != null && vrIfaceMTUs.second() > 0) {
|
||||
userNetwork.setPrivateMtu(vrIfaceMTUs.second());
|
||||
} else {
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
} else {
|
||||
userNetwork.setPublicMtu(Integer.valueOf(NetworkService.VRPublicInterfaceMtu.defaultValue()));
|
||||
userNetwork.setPrivateMtu(Integer.valueOf(NetworkService.VRPrivateInterfaceMtu.defaultValue()));
|
||||
}
|
||||
|
||||
if (!GuestType.L2.equals(userNetwork.getGuestType())) {
|
||||
if (StringUtils.isNotBlank(ip4Dns1)) {
|
||||
userNetwork.setDns1(ip4Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip4Dns2)) {
|
||||
userNetwork.setDns2(ip4Dns2);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns1)) {
|
||||
userNetwork.setIp6Dns1(ip6Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns2)) {
|
||||
userNetwork.setIp6Dns2(ip6Dns2);
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanIdFinal != null) {
|
||||
if (isolatedPvlan == null) {
|
||||
URI uri = null;
|
||||
if (UuidUtils.isUuid(vlanIdFinal)) {
|
||||
//Logical router's UUID provided as VLAN_ID
|
||||
userNetwork.setVlanIdAsUUID(vlanIdFinal); //Set transient field
|
||||
} else {
|
||||
uri = encodeVlanIdIntoBroadcastUri(vlanIdFinal, pNtwk);
|
||||
if (!GuestType.L2.equals(userNetwork.getGuestType())) {
|
||||
if (StringUtils.isNotBlank(ip4Dns1)) {
|
||||
userNetwork.setDns1(ip4Dns1);
|
||||
}
|
||||
|
||||
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network pvlans in zone %s",
|
||||
vlanIdFinal, zone));
|
||||
if (StringUtils.isNotBlank(ip4Dns2)) {
|
||||
userNetwork.setDns2(ip4Dns2);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns1)) {
|
||||
userNetwork.setIp6Dns1(ip6Dns1);
|
||||
}
|
||||
if (StringUtils.isNotBlank(ip6Dns2)) {
|
||||
userNetwork.setIp6Dns2(ip6Dns2);
|
||||
}
|
||||
}
|
||||
|
||||
if (vlanIdFinal != null) {
|
||||
if (isolatedPvlan == null) {
|
||||
URI uri = null;
|
||||
if (UuidUtils.isUuid(vlanIdFinal)) {
|
||||
//Logical router's UUID provided as VLAN_ID
|
||||
userNetwork.setVlanIdAsUUID(vlanIdFinal); //Set transient field
|
||||
} else {
|
||||
uri = encodeVlanIdIntoBroadcastUri(vlanIdFinal, pNtwk);
|
||||
}
|
||||
|
||||
if (_networksDao.listByPhysicalNetworkPvlan(physicalNetworkId, uri.toString()).size() > 0) {
|
||||
throw new InvalidParameterValueException(String.format(
|
||||
"Network with vlan %s already exists or overlaps with other network pvlans in zone %s",
|
||||
vlanIdFinal, zone));
|
||||
}
|
||||
|
||||
userNetwork.setBroadcastUri(uri);
|
||||
if (!vlanIdFinal.equalsIgnoreCase(Vlan.UNTAGGED)) {
|
||||
|
|
@ -4940,6 +4939,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
|
|||
return new ConfigKey<?>[]{NetworkGcWait, NetworkGcInterval, NetworkLockTimeout, DeniedRoutes,
|
||||
GuestDomainSuffix, NetworkThrottlingRate, MinVRVersion,
|
||||
PromiscuousMode, MacAddressChanges, ForgedTransmits, MacLearning, RollingRestartEnabled,
|
||||
TUNGSTEN_ENABLED, NSX_ENABLED, NETRIS_ENABLED, NETWORK_LB_HAPROXY_MAX_CONN};
|
||||
TUNGSTEN_ENABLED, NSX_ENABLED, NETRIS_ENABLED, NETWORK_LB_HAPROXY_MAX_CONN,
|
||||
NETWORK_LB_HAPROXY_IDLE_TIMEOUT};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
// under the License.
|
||||
package com.cloud.upgrade;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
|
@ -96,7 +97,9 @@ public final class DatabaseVersionHierarchy {
|
|||
// we cannot find the version specified, so get the
|
||||
// most recent one immediately before this version
|
||||
if (!contains(fromVersion)) {
|
||||
return getPath(getRecentVersion(fromVersion), toVersion);
|
||||
DbUpgrade[] dbUpgrades = getPath(getRecentVersion(fromVersion), toVersion);
|
||||
return Arrays.stream(dbUpgrades).filter(up -> CloudStackVersion.compare(up.getUpgradedVersion(), fromVersion.toString()) > 0)
|
||||
.toArray(DbUpgrade[]::new);
|
||||
}
|
||||
|
||||
final Predicate<? super VersionNode> predicate;
|
||||
|
|
|
|||
|
|
@ -57,8 +57,4 @@ public class Upgrade42020to42030 extends DbUpgradeAbstractImpl implements DbUpgr
|
|||
public InputStream[] getCleanupScripts() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateSystemVmTemplates(Connection conn) {
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.account', 'api_key_access', 'boolean
|
|||
CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.account', 'api_key_access', 'boolean DEFAULT NULL COMMENT "is api key access allowed for the account" ');
|
||||
|
||||
-- Create a new group for Usage Server related configurations
|
||||
INSERT INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9);
|
||||
INSERT IGNORE INTO `cloud`.`configuration_group` (`name`, `description`, `precedence`) VALUES ('Usage Server', 'Usage Server related configuration', 9);
|
||||
UPDATE `cloud`.`configuration_subgroup` set `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server'), `precedence` = 1 WHERE `name`='Usage';
|
||||
UPDATE `cloud`.`configuration` SET `group_id` = (SELECT `id` FROM `cloud`.`configuration_group` WHERE `name` = 'Usage Server') where `subgroup_id` = (SELECT `id` FROM `cloud`.`configuration_subgroup` WHERE `name` = 'Usage');
|
||||
|
||||
|
|
|
|||
|
|
@ -53,3 +53,10 @@ DELETE FROM `cloud`.`configuration` WHERE name = 'consoleproxy.cmd.port';
|
|||
|
||||
-- Drops the unused "backup_interval_type" column of the "cloud.backups" table
|
||||
ALTER TABLE `cloud`.`backups` DROP COLUMN `backup_interval_type`;
|
||||
|
||||
-- Update `user.password.reset.mail.template` configuration value to match new logic
|
||||
UPDATE `cloud`.`configuration`
|
||||
SET value = CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', '{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team')
|
||||
WHERE name = 'user.password.reset.mail.template'
|
||||
AND value IN (CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', 'http://{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team'), CONCAT_WS('\n', 'Hello {{username}}!', 'You have requested to reset your password. Please click the following link to reset your password:', '{{{domainUrl}}}{{{resetLink}}}', 'If you did not request a password reset, please ignore this email.', '', 'Regards,', 'The CloudStack Team'));
|
||||
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import com.cloud.upgrade.dao.Upgrade41120to41130;
|
|||
import com.cloud.upgrade.dao.Upgrade41120to41200;
|
||||
import com.cloud.upgrade.dao.Upgrade41510to41520;
|
||||
import com.cloud.upgrade.dao.Upgrade41610to41700;
|
||||
import com.cloud.upgrade.dao.Upgrade42010to42100;
|
||||
import com.cloud.upgrade.dao.Upgrade452to453;
|
||||
import com.cloud.upgrade.dao.Upgrade453to460;
|
||||
import com.cloud.upgrade.dao.Upgrade460to461;
|
||||
|
|
@ -380,4 +381,23 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertFalse("DatabaseUpgradeChecker should not be a standalone component", checker.isStandalone());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCalculateUpgradePath42010to42100() {
|
||||
|
||||
final CloudStackVersion dbVersion = CloudStackVersion.parse("4.20.1.0");
|
||||
assertNotNull(dbVersion);
|
||||
|
||||
final CloudStackVersion currentVersion = CloudStackVersion.parse("4.21.0.0");
|
||||
assertNotNull(currentVersion);
|
||||
|
||||
final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker();
|
||||
final DbUpgrade[] upgrades = checker.calculateUpgradePath(dbVersion, currentVersion);
|
||||
|
||||
assertNotNull(upgrades);
|
||||
assertEquals(1, upgrades.length);
|
||||
assertTrue(upgrades[0] instanceof Upgrade42010to42100);
|
||||
|
||||
assertArrayEquals(new String[]{"4.20.1.0", "4.21.0.0"}, upgrades[0].getUpgradableVersionRange());
|
||||
assertEquals(currentVersion.toString(), upgrades[0].getUpgradedVersion());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,9 @@ package com.cloud.hypervisor.kvm.resource;
|
|||
|
||||
import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION;
|
||||
import static com.cloud.host.Host.HOST_OVFTOOL_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VDDK_LIB_DIR;
|
||||
import static com.cloud.host.Host.HOST_VDDK_SUPPORT;
|
||||
import static com.cloud.host.Host.HOST_VDDK_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VIRTV2V_VERSION;
|
||||
import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION;
|
||||
import static org.apache.cloudstack.utils.linux.KVMHostInfo.isHostS390x;
|
||||
|
|
@ -365,6 +368,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
public static final String WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "rpm -qa | grep -i virtio-win";
|
||||
public static final String UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD = "dpkg -l virtio-win";
|
||||
public static final String UBUNTU_NBDKIT_PKG_CHECK_CMD = "dpkg -l nbdkit";
|
||||
public static final String VDDK_AUTODETECT_PATH_CMD = "find / -type d -name 'vmware-vix-disklib-distrib' 2>/dev/null | head -n 1";
|
||||
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MIN = 2;
|
||||
public static final int LIBVIRT_CGROUP_CPU_SHARES_MAX = 262144;
|
||||
|
|
@ -885,10 +889,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
private boolean convertInstanceVerboseMode = false;
|
||||
private Map<String, String> convertInstanceEnv = null;
|
||||
private String vddkLibDir = null;
|
||||
private static final String libguestfsBackend = "direct";
|
||||
protected boolean dpdkSupport = false;
|
||||
protected String dpdkOvsPath;
|
||||
protected String directDownloadTemporaryDownloadPath;
|
||||
protected String cachePath;
|
||||
private String vddkTransports = null;
|
||||
private String vddkThumbprint = null;
|
||||
private String vddkVersion = null;
|
||||
private String detectedPasswordFileOption = null;
|
||||
protected String javaTempDir = System.getProperty("java.io.tmpdir");
|
||||
|
||||
private String getEndIpFromStartIp(final String startIp, final int numIps) {
|
||||
|
|
@ -953,6 +963,26 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return convertInstanceEnv;
|
||||
}
|
||||
|
||||
public String getVddkLibDir() {
|
||||
return vddkLibDir;
|
||||
}
|
||||
|
||||
public String getLibguestfsBackend() {
|
||||
return libguestfsBackend;
|
||||
}
|
||||
|
||||
public String getVddkTransports() {
|
||||
return vddkTransports;
|
||||
}
|
||||
|
||||
public String getVddkThumbprint() {
|
||||
return vddkThumbprint;
|
||||
}
|
||||
|
||||
public String getVddkVersion() {
|
||||
return vddkVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines resource's public and private network interface according to what is configured in agent.properties.
|
||||
*/
|
||||
|
|
@ -1153,6 +1183,37 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
|
||||
setConvertInstanceEnv(convertEnvTmpDir, convertEnvVirtv2vTmpDir);
|
||||
|
||||
vddkLibDir = StringUtils.trimToNull(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_LIB_DIR));
|
||||
if (StringUtils.isNotBlank(vddkLibDir) && !isVddkLibDirValid(vddkLibDir)) {
|
||||
LOGGER.warn("Configured VDDK library dir [{}] is invalid (missing lib64/libvixDiskLib.so), attempting auto-detection", vddkLibDir);
|
||||
vddkLibDir = null;
|
||||
}
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
vddkLibDir = detectVddkLibDir();
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkLibDir)) {
|
||||
LOGGER.info("Detected VDDK library dir: {}", vddkLibDir);
|
||||
} else {
|
||||
LOGGER.warn("Could not detect a valid VDDK library dir; VDDK conversion will be unavailable");
|
||||
}
|
||||
|
||||
vddkVersion = detectVddkVersion();
|
||||
if (StringUtils.isNotBlank(vddkVersion)) {
|
||||
LOGGER.info("Detected nbdkit VDDK plugin version: {}", vddkVersion);
|
||||
}
|
||||
|
||||
vddkTransports = StringUtils.trimToNull(
|
||||
AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_TRANSPORTS));
|
||||
vddkThumbprint = StringUtils.trimToNull(
|
||||
AgentPropertiesFileHandler.getPropertyValue(AgentProperties.VDDK_THUMBPRINT));
|
||||
|
||||
detectedPasswordFileOption = detectPasswordFileOption();
|
||||
if (StringUtils.isNotBlank(detectedPasswordFileOption)) {
|
||||
LOGGER.info("Detected virt-v2v password option: {}", detectedPasswordFileOption);
|
||||
} else {
|
||||
LOGGER.warn("Could not detect virt-v2v password option, VDDK conversions may fail");
|
||||
}
|
||||
|
||||
pool = (String)params.get("pool");
|
||||
if (pool == null) {
|
||||
pool = "/root";
|
||||
|
|
@ -4224,6 +4285,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
cmd.setHostTags(getHostTags());
|
||||
boolean instanceConversionSupported = hostSupportsInstanceConversion();
|
||||
cmd.getHostDetails().put(HOST_INSTANCE_CONVERSION, String.valueOf(instanceConversionSupported));
|
||||
cmd.getHostDetails().put(HOST_VDDK_SUPPORT, String.valueOf(hostSupportsVddk()));
|
||||
if (StringUtils.isNotBlank(vddkLibDir)) {
|
||||
cmd.getHostDetails().put(HOST_VDDK_LIB_DIR, vddkLibDir);
|
||||
}
|
||||
if (StringUtils.isNotBlank(vddkVersion)) {
|
||||
cmd.getHostDetails().put(HOST_VDDK_VERSION, vddkVersion);
|
||||
}
|
||||
if (instanceConversionSupported) {
|
||||
cmd.getHostDetails().put(HOST_VIRTV2V_VERSION, getHostVirtV2vVersion());
|
||||
}
|
||||
|
|
@ -5945,6 +6013,66 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return exitValue == 0;
|
||||
}
|
||||
|
||||
public boolean hostSupportsVddk() {
|
||||
return hostSupportsVddk(null);
|
||||
}
|
||||
|
||||
public boolean hostSupportsVddk(String overriddenVddkLibDir) {
|
||||
String effectiveVddkLibDir = StringUtils.trimToNull(overriddenVddkLibDir);
|
||||
if (StringUtils.isBlank(effectiveVddkLibDir)) {
|
||||
effectiveVddkLibDir = StringUtils.trimToNull(vddkLibDir);
|
||||
}
|
||||
if (StringUtils.isBlank(effectiveVddkLibDir) || !isVddkLibDirValid(effectiveVddkLibDir)) {
|
||||
effectiveVddkLibDir = detectVddkLibDir();
|
||||
}
|
||||
return hostSupportsInstanceConversion() && isVddkLibDirValid(effectiveVddkLibDir) && StringUtils.isNotBlank(detectVddkVersion());
|
||||
}
|
||||
|
||||
protected boolean isVddkLibDirValid(String path) {
|
||||
if (StringUtils.isBlank(path)) {
|
||||
return false;
|
||||
}
|
||||
File libDir = new File(path, "lib64");
|
||||
if (!libDir.isDirectory()) {
|
||||
return false;
|
||||
}
|
||||
File[] libs = libDir.listFiles((dir, name) -> name.startsWith("libvixDiskLib.so"));
|
||||
return libs != null && libs.length > 0;
|
||||
}
|
||||
|
||||
protected String detectVddkLibDir() {
|
||||
String detectedPath = StringUtils.trimToNull(Script.runSimpleBashScript(VDDK_AUTODETECT_PATH_CMD));
|
||||
if (StringUtils.isNotBlank(detectedPath) && isVddkLibDirValid(detectedPath)) {
|
||||
return detectedPath;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected String detectVddkVersion() {
|
||||
try {
|
||||
ProcessBuilder pb = new ProcessBuilder("nbdkit", "vddk", "--version");
|
||||
Process process = pb.start();
|
||||
|
||||
String output = new String(process.getInputStream().readAllBytes());
|
||||
process.waitFor();
|
||||
|
||||
if (StringUtils.isBlank(output)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (String line : output.split("\\R")) {
|
||||
String trimmed = StringUtils.trimToEmpty(line);
|
||||
if (trimmed.startsWith("vddk ")) {
|
||||
return StringUtils.trimToNull(trimmed.substring("vddk ".length()));
|
||||
}
|
||||
}
|
||||
return null;
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to detect vddk version: {}", e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean hostSupportsWindowsGuestConversion() {
|
||||
if (isUbuntuOrDebianHost()) {
|
||||
int exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD);
|
||||
|
|
@ -5959,6 +6087,40 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
return exitValue == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect which password option virt-v2v supports by examining its --help output
|
||||
* @return "-ip" if supported (virt-v2v >= 2.8.1), "--password-file" if older version, or null if detection fails
|
||||
*/
|
||||
protected String detectPasswordFileOption() {
|
||||
try {
|
||||
ProcessBuilder pb = new ProcessBuilder("virt-v2v", "--help");
|
||||
Process process = pb.start();
|
||||
|
||||
String output = new String(process.getInputStream().readAllBytes());
|
||||
process.waitFor();
|
||||
|
||||
if (output.contains("-ip <filename>")) {
|
||||
return "-ip";
|
||||
} else if (output.contains("--password-file")) {
|
||||
return "--password-file";
|
||||
} else {
|
||||
LOGGER.error("virt-v2v does not support -ip or --password-file");
|
||||
return null;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Failed to detect virt-v2v password option: {}", e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the detected password file option for virt-v2v
|
||||
* @return the password option ("-ip" or "--password-file") or null if not detected
|
||||
*/
|
||||
public String getDetectedPasswordFileOption() {
|
||||
return detectedPasswordFileOption;
|
||||
}
|
||||
|
||||
public String getHostVirtV2vVersion() {
|
||||
if (!hostSupportsInstanceConversion()) {
|
||||
return "";
|
||||
|
|
|
|||
|
|
@ -30,7 +30,15 @@ public class LibvirtCheckConvertInstanceCommandWrapper extends CommandWrapper<Ch
|
|||
|
||||
@Override
|
||||
public Answer execute(CheckConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
|
||||
if (!serverResource.hostSupportsInstanceConversion()) {
|
||||
if (cmd.isUseVddk()) {
|
||||
if (!serverResource.hostSupportsVddk(cmd.getVddkLibDir())) {
|
||||
String msg = String.format("Cannot convert the instance from VMware using VDDK on host %s. " +
|
||||
"Please make sure virt-v2v%s, nbdkit-vddk and a valid VDDK library directory are available on the host.",
|
||||
serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost() ? ", nbdkit" : "");
|
||||
logger.info(msg);
|
||||
return new CheckConvertInstanceAnswer(cmd, false, msg);
|
||||
}
|
||||
} else if (!serverResource.hostSupportsInstanceConversion()) {
|
||||
String msg = String.format("Cannot convert the instance from VMware as the virt-v2v binary is not found on host %s. " +
|
||||
"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
|
||||
logger.info(msg);
|
||||
|
|
|
|||
|
|
@ -20,10 +20,17 @@ package com.cloud.hypervisor.kvm.resource.wrapper;
|
|||
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.attribute.PosixFilePermission;
|
||||
import java.util.Locale;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
import org.apache.commons.collections4.MapUtils;
|
||||
|
|
@ -51,6 +58,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
|
||||
private static final List<Hypervisor.HypervisorType> supportedInstanceConvertSourceHypervisors =
|
||||
List.of(Hypervisor.HypervisorType.VMware);
|
||||
private static final Pattern SHA1_FINGERPRINT_PATTERN = Pattern.compile("(?i)(?:SHA1\\s+)?Fingerprint\\s*=\\s*([0-9A-F:]+)");
|
||||
|
||||
@Override
|
||||
public Answer execute(ConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
|
||||
|
|
@ -61,7 +69,8 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
DataStoreTO conversionTemporaryLocation = cmd.getConversionTemporaryLocation();
|
||||
long timeout = (long) cmd.getWait() * 1000;
|
||||
String extraParams = cmd.getExtraParams();
|
||||
String originalVMName = cmd.getOriginalVMName(); // For logging purposes, as the sourceInstance may have been cloned
|
||||
boolean useVddk = cmd.isUseVddk();
|
||||
String originalVMName = cmd.getOriginalVMName();
|
||||
|
||||
if (cmd.getCheckConversionSupport() && !serverResource.hostSupportsInstanceConversion()) {
|
||||
String msg = String.format("Cannot convert the instance %s from VMware as the virt-v2v binary is not found. " +
|
||||
|
|
@ -84,61 +93,75 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
logger.info(String.format("(%s) Attempting to convert the instance %s from %s to KVM",
|
||||
originalVMName, sourceInstanceName, sourceHypervisorType));
|
||||
final String temporaryConvertPath = temporaryStoragePool.getLocalPath();
|
||||
|
||||
String ovfTemplateDirOnConversionLocation;
|
||||
String sourceOVFDirPath;
|
||||
boolean ovfExported = false;
|
||||
if (cmd.getExportOvfToConversionLocation()) {
|
||||
String exportInstanceOVAUrl = getExportInstanceOVAUrl(sourceInstance, originalVMName);
|
||||
if (StringUtils.isBlank(exportInstanceOVAUrl)) {
|
||||
String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
|
||||
int noOfThreads = cmd.getThreadsCountToExportOvf();
|
||||
if (noOfThreads > 1 && !serverResource.ovfExportToolSupportsParallelThreads()) {
|
||||
noOfThreads = 0;
|
||||
}
|
||||
ovfTemplateDirOnConversionLocation = UUID.randomUUID().toString();
|
||||
temporaryStoragePool.createFolder(ovfTemplateDirOnConversionLocation);
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
ovfExported = exportOVAFromVMOnVcenter(exportInstanceOVAUrl, sourceOVFDirPath, noOfThreads, originalVMName, timeout);
|
||||
if (!ovfExported) {
|
||||
String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
|
||||
} else {
|
||||
ovfTemplateDirOnConversionLocation = cmd.getTemplateDirOnConversionLocation();
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
}
|
||||
|
||||
logger.info(String.format("(%s) Attempting to convert the OVF %s of the instance %s from %s to KVM",
|
||||
originalVMName, ovfTemplateDirOnConversionLocation, sourceInstanceName, sourceHypervisorType));
|
||||
|
||||
final String temporaryConvertUuid = UUID.randomUUID().toString();
|
||||
boolean verboseModeEnabled = serverResource.isConvertInstanceVerboseModeEnabled();
|
||||
|
||||
boolean cleanupSecondaryStorage = false;
|
||||
boolean ovfExported = false;
|
||||
String ovfTemplateDirOnConversionLocation = null;
|
||||
|
||||
try {
|
||||
boolean result = performInstanceConversion(originalVMName, sourceOVFDirPath, temporaryConvertPath, temporaryConvertUuid,
|
||||
timeout, verboseModeEnabled, extraParams, serverResource);
|
||||
boolean result;
|
||||
if (useVddk) {
|
||||
logger.info("({}) Using VDDK-based conversion (direct from VMware)", originalVMName);
|
||||
String vddkLibDir = resolveVddkSetting(cmd.getVddkLibDir(), serverResource.getVddkLibDir());
|
||||
if (StringUtils.isBlank(vddkLibDir)) {
|
||||
String err = String.format("VDDK lib dir is not configured on the host. " +
|
||||
"Set '%s' in agent.properties or in details parameter of the import api calll to use VDDK-based conversion.", "vddk.lib.dir");
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
String vddkTransports = resolveVddkSetting(cmd.getVddkTransports(), serverResource.getVddkTransports());
|
||||
String configuredVddkThumbprint = resolveVddkSetting(cmd.getVddkThumbprint(), serverResource.getVddkThumbprint());
|
||||
String passwordOption = serverResource.getDetectedPasswordFileOption();
|
||||
result = performInstanceConversionUsingVddk(sourceInstance, originalVMName, temporaryConvertPath,
|
||||
vddkLibDir, serverResource.getLibguestfsBackend(), vddkTransports, configuredVddkThumbprint,
|
||||
timeout, verboseModeEnabled, extraParams, temporaryConvertUuid, passwordOption);
|
||||
} else {
|
||||
logger.info("({}) Using OVF-based conversion (export + local convert)", originalVMName);
|
||||
String sourceOVFDirPath;
|
||||
if (cmd.getExportOvfToConversionLocation()) {
|
||||
String exportInstanceOVAUrl = getExportInstanceOVAUrl(sourceInstance, originalVMName);
|
||||
|
||||
if (StringUtils.isBlank(exportInstanceOVAUrl)) {
|
||||
String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
|
||||
int noOfThreads = cmd.getThreadsCountToExportOvf();
|
||||
if (noOfThreads > 1 && !serverResource.ovfExportToolSupportsParallelThreads()) {
|
||||
noOfThreads = 0;
|
||||
}
|
||||
ovfTemplateDirOnConversionLocation = UUID.randomUUID().toString();
|
||||
temporaryStoragePool.createFolder(ovfTemplateDirOnConversionLocation);
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
ovfExported = exportOVAFromVMOnVcenter(exportInstanceOVAUrl, sourceOVFDirPath, noOfThreads, originalVMName, timeout);
|
||||
|
||||
if (!ovfExported) {
|
||||
String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
|
||||
} else {
|
||||
ovfTemplateDirOnConversionLocation = cmd.getTemplateDirOnConversionLocation();
|
||||
sourceOVFDirPath = String.format("%s/%s/", temporaryConvertPath, ovfTemplateDirOnConversionLocation);
|
||||
}
|
||||
|
||||
result = performInstanceConversion(originalVMName, sourceOVFDirPath, temporaryConvertPath, temporaryConvertUuid,
|
||||
timeout, verboseModeEnabled, extraParams, serverResource);
|
||||
}
|
||||
|
||||
if (!result) {
|
||||
String err = String.format(
|
||||
"The virt-v2v conversion for the OVF %s failed. Please check the agent logs " +
|
||||
"for the virt-v2v output. Please try on a different kvm host which " +
|
||||
"has a different virt-v2v version.",
|
||||
ovfTemplateDirOnConversionLocation);
|
||||
logger.error(String.format("(%s) %s", originalVMName, err));
|
||||
String err = String.format("Instance conversion failed for VM %s. Please check virt-v2v logs.", sourceInstanceName);
|
||||
logger.error("({}) {}", originalVMName, err);
|
||||
return new Answer(cmd, false, err);
|
||||
}
|
||||
return new ConvertInstanceAnswer(cmd, temporaryConvertUuid);
|
||||
} catch (Exception e) {
|
||||
String error = String.format("Error converting instance %s from %s, due to: %s",
|
||||
sourceInstanceName, sourceHypervisorType, e.getMessage());
|
||||
logger.error(String.format("(%s) %s", originalVMName, error), e);
|
||||
String error = String.format("Error converting instance %s from %s, due to: %s", sourceInstanceName, sourceHypervisorType, e.getMessage());
|
||||
logger.error("({}) {}", originalVMName, error, e);
|
||||
cleanupSecondaryStorage = true;
|
||||
return new Answer(cmd, false, error);
|
||||
} finally {
|
||||
|
|
@ -275,4 +298,198 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
|
|||
protected String encodeUsername(String username) {
|
||||
return URLEncoder.encode(username, Charset.defaultCharset());
|
||||
}
|
||||
|
||||
private String resolveVddkSetting(String commandValue, String agentValue) {
|
||||
return StringUtils.defaultIfBlank(StringUtils.trimToNull(commandValue), StringUtils.trimToNull(agentValue));
|
||||
}
|
||||
|
||||
protected boolean performInstanceConversionUsingVddk(RemoteInstanceTO vmwareInstance, String originalVMName,
|
||||
String temporaryConvertFolder, String vddkLibDir,
|
||||
String libguestfsBackend, String vddkTransports,
|
||||
String configuredVddkThumbprint,
|
||||
long timeout, boolean verboseModeEnabled, String extraParams,
|
||||
String temporaryConvertUuid, String passwordOption) {
|
||||
|
||||
String vcenterPassword = vmwareInstance.getVcenterPassword();
|
||||
if (StringUtils.isBlank(vcenterPassword)) {
|
||||
logger.error("({}) Could not determine vCenter password for {}", originalVMName, vmwareInstance.getVcenterHost());
|
||||
return false;
|
||||
}
|
||||
|
||||
String passwordFilePath = String.format("/tmp/v2v.pass.cloud.%s.%s",
|
||||
StringUtils.defaultIfBlank(vmwareInstance.getVcenterHost(), "unknown"),
|
||||
UUID.randomUUID());
|
||||
try {
|
||||
Files.writeString(Path.of(passwordFilePath), vcenterPassword);
|
||||
Files.setPosixFilePermissions(Path.of(passwordFilePath), Set.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE));
|
||||
logger.debug("({}) Written vCenter password to {}", originalVMName, passwordFilePath);
|
||||
} catch (Exception e) {
|
||||
logger.error("({}) Failed to write vCenter password file {}: {}", originalVMName, passwordFilePath, e.getMessage());
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
String vpxUrl = buildVpxUrl(vmwareInstance);
|
||||
|
||||
StringBuilder cmd = new StringBuilder();
|
||||
|
||||
cmd.append("export LIBGUESTFS_BACKEND=").append(libguestfsBackend).append(" && ");
|
||||
|
||||
cmd.append("virt-v2v ");
|
||||
cmd.append("--root first ");
|
||||
cmd.append("-ic '").append(vpxUrl).append("' ");
|
||||
if (StringUtils.isBlank(passwordOption)) {
|
||||
logger.error("({}) Could not determine supported password file option for virt-v2v", originalVMName);
|
||||
return false;
|
||||
}
|
||||
|
||||
cmd.append(passwordOption).append(" ").append(passwordFilePath).append(" ");
|
||||
cmd.append("-it vddk ");
|
||||
cmd.append("-io vddk-libdir=").append(vddkLibDir).append(" ");
|
||||
String vddkThumbprint = StringUtils.trimToNull(configuredVddkThumbprint);
|
||||
if (StringUtils.isBlank(vddkThumbprint)) {
|
||||
vddkThumbprint = getVcenterThumbprint(vmwareInstance.getVcenterHost(), timeout, originalVMName);
|
||||
}
|
||||
if (StringUtils.isBlank(vddkThumbprint)) {
|
||||
logger.error("({}) Could not determine vCenter thumbprint for {}", originalVMName, vmwareInstance.getVcenterHost());
|
||||
return false;
|
||||
}
|
||||
cmd.append("-io vddk-thumbprint=").append(vddkThumbprint).append(" ");
|
||||
if (StringUtils.isNotBlank(vddkTransports)) {
|
||||
cmd.append("-io vddk-transports=").append(vddkTransports).append(" ");
|
||||
}
|
||||
cmd.append(vmwareInstance.getInstanceName()).append(" ");
|
||||
cmd.append("-o local ");
|
||||
cmd.append("-os ").append(temporaryConvertFolder).append(" ");
|
||||
cmd.append("-of qcow2 ");
|
||||
cmd.append("-on ").append(temporaryConvertUuid).append(" ");
|
||||
|
||||
if (verboseModeEnabled) {
|
||||
cmd.append("-v ");
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(extraParams)) {
|
||||
cmd.append(extraParams).append(" ");
|
||||
}
|
||||
|
||||
Script script = new Script("/bin/bash", timeout, logger);
|
||||
script.add("-c");
|
||||
script.add(cmd.toString());
|
||||
|
||||
String logPrefix = String.format("(%s) virt-v2v vddk import", originalVMName);
|
||||
OutputInterpreter.LineByLineOutputLogger outputLogger =
|
||||
new OutputInterpreter.LineByLineOutputLogger(logger, logPrefix);
|
||||
|
||||
logger.info("({}) Starting virt-v2v VDDK conversion", originalVMName);
|
||||
script.execute(outputLogger);
|
||||
|
||||
int exitValue = script.getExitValue();
|
||||
if (exitValue != 0) {
|
||||
logger.error("({}) virt-v2v failed with exit code {}", originalVMName, exitValue);
|
||||
}
|
||||
|
||||
return exitValue == 0;
|
||||
} finally {
|
||||
try {
|
||||
Files.deleteIfExists(Path.of(passwordFilePath));
|
||||
logger.debug("({}) Deleted password file {}", originalVMName, passwordFilePath);
|
||||
} catch (Exception e) {
|
||||
logger.warn("({}) Failed to delete password file {}: {}", originalVMName, passwordFilePath, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected String getVcenterThumbprint(String vcenterHost, long timeout, String originalVMName) {
|
||||
if (StringUtils.isBlank(vcenterHost)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
String endpoint = String.format("%s:443", vcenterHost);
|
||||
String command = String.format("openssl s_client -connect '%s' </dev/null 2>/dev/null | " +
|
||||
"openssl x509 -fingerprint -sha1 -noout", endpoint);
|
||||
|
||||
Script script = new Script("/bin/bash", timeout, logger);
|
||||
script.add("-c");
|
||||
script.add(command);
|
||||
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
script.execute(parser);
|
||||
|
||||
String output = parser.getLines();
|
||||
if (script.getExitValue() != 0) {
|
||||
logger.error("({}) Failed to fetch vCenter thumbprint for {}", originalVMName, vcenterHost);
|
||||
return null;
|
||||
}
|
||||
|
||||
String thumbprint = extractSha1Fingerprint(output);
|
||||
if (StringUtils.isBlank(thumbprint)) {
|
||||
logger.error("({}) Failed to parse vCenter thumbprint from output for {}", originalVMName, vcenterHost);
|
||||
return null;
|
||||
}
|
||||
return thumbprint;
|
||||
}
|
||||
|
||||
private String extractSha1Fingerprint(String output) {
|
||||
String parsedOutput = StringUtils.trimToEmpty(output);
|
||||
if (StringUtils.isBlank(parsedOutput)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (String line : parsedOutput.split("\\R")) {
|
||||
String trimmedLine = StringUtils.trimToEmpty(line);
|
||||
if (StringUtils.isBlank(trimmedLine)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Matcher matcher = SHA1_FINGERPRINT_PATTERN.matcher(trimmedLine);
|
||||
if (matcher.find()) {
|
||||
return matcher.group(1).toUpperCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
// Fallback for raw fingerprint-only output.
|
||||
if (trimmedLine.matches("(?i)[0-9a-f]{2}(:[0-9a-f]{2})+")) {
|
||||
return trimmedLine.toUpperCase(Locale.ROOT);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build vpx:// URL for virt-v2v
|
||||
*
|
||||
* Format:
|
||||
* vpx://user@vcenter/DC/cluster/host?no_verify=1
|
||||
*/
|
||||
private String buildVpxUrl(RemoteInstanceTO vmwareInstance) {
|
||||
|
||||
String vmName = vmwareInstance.getInstanceName();
|
||||
String vcenter = vmwareInstance.getVcenterHost();
|
||||
String username = vmwareInstance.getVcenterUsername();
|
||||
String datacenter = vmwareInstance.getDatacenterName();
|
||||
String cluster = vmwareInstance.getClusterName();
|
||||
String host = vmwareInstance.getHostName();
|
||||
|
||||
String encodedUsername = encodeUsername(username);
|
||||
|
||||
StringBuilder url = new StringBuilder();
|
||||
url.append("vpx://")
|
||||
.append(encodedUsername)
|
||||
.append("@")
|
||||
.append(vcenter)
|
||||
.append("/")
|
||||
.append(datacenter);
|
||||
|
||||
if (StringUtils.isNotBlank(cluster)) {
|
||||
url.append("/").append(cluster);
|
||||
}
|
||||
|
||||
if (StringUtils.isNotBlank(host)) {
|
||||
url.append("/").append(host);
|
||||
}
|
||||
|
||||
url.append("?no_verify=1");
|
||||
|
||||
logger.info("({}) Using VPX URL: {}", vmName, url);
|
||||
return url.toString();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
|||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.script.Script;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
@ResourceWrapper(handles = ReadyCommand.class)
|
||||
public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyCommand, Answer, LibvirtComputingResource> {
|
||||
|
|
@ -50,6 +51,9 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyComman
|
|||
if (libvirtComputingResource.hostSupportsInstanceConversion()) {
|
||||
hostDetails.put(Host.HOST_VIRTV2V_VERSION, libvirtComputingResource.getHostVirtV2vVersion());
|
||||
}
|
||||
hostDetails.put(Host.HOST_VDDK_SUPPORT, Boolean.toString(libvirtComputingResource.hostSupportsVddk()));
|
||||
hostDetails.put(Host.HOST_VDDK_LIB_DIR, StringUtils.defaultString(libvirtComputingResource.getVddkLibDir()));
|
||||
hostDetails.put(Host.HOST_VDDK_VERSION, StringUtils.defaultString(libvirtComputingResource.getVddkVersion()));
|
||||
|
||||
if (libvirtComputingResource.hostSupportsOvfExport()) {
|
||||
hostDetails.put(Host.HOST_OVFTOOL_VERSION, libvirtComputingResource.getHostOvfToolVersion());
|
||||
|
|
|
|||
|
|
@ -52,6 +52,7 @@ public class LibvirtCheckConvertInstanceCommandWrapperTest {
|
|||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_success() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(false);
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(true);
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
assertTrue(answer.getResult());
|
||||
|
|
@ -59,9 +60,33 @@ public class LibvirtCheckConvertInstanceCommandWrapperTest {
|
|||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_failure() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(false);
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsInstanceConversion()).thenReturn(false);
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
assertFalse(answer.getResult());
|
||||
assertTrue(StringUtils.isNotBlank(answer.getDetails()));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_vddkSuccess() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(true);
|
||||
Mockito.when(checkConvertInstanceCommandMock.getVddkLibDir()).thenReturn("/opt/vmware-vddk/vmware-vix-disklib-distrib");
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsVddk("/opt/vmware-vddk/vmware-vix-disklib-distrib")).thenReturn(true);
|
||||
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
|
||||
assertTrue(answer.getResult());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCheckInstanceCommand_vddkFailure() {
|
||||
Mockito.when(checkConvertInstanceCommandMock.isUseVddk()).thenReturn(true);
|
||||
Mockito.when(checkConvertInstanceCommandMock.getVddkLibDir()).thenReturn("/opt/vmware-vddk/vmware-vix-disklib-distrib");
|
||||
Mockito.when(libvirtComputingResourceMock.hostSupportsVddk("/opt/vmware-vddk/vmware-vix-disklib-distrib")).thenReturn(false);
|
||||
|
||||
Answer answer = checkConvertInstanceCommandWrapper.execute(checkConvertInstanceCommandMock, libvirtComputingResourceMock);
|
||||
|
||||
assertFalse(answer.getResult());
|
||||
assertTrue(StringUtils.isNotBlank(answer.getDetails()));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
//
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
|
|
@ -189,4 +190,127 @@ public class LibvirtConvertInstanceCommandWrapperTest {
|
|||
Mockito.verify(script).add("-x");
|
||||
Mockito.verify(script).add("-v");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredLibguestfsBackend() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn("28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81")
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "libvirt", null, null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add("-c");
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("export LIBGUESTFS_BACKEND=libvirt &&"));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-ip /tmp/v2v.pass.cloud.vcenter.local."));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains(" -on tmp-uuid "));
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-thumbprint=28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81 "));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredTransportsOrder() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn("28:19:A6:1C:90:ED:46:D7:1C:86:BC:F6:13:52:F0:B9:19:81:0D:81")
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", "nbd:nbdssl", null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-transports=nbd:nbdssl "));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkFailsWhenThumbprintUnavailable() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
Mockito.doReturn(null)
|
||||
.when(convertInstanceCommandWrapper).getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class)) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", null, null, 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformInstanceConversionUsingVddkUsesConfiguredThumbprintFromAgentProperty() {
|
||||
RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
|
||||
Mockito.when(remoteInstanceTO.getVcenterHost()).thenReturn("vcenter.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterUsername()).thenReturn("administrator@vsphere.local");
|
||||
Mockito.when(remoteInstanceTO.getVcenterPassword()).thenReturn("secret");
|
||||
Mockito.when(remoteInstanceTO.getDatacenterName()).thenReturn("dc1");
|
||||
Mockito.when(remoteInstanceTO.getClusterName()).thenReturn("cluster1");
|
||||
Mockito.when(remoteInstanceTO.getHostName()).thenReturn("host1");
|
||||
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedConstruction<Script> ignored = Mockito.mockConstruction(Script.class, (mock, context) -> {
|
||||
Mockito.when(mock.execute(Mockito.any())).thenReturn("");
|
||||
Mockito.when(mock.getExitValue()).thenReturn(0);
|
||||
})) {
|
||||
filesMock.when(() -> Files.writeString(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local.")), Mockito.eq("secret")))
|
||||
.thenAnswer(invocation -> invocation.getArgument(0));
|
||||
filesMock.when(() -> Files.deleteIfExists(Mockito.argThat(path -> path.toString().contains("/tmp/v2v.pass.cloud.vcenter.local."))))
|
||||
.thenReturn(true);
|
||||
|
||||
boolean result = convertInstanceCommandWrapper.performInstanceConversionUsingVddk(
|
||||
remoteInstanceTO, vmName, "/tmp/convert", "/opt/vddk", "direct", null,
|
||||
"AA:BB:CC:DD:EE", 1000L, false, null, "tmp-uuid", "-ip");
|
||||
|
||||
Assert.assertTrue(result);
|
||||
Script scriptMock = ignored.constructed().get(0);
|
||||
Mockito.verify(scriptMock).add(Mockito.contains("-io vddk-thumbprint=AA:BB:CC:DD:EE "));
|
||||
Mockito.verify(convertInstanceCommandWrapper, Mockito.never())
|
||||
.getVcenterThumbprint(Mockito.anyString(), Mockito.anyLong(), Mockito.anyString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -581,7 +581,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
|
|||
List<FirewallRuleVO> firewallRules = firewallRulesDao.listByIpPurposeProtocolAndNotRevoked(publicIp.getId(), FirewallRule.Purpose.Firewall, NetUtils.TCP_PROTO);
|
||||
for (FirewallRuleVO firewallRule : firewallRules) {
|
||||
PortForwardingRuleVO pfRule = portForwardingRulesDao.findByNetworkAndPorts(networkId, firewallRule.getSourcePortStart(), firewallRule.getSourcePortEnd());
|
||||
if (firewallRule.getSourcePortStart() == CLUSTER_NODES_DEFAULT_START_SSH_PORT || (Objects.nonNull(pfRule) && pfRule.getDestinationPortStart() == DEFAULT_SSH_PORT) ) {
|
||||
if (Objects.equals(firewallRule.getSourcePortStart(), CLUSTER_NODES_DEFAULT_START_SSH_PORT) || (Objects.nonNull(pfRule) && pfRule.getDestinationPortStart() == DEFAULT_SSH_PORT) ) {
|
||||
rule = firewallRule;
|
||||
firewallService.revokeIngressFwRule(firewallRule.getId(), true);
|
||||
logger.debug("The SSH firewall rule {} with the id {} was revoked", firewallRule.getName(), firewallRule.getId());
|
||||
|
|
|
|||
|
|
@ -135,10 +135,14 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
|
|||
|
||||
// Remove existing SSH firewall rules
|
||||
FirewallRule firewallRule = removeSshFirewallRule(publicIp, network.getId());
|
||||
int existingFirewallRuleSourcePortEnd;
|
||||
if (firewallRule == null) {
|
||||
throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned");
|
||||
logger.warn("SSH firewall rule not found for Kubernetes cluster: {}. It may have been manually deleted or modified.", kubernetesCluster.getName());
|
||||
existingFirewallRuleSourcePortEnd = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1;
|
||||
} else {
|
||||
existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
|
||||
}
|
||||
int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
|
||||
|
||||
try {
|
||||
removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd);
|
||||
} catch (ResourceUnavailableException e) {
|
||||
|
|
|
|||
|
|
@ -214,7 +214,8 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast
|
|||
maxconn = offering.getConcurrentConnections().toString();
|
||||
}
|
||||
LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs, elbVm.getPublicIpAddress(), _nicDao.getIpAddress(guestNetworkId, elbVm.getId()),
|
||||
elbVm.getPrivateIpAddress(), null, null, maxconn, offering.isKeepAliveEnabled());
|
||||
elbVm.getPrivateIpAddress(), null, null, maxconn, offering.isKeepAliveEnabled(),
|
||||
NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, elbVm.getPrivateIpAddress());
|
||||
cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, elbVm.getInstanceName());
|
||||
//FIXME: why are we setting attributes directly? Ick!! There should be accessors and
|
||||
|
|
|
|||
|
|
@ -513,7 +513,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
|
|||
}
|
||||
final LoadBalancerConfigCommand cmd =
|
||||
new LoadBalancerConfigCommand(lbs, guestNic.getIPv4Address(), guestNic.getIPv4Address(), internalLbVm.getPrivateIpAddress(), _itMgr.toNicTO(guestNicProfile,
|
||||
internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn, offering.isKeepAliveEnabled());
|
||||
internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn, offering.isKeepAliveEnabled(),
|
||||
NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
|
||||
cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key());
|
||||
cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key());
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import com.vmware.nsx_policy.infra.tier_0s.LocaleServices;
|
|||
import com.vmware.nsx_policy.infra.tier_1s.nat.NatRules;
|
||||
import com.vmware.nsx_policy.model.ApiError;
|
||||
import com.vmware.nsx_policy.model.DhcpRelayConfig;
|
||||
import com.vmware.nsx_policy.model.EnforcementPoint;
|
||||
import com.vmware.nsx_policy.model.EnforcementPointListResult;
|
||||
import com.vmware.nsx_policy.model.Group;
|
||||
import com.vmware.nsx_policy.model.GroupListResult;
|
||||
|
|
@ -64,12 +65,13 @@ import com.vmware.nsx_policy.model.PathExpression;
|
|||
import com.vmware.nsx_policy.model.PolicyGroupMembersListResult;
|
||||
import com.vmware.nsx_policy.model.PolicyNatRule;
|
||||
import com.vmware.nsx_policy.model.PolicyNatRuleListResult;
|
||||
import com.vmware.nsx_policy.model.PolicyGroupMemberDetails;
|
||||
import com.vmware.nsx_policy.model.Rule;
|
||||
import com.vmware.nsx_policy.model.SecurityPolicy;
|
||||
import com.vmware.nsx_policy.model.Segment;
|
||||
import com.vmware.nsx_policy.model.SegmentSubnet;
|
||||
import com.vmware.nsx_policy.model.ServiceListResult;
|
||||
import com.vmware.nsx_policy.model.SiteListResult;
|
||||
import com.vmware.nsx_policy.model.Site;
|
||||
import com.vmware.nsx_policy.model.Tier1;
|
||||
import com.vmware.vapi.bindings.Service;
|
||||
import com.vmware.vapi.bindings.Structure;
|
||||
|
|
@ -83,6 +85,7 @@ import com.vmware.vapi.internal.protocol.RestProtocol;
|
|||
import com.vmware.vapi.internal.protocol.client.rest.authn.BasicAuthenticationAppender;
|
||||
import com.vmware.vapi.protocol.HttpConfiguration;
|
||||
import com.vmware.vapi.std.errors.Error;
|
||||
import com.vmware.vapi.std.errors.NotFound;
|
||||
import org.apache.cloudstack.resource.NsxLoadBalancerMember;
|
||||
import org.apache.cloudstack.resource.NsxNetworkRule;
|
||||
import org.apache.cloudstack.utils.NsxControllerUtils;
|
||||
|
|
@ -96,9 +99,12 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.stream.Collectors.toSet;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServerPoolMemberName;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServerPoolName;
|
||||
import static org.apache.cloudstack.utils.NsxControllerUtils.getServiceName;
|
||||
|
|
@ -282,16 +288,18 @@ public class NsxApiClient {
|
|||
Tier1s tier1service = (Tier1s) nsxService.apply(Tier1s.class);
|
||||
return tier1service.get(tier1GatewayId);
|
||||
} catch (Exception e) {
|
||||
logger.debug(String.format("NSX Tier-1 gateway with name: %s not found", tier1GatewayId));
|
||||
logger.debug("NSX Tier-1 gateway with name: {} not found", tier1GatewayId);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private List<com.vmware.nsx_policy.model.LocaleServices> getTier0LocalServices(String tier0Gateway) {
|
||||
private Optional<com.vmware.nsx_policy.model.LocaleServices> findTier0LocalServices(String tier0Gateway) {
|
||||
try {
|
||||
LocaleServices tier0LocaleServices = (LocaleServices) nsxService.apply(LocaleServices.class);
|
||||
LocaleServicesListResult result = tier0LocaleServices.list(tier0Gateway, null, false, null, null, null, null);
|
||||
return result.getResults();
|
||||
LocaleServicesListResult result = tier0LocaleServices.list(tier0Gateway, null, false, null, 1L, null, null);
|
||||
return Optional.ofNullable(result.getResults())
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch locale services for tier gateway %s due to %s", tier0Gateway, e.getMessage()));
|
||||
}
|
||||
|
|
@ -302,10 +310,13 @@ public class NsxApiClient {
|
|||
*/
|
||||
private void createTier1LocaleServices(String tier1Id, String edgeCluster, String tier0Gateway) {
|
||||
try {
|
||||
List<com.vmware.nsx_policy.model.LocaleServices> localeServices = getTier0LocalServices(tier0Gateway);
|
||||
Optional<com.vmware.nsx_policy.model.LocaleServices> localeServices = findTier0LocalServices(tier0Gateway);
|
||||
if (localeServices.isEmpty()) {
|
||||
throw new CloudRuntimeException(String.format("Failed to find locale services for tier-0 gateway %s", tier0Gateway));
|
||||
}
|
||||
com.vmware.nsx_policy.infra.tier_1s.LocaleServices tier1LocalService = (com.vmware.nsx_policy.infra.tier_1s.LocaleServices) nsxService.apply(com.vmware.nsx_policy.infra.tier_1s.LocaleServices.class);
|
||||
com.vmware.nsx_policy.model.LocaleServices localeService = new com.vmware.nsx_policy.model.LocaleServices.Builder()
|
||||
.setEdgeClusterPath(localeServices.get(0).getEdgeClusterPath()).build();
|
||||
.setEdgeClusterPath(localeServices.get().getEdgeClusterPath()).build();
|
||||
tier1LocalService.patch(tier1Id, TIER_1_LOCALE_SERVICE_ID, localeService);
|
||||
} catch (Error error) {
|
||||
throw new CloudRuntimeException(String.format("Failed to instantiate tier-1 gateway %s in edge cluster %s", tier1Id, edgeCluster));
|
||||
|
|
@ -327,7 +338,7 @@ public class NsxApiClient {
|
|||
String tier0GatewayPath = TIER_0_GATEWAY_PATH_PREFIX + tier0Gateway;
|
||||
Tier1 tier1 = getTier1Gateway(name);
|
||||
if (tier1 != null) {
|
||||
logger.info(String.format("VPC network with name %s exists in NSX zone", name));
|
||||
logger.info("VPC network with name {} exists in NSX zone", name);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -359,7 +370,7 @@ public class NsxApiClient {
|
|||
com.vmware.nsx_policy.infra.tier_1s.LocaleServices localeService = (com.vmware.nsx_policy.infra.tier_1s.LocaleServices)
|
||||
nsxService.apply(com.vmware.nsx_policy.infra.tier_1s.LocaleServices.class);
|
||||
if (getTier1Gateway(tier1Id) == null) {
|
||||
logger.warn(String.format("The Tier 1 Gateway %s does not exist, cannot be removed", tier1Id));
|
||||
logger.warn("The Tier 1 Gateway {} does not exist, cannot be removed", tier1Id);
|
||||
return;
|
||||
}
|
||||
removeTier1GatewayNatRules(tier1Id);
|
||||
|
|
@ -370,13 +381,21 @@ public class NsxApiClient {
|
|||
|
||||
private void removeTier1GatewayNatRules(String tier1Id) {
|
||||
NatRules natRulesService = (NatRules) nsxService.apply(NatRules.class);
|
||||
PolicyNatRuleListResult result = natRulesService.list(tier1Id, NAT_ID, null, false, null, null, null, null);
|
||||
List<PolicyNatRule> natRules = result.getResults();
|
||||
List<PolicyNatRule> natRules = PagedFetcher.<PolicyNatRuleListResult, PolicyNatRule>withPageFetcher(
|
||||
cursor -> natRulesService.list(tier1Id, NAT_ID, cursor, false, null, null, null, null)
|
||||
).cursorExtractor(PolicyNatRuleListResult::getCursor)
|
||||
.itemsExtractor(PolicyNatRuleListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll()
|
||||
.getResults();
|
||||
if (CollectionUtils.isEmpty(natRules)) {
|
||||
logger.debug(String.format("Didn't find any NAT rule to remove on the Tier 1 Gateway %s", tier1Id));
|
||||
logger.debug("Didn't find any NAT rule to remove on the Tier 1 Gateway {}", tier1Id);
|
||||
} else {
|
||||
for (PolicyNatRule natRule : natRules) {
|
||||
logger.debug(String.format("Removing NAT rule %s from Tier 1 Gateway %s", natRule.getId(), tier1Id));
|
||||
logger.debug("Removing NAT rule {} from Tier 1 Gateway {}", natRule.getId(), tier1Id);
|
||||
natRulesService.delete(tier1Id, NAT_ID, natRule.getId());
|
||||
}
|
||||
}
|
||||
|
|
@ -384,38 +403,45 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
public String getDefaultSiteId() {
|
||||
SiteListResult sites = getSites();
|
||||
if (CollectionUtils.isEmpty(sites.getResults())) {
|
||||
Optional<Site> site = findFirstSite();
|
||||
if (site.isEmpty()) {
|
||||
String errorMsg = "No sites are found in the linked NSX infrastructure";
|
||||
logger.error(errorMsg);
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
}
|
||||
return sites.getResults().get(0).getId();
|
||||
return site.get().getId();
|
||||
}
|
||||
|
||||
protected SiteListResult getSites() {
|
||||
protected Optional<Site> findFirstSite() {
|
||||
try {
|
||||
Sites sites = (Sites) nsxService.apply(Sites.class);
|
||||
return sites.list(null, false, null, null, null, null);
|
||||
List<Site> siteList = sites.list(null, false, null, 1L, null, null)
|
||||
.getResults();
|
||||
return Optional.ofNullable(siteList)
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch sites list due to %s", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
public String getDefaultEnforcementPointPath(String siteId) {
|
||||
EnforcementPointListResult epList = getEnforcementPoints(siteId);
|
||||
if (CollectionUtils.isEmpty(epList.getResults())) {
|
||||
Optional<EnforcementPoint> ep = findFirstEnforcementPoint(siteId);
|
||||
if (ep.isEmpty()) {
|
||||
String errorMsg = String.format("No enforcement points are found in the linked NSX infrastructure for site ID %s", siteId);
|
||||
logger.error(errorMsg);
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
}
|
||||
return epList.getResults().get(0).getPath();
|
||||
return ep.get().getPath();
|
||||
}
|
||||
|
||||
protected EnforcementPointListResult getEnforcementPoints(String siteId) {
|
||||
protected Optional<EnforcementPoint> findFirstEnforcementPoint(String siteId) {
|
||||
try {
|
||||
EnforcementPoints enforcementPoints = (EnforcementPoints) nsxService.apply(EnforcementPoints.class);
|
||||
return enforcementPoints.list(siteId, null, false, null, null, null, null);
|
||||
EnforcementPointListResult result = enforcementPoints.list(siteId, null, false, null, 1L, null, null);
|
||||
return Optional.ofNullable(result.getResults())
|
||||
.filter(Predicate.not(List::isEmpty))
|
||||
.map(l -> l.get(0));
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch enforcement points due to %s", e.getMessage()));
|
||||
}
|
||||
|
|
@ -424,7 +450,15 @@ public class NsxApiClient {
|
|||
public TransportZoneListResult getTransportZones() {
|
||||
try {
|
||||
com.vmware.nsx.TransportZones transportZones = (com.vmware.nsx.TransportZones) nsxService.apply(com.vmware.nsx.TransportZones.class);
|
||||
return transportZones.list(null, null, true, null, null, null, null, null, TransportType.OVERLAY.name(), null);
|
||||
return PagedFetcher.<TransportZoneListResult, TransportZone>withPageFetcher(
|
||||
cursor -> transportZones.list(cursor, null, true, null, null, null, null, null, TransportType.OVERLAY.name(), null)
|
||||
).cursorExtractor(TransportZoneListResult::getCursor)
|
||||
.itemsExtractor(TransportZoneListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException(String.format("Failed to fetch transport zones due to %s", e.getMessage()));
|
||||
}
|
||||
|
|
@ -465,7 +499,7 @@ public class NsxApiClient {
|
|||
removeSegment(segmentName, zoneId);
|
||||
DhcpRelayConfigs dhcpRelayConfig = (DhcpRelayConfigs) nsxService.apply(DhcpRelayConfigs.class);
|
||||
String dhcpRelayConfigId = NsxControllerUtils.getNsxDhcpRelayConfigId(zoneId, domainId, accountId, vpcId, networkId);
|
||||
logger.debug(String.format("Removing the DHCP relay config with ID %s", dhcpRelayConfigId));
|
||||
logger.debug("Removing the DHCP relay config with ID {}", dhcpRelayConfigId);
|
||||
dhcpRelayConfig.delete(dhcpRelayConfigId);
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
|
|
@ -476,7 +510,7 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
protected void removeSegment(String segmentName, long zoneId) {
|
||||
logger.debug(String.format("Removing the segment with ID %s", segmentName));
|
||||
logger.debug("Removing the segment with ID {}", segmentName);
|
||||
Segments segmentService = (Segments) nsxService.apply(Segments.class);
|
||||
String errMsg = String.format("The segment with ID %s is not found, skipping removal", segmentName);
|
||||
try {
|
||||
|
|
@ -498,7 +532,7 @@ public class NsxApiClient {
|
|||
portCount = retrySegmentDeletion(segmentPortsService, segmentName, enforcementPointPath, zoneId);
|
||||
}
|
||||
if (portCount == 0L) {
|
||||
logger.debug(String.format("Removing the segment with ID %s", segmentName));
|
||||
logger.debug("Removing the segment with ID {}", segmentName);
|
||||
removeGroupForSegment(segmentName);
|
||||
segmentService.delete(segmentName);
|
||||
} else {
|
||||
|
|
@ -509,8 +543,18 @@ public class NsxApiClient {
|
|||
}
|
||||
|
||||
private PolicyGroupMembersListResult getSegmentPortList(SegmentPorts segmentPortsService, String segmentName, String enforcementPointPath) {
|
||||
return segmentPortsService.list(DEFAULT_DOMAIN, segmentName, null, enforcementPointPath,
|
||||
false, null, 50L, false, null);
|
||||
return PagedFetcher.
|
||||
<PolicyGroupMembersListResult, PolicyGroupMemberDetails>withPageFetcher(
|
||||
cursor -> segmentPortsService.list(DEFAULT_DOMAIN, segmentName, cursor, enforcementPointPath,
|
||||
false, null, 50L, false, null)
|
||||
)
|
||||
.cursorExtractor(PolicyGroupMembersListResult::getCursor)
|
||||
.itemsExtractor(PolicyGroupMembersListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
}
|
||||
|
||||
private Long retrySegmentDeletion(SegmentPorts segmentPortsService, String segmentName, String enforcementPointPath, long zoneId) {
|
||||
|
|
@ -546,7 +590,7 @@ public class NsxApiClient {
|
|||
.setEnabled(true)
|
||||
.build();
|
||||
|
||||
logger.debug(String.format("Creating NSX static NAT rule %s for tier-1 gateway %s (VPC: %s)", ruleName, tier1GatewayName, vpcName));
|
||||
logger.debug("Creating NSX static NAT rule {} for tier-1 gateway {} (VPC: {})", ruleName, tier1GatewayName, vpcName);
|
||||
natService.patch(tier1GatewayName, NatId.USER.name(), ruleName, rule);
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
|
|
@ -582,8 +626,7 @@ public class NsxApiClient {
|
|||
natService.delete(tier1GatewayName, NatId.USER.name(), ruleName);
|
||||
}
|
||||
} catch (Error error) {
|
||||
String msg = String.format("Cannot find NAT rule with name %s: %s, skipping deletion", ruleName, error.getMessage());
|
||||
logger.debug(msg);
|
||||
logger.debug("Cannot find NAT rule with name {}: {}, skipping deletion", ruleName, error.getMessage());
|
||||
}
|
||||
|
||||
if (service == Network.Service.PortForwarding) {
|
||||
|
|
@ -595,7 +638,7 @@ public class NsxApiClient {
|
|||
String vmIp, String publicPort, String service) {
|
||||
try {
|
||||
NatRules natService = (NatRules) nsxService.apply(NatRules.class);
|
||||
logger.debug(String.format("Creating NSX Port-Forwarding NAT %s for network %s", ruleName, networkName));
|
||||
logger.debug("Creating NSX Port-Forwarding NAT {} for network {}", ruleName, networkName);
|
||||
PolicyNatRule rule = new PolicyNatRule.Builder()
|
||||
.setId(ruleName)
|
||||
.setDisplayName(ruleName)
|
||||
|
|
@ -656,9 +699,20 @@ public class NsxApiClient {
|
|||
public void createNsxLbServerPool(List<NsxLoadBalancerMember> memberList, String tier1GatewayName, String lbServerPoolName,
|
||||
String algorithm, String privatePort, String protocol) {
|
||||
try {
|
||||
String activeMonitorPath = getLbActiveMonitorPath(lbServerPoolName, privatePort, protocol);
|
||||
List<LBPoolMember> members = getLbPoolMembers(memberList, tier1GatewayName);
|
||||
LbPools lbPools = (LbPools) nsxService.apply(LbPools.class);
|
||||
Optional<LBPool> nsxLbServerPool = getNsxLbServerPool(lbPools, lbServerPoolName);
|
||||
// Skip if pool exists and members unchanged
|
||||
if (nsxLbServerPool.isPresent()) {
|
||||
List<LBPoolMember> existingMembers = nsxLbServerPool
|
||||
.map(LBPool::getMembers)
|
||||
.orElseGet(List::of);
|
||||
if (hasSamePoolMembers(existingMembers, members)) {
|
||||
logger.debug("Skipping patch for LB pool {} on Tier-1 {}: members unchanged", lbServerPoolName, tier1GatewayName);
|
||||
return;
|
||||
}
|
||||
}
|
||||
String activeMonitorPath = getLbActiveMonitorPath(lbServerPoolName, privatePort, protocol);
|
||||
LBPool lbPool = new LBPool.Builder()
|
||||
.setId(lbServerPoolName)
|
||||
.setDisplayName(lbServerPoolName)
|
||||
|
|
@ -676,9 +730,52 @@ public class NsxApiClient {
|
|||
}
|
||||
}
|
||||
|
||||
private Optional<LBPool> getNsxLbServerPool(LbPools lbPools, String lbServerPoolName) {
|
||||
try {
|
||||
return Optional.ofNullable(lbPools.get(lbServerPoolName));
|
||||
} catch (NotFound e) {
|
||||
logger.warn("Server Pool not found: {}", lbServerPoolName);
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean hasSamePoolMembers(List<LBPoolMember> existingMembers, List<LBPoolMember> membersUpdate) {
|
||||
Set<String> existingMembersSet = existingMembers.stream()
|
||||
.map(this::buildPoolMemberKey)
|
||||
.collect(toSet());
|
||||
Set<String> updateMembersSet = membersUpdate.stream()
|
||||
.map(this::buildPoolMemberKey)
|
||||
.collect(toSet());
|
||||
|
||||
return existingMembersSet.size() == updateMembersSet.size()
|
||||
&& existingMembersSet.containsAll(updateMembersSet);
|
||||
}
|
||||
|
||||
private String buildPoolMemberKey(LBPoolMember member) {
|
||||
return member.getIpAddress() + ':' + member.getPort() + ':' + member.getDisplayName();
|
||||
}
|
||||
|
||||
private String getLbActiveMonitorPath(String lbServerPoolName, String port, String protocol) {
|
||||
LbMonitorProfiles lbActiveMonitor = (LbMonitorProfiles) nsxService.apply(LbMonitorProfiles.class);
|
||||
String lbMonitorProfileId = getActiveMonitorProfileName(lbServerPoolName, port, protocol);
|
||||
Optional<Structure> monitorProfile = getMonitorProfile(lbActiveMonitor, lbMonitorProfileId);
|
||||
if (monitorProfile.isEmpty()) {
|
||||
patchMonitoringProfile(port, protocol, lbMonitorProfileId, lbActiveMonitor);
|
||||
monitorProfile = getMonitorProfile(lbActiveMonitor, lbMonitorProfileId);
|
||||
}
|
||||
return monitorProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
}
|
||||
|
||||
private Optional<Structure> getMonitorProfile(LbMonitorProfiles lbActiveMonitor, String lbMonitorProfileId) {
|
||||
try {
|
||||
return Optional.ofNullable(lbActiveMonitor.get(lbMonitorProfileId));
|
||||
} catch (NotFound e) {
|
||||
logger.warn("LB Monitor Profile not found: {}", lbMonitorProfileId);
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
private void patchMonitoringProfile(String port, String protocol, String lbMonitorProfileId, LbMonitorProfiles lbActiveMonitor) {
|
||||
if ("TCP".equals(protocol.toUpperCase(Locale.ROOT))) {
|
||||
LBTcpMonitorProfile lbTcpMonitorProfile = new LBTcpMonitorProfile.Builder(TCP_MONITOR_PROFILE)
|
||||
.setDisplayName(lbMonitorProfileId)
|
||||
|
|
@ -691,14 +788,18 @@ public class NsxApiClient {
|
|||
.build();
|
||||
lbActiveMonitor.patch(lbMonitorProfileId, icmpMonitorProfile);
|
||||
}
|
||||
|
||||
LBMonitorProfileListResult listResult = listLBActiveMonitors(lbActiveMonitor);
|
||||
Optional<Structure> monitorProfile = listResult.getResults().stream().filter(profile -> profile._getDataValue().getField("id").toString().equals(lbMonitorProfileId)).findFirst();
|
||||
return monitorProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
}
|
||||
|
||||
LBMonitorProfileListResult listLBActiveMonitors(LbMonitorProfiles lbActiveMonitor) {
|
||||
return lbActiveMonitor.list(null, false, null, null, null, null);
|
||||
return PagedFetcher.<LBMonitorProfileListResult, Structure>withPageFetcher(
|
||||
cursor -> lbActiveMonitor.list(cursor, false, null, null, null, null)
|
||||
).cursorExtractor(LBMonitorProfileListResult::getCursor)
|
||||
.itemsExtractor(LBMonitorProfileListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
}
|
||||
|
||||
public void createNsxLoadBalancer(String tier1GatewayName) {
|
||||
|
|
@ -735,7 +836,7 @@ public class NsxApiClient {
|
|||
String lbVirtualServerName = getVirtualServerName(tier1GatewayName, lbId);
|
||||
String lbServiceName = getLoadBalancerName(tier1GatewayName);
|
||||
LbVirtualServers lbVirtualServers = (LbVirtualServers) nsxService.apply(LbVirtualServers.class);
|
||||
if (Objects.nonNull(getLbVirtualServerService(lbVirtualServers, lbServiceName))) {
|
||||
if (Objects.nonNull(getLbVirtualServerService(lbVirtualServers, lbVirtualServerName))) {
|
||||
return;
|
||||
}
|
||||
LBVirtualServer lbVirtualServer = new LBVirtualServer.Builder()
|
||||
|
|
@ -763,7 +864,7 @@ public class NsxApiClient {
|
|||
return lbVirtualServer;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.debug(String.format("Found an LB virtual server named: %s on NSX", lbVSName));
|
||||
logger.debug("Found an LB virtual server named: {} on NSX", lbVSName);
|
||||
return null;
|
||||
}
|
||||
return null;
|
||||
|
|
@ -851,8 +952,15 @@ public class NsxApiClient {
|
|||
private String getLbProfileForProtocol(String protocol) {
|
||||
try {
|
||||
LbAppProfiles lbAppProfiles = (LbAppProfiles) nsxService.apply(LbAppProfiles.class);
|
||||
LBAppProfileListResult lbAppProfileListResults = lbAppProfiles.list(null, null,
|
||||
null, null, null, null);
|
||||
LBAppProfileListResult lbAppProfileListResults = PagedFetcher.<LBAppProfileListResult, Structure>withPageFetcher(
|
||||
cursor -> lbAppProfiles.list(cursor, null, null, null, null, null)
|
||||
).cursorExtractor(LBAppProfileListResult::getCursor)
|
||||
.itemsExtractor(LBAppProfileListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
Optional<Structure> appProfile = lbAppProfileListResults.getResults().stream().filter(profile -> profile._getDataValue().getField("path").toString().contains(protocol.toLowerCase(Locale.ROOT))).findFirst();
|
||||
return appProfile.map(structure -> structure._getDataValue().getField("path").toString()).orElse(null);
|
||||
} catch (Error error) {
|
||||
|
|
@ -868,7 +976,15 @@ public class NsxApiClient {
|
|||
Services service = (Services) nsxService.apply(Services.class);
|
||||
|
||||
// Find default service if present
|
||||
ServiceListResult serviceList = service.list(null, true, false, null, null, null, null);
|
||||
ServiceListResult serviceList = PagedFetcher.<ServiceListResult, com.vmware.nsx_policy.model.Service>withPageFetcher(
|
||||
cursor -> service.list(cursor, true, false, null, null, null, null)
|
||||
).cursorExtractor(ServiceListResult::getCursor)
|
||||
.itemsExtractor(ServiceListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
|
||||
List<com.vmware.nsx_policy.model.Service> services = serviceList.getResults();
|
||||
List<String> matchedDefaultSvc = services.parallelStream().filter(svc ->
|
||||
|
|
@ -1095,9 +1211,17 @@ public class NsxApiClient {
|
|||
|
||||
private List<Group> listNsxGroups() {
|
||||
try {
|
||||
Groups groups = (Groups) nsxService.apply(Groups.class);
|
||||
GroupListResult result = groups.list(DEFAULT_DOMAIN, null, false, null, null, null, null, null);
|
||||
return result.getResults();
|
||||
Groups groups = (Groups) nsxService.apply(Groups.class);
|
||||
GroupListResult result = PagedFetcher.<GroupListResult, Group>withPageFetcher(
|
||||
cursor -> groups.list(DEFAULT_DOMAIN, cursor, false, null, null, null, null, null)
|
||||
).cursorExtractor(GroupListResult::getCursor)
|
||||
.itemsExtractor(GroupListResult::getResults)
|
||||
.itemsSetter((page, allItems) -> {
|
||||
page.setResults(allItems);
|
||||
page.setResultCount((long) allItems.size());
|
||||
})
|
||||
.fetchAll();
|
||||
return result.getResults();
|
||||
} catch (Error error) {
|
||||
ApiError ae = error.getData()._convertTo(ApiError.class);
|
||||
String msg = String.format("Failed to list NSX groups, due to: %s", ae.getErrorMessage());
|
||||
|
|
|
|||
|
|
@ -0,0 +1,82 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.service;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
 * Generic helper that walks a cursor-paginated list API and merges every page's
 * items into the first page object.
 *
 * <p>{@code R} is the page/result type and {@code T} the element type. Configure
 * it fluently: {@link #withPageFetcher(Function)} supplies the page call (it
 * receives the paging cursor, {@code null} for the first request), then
 * {@link #cursorExtractor(Function)}, {@link #itemsExtractor(Function)} and
 * {@link #itemsSetter(BiConsumer)} describe how to read the next-page cursor,
 * read a page's items, and write the merged items back. {@link #fetchAll()}
 * performs the iteration.</p>
 */
class PagedFetcher<R, T> {

    // Fetches one page for a given cursor (null means "first page").
    private final Function<String, R> fetchPage;
    // Reads the next-page cursor from a page; null/empty means "no more pages".
    private Function<R, String> cursorExtractor;
    // Reads the items carried by a page (may return null).
    private Function<R, List<T>> itemsExtractor;
    // Writes the merged item list back onto the first page.
    private BiConsumer<R, List<T>> itemsSetter;

    private PagedFetcher(Function<String, R> pageFetcher) {
        this.fetchPage = pageFetcher;
    }

    /** Entry point: wraps the function that retrieves a single page for a cursor. */
    static <R, T> PagedFetcher<R, T> withPageFetcher(Function<String, R> pageFetcher) {
        return new PagedFetcher<>(pageFetcher);
    }

    /** Sets how to read the next-page cursor from a page; returns {@code this} for chaining. */
    PagedFetcher<R, T> cursorExtractor(Function<R, String> cursorProvider) {
        cursorExtractor = cursorProvider;
        return this;
    }

    /** Sets how to read the items from a page; returns {@code this} for chaining. */
    PagedFetcher<R, T> itemsExtractor(Function<R, List<T>> resultsProvider) {
        itemsExtractor = resultsProvider;
        return this;
    }

    /** Sets how to write merged items back onto the first page; returns {@code this} for chaining. */
    PagedFetcher<R, T> itemsSetter(BiConsumer<R, List<T>> resultsSetter) {
        itemsSetter = resultsSetter;
        return this;
    }

    /**
     * Fetches the first page and, when it carries a non-empty cursor, keeps
     * fetching subsequent pages, concatenating every page's items.
     *
     * <p>When the first page has no cursor it is returned untouched and the
     * items setter is NOT invoked. Otherwise the merged list is written onto
     * the first page via the items setter before it is returned. Pages whose
     * items are {@code null} contribute nothing.</p>
     *
     * @return the first page, holding the merged items when pagination occurred
     * @throws NullPointerException if any of the three hooks is unset
     */
    R fetchAll() {
        Objects.requireNonNull(cursorExtractor, "Cursor extractor must be set");
        Objects.requireNonNull(itemsExtractor, "Items extractor must be set");
        Objects.requireNonNull(itemsSetter, "Items setter must be set");

        final R head = fetchPage.apply(null);
        String next = cursorExtractor.apply(head);
        if (next == null || next.isEmpty()) {
            // Single page: no merging needed, leave the page as delivered.
            return head;
        }

        final List<T> merged = new ArrayList<>();
        final List<T> headItems = itemsExtractor.apply(head);
        if (headItems != null) {
            merged.addAll(headItems);
        }
        do {
            final R page = fetchPage.apply(next);
            final List<T> pageItems = itemsExtractor.apply(page);
            if (pageItems != null) {
                merged.addAll(pageItems);
            }
            next = cursorExtractor.apply(page);
        } while (next != null && !next.isEmpty());

        itemsSetter.accept(head, merged);
        return head;
    }
}
|
||||
|
|
@ -18,13 +18,32 @@ package org.apache.cloudstack.service;
|
|||
|
||||
import com.cloud.network.Network;
|
||||
import com.cloud.network.SDNProviderNetworkRule;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.vmware.nsx.cluster.Status;
|
||||
import com.vmware.nsx.model.ClusterStatus;
|
||||
import com.vmware.nsx.model.ControllerClusterStatus;
|
||||
import com.vmware.nsx_policy.infra.LbAppProfiles;
|
||||
import com.vmware.nsx_policy.infra.LbMonitorProfiles;
|
||||
import com.vmware.nsx_policy.infra.LbPools;
|
||||
import com.vmware.nsx_policy.infra.LbServices;
|
||||
import com.vmware.nsx_policy.infra.LbVirtualServers;
|
||||
import com.vmware.nsx_policy.infra.domains.Groups;
|
||||
import com.vmware.nsx_policy.model.ApiError;
|
||||
import com.vmware.nsx_policy.model.Group;
|
||||
import com.vmware.nsx_policy.model.LBAppProfileListResult;
|
||||
import com.vmware.nsx_policy.model.LBIcmpMonitorProfile;
|
||||
import com.vmware.nsx_policy.model.LBService;
|
||||
import com.vmware.nsx_policy.model.LBTcpMonitorProfile;
|
||||
import com.vmware.nsx_policy.model.LBPool;
|
||||
import com.vmware.nsx_policy.model.LBPoolMember;
|
||||
import com.vmware.nsx_policy.model.LBVirtualServer;
|
||||
import com.vmware.nsx_policy.model.PathExpression;
|
||||
import com.vmware.vapi.bindings.Service;
|
||||
import com.vmware.vapi.bindings.Structure;
|
||||
import com.vmware.vapi.std.errors.Error;
|
||||
import com.vmware.vapi.std.errors.NotFound;
|
||||
import org.apache.cloudstack.resource.NsxLoadBalancerMember;
|
||||
import org.apache.cloudstack.utils.NsxControllerUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
|
@ -36,8 +55,20 @@ import org.mockito.MockitoAnnotations;
|
|||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.junit.Assert.assertThrows;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class NsxApiClientTest {
|
||||
|
||||
private static final String TIER_1_GATEWAY_NAME = "t1";
|
||||
|
||||
@Mock
|
||||
private Function<Class<? extends Service>, Service> nsxService;
|
||||
@Mock
|
||||
|
|
@ -108,4 +139,284 @@ public class NsxApiClientTest {
|
|||
Mockito.when(clusterStatus.getControlClusterStatus()).thenReturn(status);
|
||||
Assert.assertTrue(client.isNsxControllerActive());
|
||||
}
|
||||
|
||||
/**
 * When the monitor profile already exists on NSX (the mocked profile lookup succeeds),
 * creating the server pool must NOT re-patch the monitor profile — only the pool is patched.
 */
@Test
public void testCreateNsxLbServerPoolExistingMonitorProfileSkipsMonitorPatch() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    // Helper stubs a monitor profile that resolves successfully (i.e. it already exists).
    LbMonitorProfiles lbMonitorProfiles = mockLbMonitorProfiles();

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    // Pool lookup throws NotFound so the pool itself must be created (patched).
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(lbMonitorProfiles, never()).patch(anyString(), any(LBTcpMonitorProfile.class));
    verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the TCP monitor profile is missing (first lookup throws NotFound), creating a
 * TCP server pool must create (patch) a TCP monitor profile and then patch the pool.
 */
@Test
public void testCreateNsxLbServerPoolMissingMonitorTCPProfilePerformsPatch() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
    // Deep stubs allow chaining _getDataValue().getField("path").toString() below.
    Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
    // First get() = NotFound (profile missing, triggers creation); second get() = created profile.
    Mockito.when(lbMonitorProfiles.get(anyString())).thenThrow(new NotFound(null, null)).thenReturn(monitorStructure);
    Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(lbMonitorProfiles).patch(anyString(), any(LBTcpMonitorProfile.class));
    verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
}
|
||||
|
||||
/**
 * UDP counterpart of the missing-monitor-profile case: for a UDP pool the created
 * monitor profile is an ICMP profile (LBIcmpMonitorProfile), then the pool is patched.
 */
@Test
public void testCreateNsxLbServerPoolMissingMonitorUDPProfilePerformsPatch() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
    // Deep stubs allow chaining _getDataValue().getField("path").toString() below.
    Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
    // First get() = NotFound (profile missing, triggers creation); second get() = created profile.
    Mockito.when(lbMonitorProfiles.get(anyString())).thenThrow(new NotFound(null, null)).thenReturn(monitorStructure);
    Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "UDP");

    verify(lbMonitorProfiles).patch(anyString(), any(LBIcmpMonitorProfile.class));
    verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the pool already exists with the same member set (order-independent — the existing
 * members are stubbed in reverse order), the whole creation is a no-op: no monitor profile
 * service is touched and the pool is not patched.
 */
@Test
public void testCreateNsxLbServerPoolPoolExistsWithSameMembersSkipsPatch() {
    long lbId = 1L;
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
    List<NsxLoadBalancerMember> memberList = List.of(
            new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
            new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
    );
    // Same members as requested, deliberately in a different order.
    List<LBPoolMember> sameMembers = List.of(
            createPoolMember(2L, "10.0.0.2", 80),
            createPoolMember(1L, "10.0.0.1", 80)
    );

    LbPools lbPools = Mockito.mock(LbPools.class);
    LBPool existingPool = Mockito.mock(LBPool.class);

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
    Mockito.when(existingPool.getMembers()).thenReturn(sameMembers);

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(nsxService, never()).apply(LbMonitorProfiles.class);
    verify(lbPools, never()).patch(anyString(), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the pool already exists with no members (getMembers() returns null) and the
 * requested member list is empty, nothing changes: no monitor profile lookup, no pool patch.
 */
@Test
public void testCreateNsxLbServerPoolPoolExistsWithoutMembersAndEmptyUpdateSkipsPatch() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);

    LbPools lbPools = Mockito.mock(LbPools.class);
    LBPool existingPool = Mockito.mock(LBPool.class);

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
    // null member list on the existing pool must be tolerated (treated as empty).
    Mockito.when(existingPool.getMembers()).thenReturn(null);

    client.createNsxLbServerPool(List.of(), TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(nsxService, never()).apply(LbMonitorProfiles.class);
    verify(lbPools, never()).patch(anyString(), any(LBPool.class));
}
|
||||
|
||||
/**
 * Duplicate entries in the existing pool's member list must not make the comparison
 * report a difference: the effective member sets match, so no patch is performed.
 */
@Test
public void testCreateNsxLbServerPoolPoolExistsWithDuplicateMembersSkipsPatch() {
    long lbId = 1L;
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
    List<NsxLoadBalancerMember> memberList = List.of(
            new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
            new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
    );

    LbPools lbPools = Mockito.mock(LbPools.class);
    LBPool existingPool = Mockito.mock(LBPool.class);

    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
    // Existing pool carries member 1 twice; set-wise it still equals the requested members.
    Mockito.when(existingPool.getMembers()).thenReturn(List.of(
            createPoolMember(1L, "10.0.0.1", 80),
            createPoolMember(1L, "10.0.0.1", 80),
            createPoolMember(2L, "10.0.0.2", 80)
    ));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(nsxService, never()).apply(LbMonitorProfiles.class);
    verify(lbPools, never()).patch(anyString(), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the pool exists but its members differ from the requested list
 * (different IP for member 1, member 2 absent), the pool must be patched.
 */
@Test
public void testCreateNsxLbServerPoolPoolExistsWithDifferentMembersPerformsPatch() {
    long lbId = 1L;
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
    List<NsxLoadBalancerMember> memberList = List.of(
            new NsxLoadBalancerMember(1L, "10.0.0.1", 80),
            new NsxLoadBalancerMember(2L, "10.0.0.2", 80)
    );

    LbPools lbPools = Mockito.mock(LbPools.class);
    LBPool existingPool = Mockito.mock(LBPool.class);

    // Monitor profile resolves successfully via the shared helper stub.
    mockLbMonitorProfiles();
    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenReturn(existingPool);
    Mockito.when(existingPool.getMembers()).thenReturn(List.of(
            createPoolMember(1L, "10.0.0.10", 80)
    ));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the pool does not exist at all (get() throws NotFound), it must be created via patch.
 */
@Test
public void testCreateNsxLbServerPoolPoolDoesNotExistPerformsPatch() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);

    // Monitor profile resolves successfully via the shared helper stub.
    mockLbMonitorProfiles();
    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));

    client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");

    verify(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));
}
|
||||
|
||||
/**
 * When the LB virtual server already exists on NSX, createAndAddNsxLbVirtualServer
 * must return early: only the existence lookup happens, no virtual-server patch.
 */
@Test
public void testCreateAndAddNsxLbVirtualServerVirtualServerAlreadyExistsSkipsPatch() {
    long lbId = 1L;
    String lbVirtualServerName = NsxControllerUtils.getVirtualServerName(TIER_1_GATEWAY_NAME, lbId);
    String lbServiceName = NsxControllerUtils.getLoadBalancerName(TIER_1_GATEWAY_NAME);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    LbServices lbServices = Mockito.mock(LbServices.class);
    LbVirtualServers lbVirtualServers = Mockito.mock(LbVirtualServers.class);
    LBVirtualServer existingVs = Mockito.mock(LBVirtualServer.class);

    mockLbMonitorProfiles();
    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(nsxService.apply(LbServices.class)).thenReturn(lbServices);
    Mockito.when(nsxService.apply(LbVirtualServers.class)).thenReturn(lbVirtualServers);
    Mockito.when(lbPools.get(anyString())).thenThrow(new NotFound(null, null));
    Mockito.when(lbServices.get(anyString())).thenReturn(null);
    // The virtual server lookup succeeds, so creation must be skipped.
    Mockito.when(lbVirtualServers.get(lbVirtualServerName)).thenReturn(existingVs);

    client.createAndAddNsxLbVirtualServer(TIER_1_GATEWAY_NAME, lbId, "192.168.1.1", "443",
            memberList, "roundrobin", "TCP", "80");

    verify(lbVirtualServers).get(lbVirtualServerName);
    verify(lbVirtualServers, never()).get(lbServiceName);
    verify(lbVirtualServers, never()).patch(anyString(), any(LBVirtualServer.class));
}
|
||||
|
||||
/**
 * When the LB virtual server does not exist (lookup throws NotFound), the full creation
 * path runs — server pool, app profile, LB service resolution — ending in a patch of
 * the virtual server.
 */
@Test
public void testCreateAndAddNsxLbVirtualServerVirtualServerNotFoundPerformsPatch() {
    long lbId = 1L;
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, lbId);
    String lbVirtualServerName = NsxControllerUtils.getVirtualServerName(TIER_1_GATEWAY_NAME, lbId);
    String lbServiceName = NsxControllerUtils.getLoadBalancerName(TIER_1_GATEWAY_NAME);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    LBPool lbPool = Mockito.mock(LBPool.class);
    LbServices lbServices = Mockito.mock(LbServices.class);
    LBService lbService = Mockito.mock(LBService.class);
    LbVirtualServers lbVirtualServers = Mockito.mock(LbVirtualServers.class);

    mockLbMonitorProfiles();
    mockLbAppProfiles();
    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(nsxService.apply(LbServices.class)).thenReturn(lbServices);
    Mockito.when(nsxService.apply(LbVirtualServers.class)).thenReturn(lbVirtualServers);
    // First pool get() = NotFound (pool gets created), second returns the created pool.
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null)).thenReturn(lbPool);
    Mockito.when(lbPool.getPath()).thenReturn("/infra/lb-pools/" + lbServerPoolName);
    Mockito.when(lbServices.get(lbServiceName)).thenReturn(lbService);
    Mockito.when(lbService.getPath()).thenReturn("/infra/lb-services/" + lbServiceName);
    Mockito.when(lbVirtualServers.get(lbVirtualServerName)).thenThrow(new NotFound(null, null));

    client.createAndAddNsxLbVirtualServer(TIER_1_GATEWAY_NAME, lbId, "192.168.1.1", "443",
            memberList, "roundrobin", "TCP", "80");

    verify(lbVirtualServers).get(lbVirtualServerName);
    verify(lbVirtualServers, never()).get(lbServiceName);
    verify(lbVirtualServers).patch(eq(lbVirtualServerName), any(LBVirtualServer.class));
}
|
||||
|
||||
/**
 * When NSX rejects the pool patch (the stub throws the vapi Error), the client must
 * wrap it in a CloudRuntimeException whose message names the failed operation.
 */
@Test
public void testCreateNsxLbServerPoolThrowsExceptionOnPatchError() {
    String lbServerPoolName = NsxControllerUtils.getServerPoolName(TIER_1_GATEWAY_NAME, 1L);
    List<NsxLoadBalancerMember> memberList = List.of(new NsxLoadBalancerMember(1L, "10.0.0.1", 80));

    LbPools lbPools = Mockito.mock(LbPools.class);
    Structure errorData = Mockito.mock(Structure.class);
    ApiError apiError = new ApiError();
    apiError.setErrorData(errorData);

    mockLbMonitorProfiles();
    Mockito.when(nsxService.apply(LbPools.class)).thenReturn(lbPools);
    Mockito.when(lbPools.get(lbServerPoolName)).thenThrow(new NotFound(null, null));
    // The error payload converts to an ApiError so the client can extract the message.
    when(errorData._convertTo(ApiError.class)).thenReturn(apiError);
    doThrow(new Error(List.of(), errorData)).when(lbPools).patch(eq(lbServerPoolName), any(LBPool.class));

    CloudRuntimeException thrownException = assertThrows(CloudRuntimeException.class, () -> {
        client.createNsxLbServerPool(memberList, TIER_1_GATEWAY_NAME, lbServerPoolName, "roundrobin", "80", "TCP");
    });
    assertTrue(thrownException.getMessage().startsWith("Failed to create NSX LB server pool, due to"));
}
|
||||
|
||||
/**
 * Stubs the LbMonitorProfiles service so that any profile lookup succeeds and
 * exposes a fixed path — i.e. simulates a monitor profile that already exists.
 *
 * @return the mocked LbMonitorProfiles, for verification by callers
 */
private LbMonitorProfiles mockLbMonitorProfiles() {
    LbMonitorProfiles lbMonitorProfiles = Mockito.mock(LbMonitorProfiles.class);
    // Deep stubs allow chaining _getDataValue().getField("path").toString() below.
    Structure monitorStructure = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);

    Mockito.when(nsxService.apply(LbMonitorProfiles.class)).thenReturn(lbMonitorProfiles);
    Mockito.when(lbMonitorProfiles.get(anyString())).thenReturn(monitorStructure);
    Mockito.when(monitorStructure._getDataValue().getField("path").toString()).thenReturn("/infra/lb-monitor-profiles/test");
    return lbMonitorProfiles;
}
|
||||
|
||||
/**
 * Stubs the LbAppProfiles service to return a single app profile whose path contains
 * "tcp", so protocol-based profile matching in the client resolves a TCP profile.
 */
private void mockLbAppProfiles() {
    LbAppProfiles lbAppProfiles = Mockito.mock(LbAppProfiles.class);
    LBAppProfileListResult appProfileListResult = Mockito.mock(LBAppProfileListResult.class);
    // Deep stubs allow chaining _getDataValue().getField("path").toString() below.
    Structure appProfile = Mockito.mock(Structure.class, Mockito.RETURNS_DEEP_STUBS);

    Mockito.when(nsxService.apply(LbAppProfiles.class)).thenReturn(lbAppProfiles);
    Mockito.when(lbAppProfiles.list(null, null, null, null, null, null)).thenReturn(appProfileListResult);
    Mockito.when(appProfileListResult.getResults()).thenReturn(List.of(appProfile));
    Mockito.when(appProfile._getDataValue().getField("path").toString()).thenReturn("/infra/lb-app-profiles/default-tcp-profile");
}
|
||||
|
||||
private LBPoolMember createPoolMember(long vmId, String ipAddress, int port) {
|
||||
return new LBPoolMember.Builder()
|
||||
.setDisplayName(NsxControllerUtils.getServerPoolMemberName(TIER_1_GATEWAY_NAME, vmId))
|
||||
.setIpAddress(ipAddress)
|
||||
.setPort(String.valueOf(port))
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,156 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.service;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
|
||||
/**
 * Unit tests for {@link PagedFetcher}, exercising the no-pagination cases (null and
 * empty cursor), multi-page accumulation, and a null first-page item list.
 */
public class PagedFetcherTest {

    // Minimal mutable page type standing in for an NSX list-result object:
    // carries a next-page cursor and an item list the fetcher can read and overwrite.
    private static class Page {
        private String cursor;
        private List<String> items;

        Page(String cursor, List<String> items) {
            this.cursor = cursor;
            this.items = items;
        }

        String getCursor() {
            return cursor;
        }

        List<String> getItems() {
            return items;
        }

        void setItems(List<String> items) {
            this.items = items;
        }
    }

    /** A null cursor means a single page: it is returned untouched and the setter never runs. */
    @Test
    public void testFetchAllWhenThereIsNoPagination() {
        // given
        Page firstPage = new Page(null, new ArrayList<>(List.of("a", "b")));
        AtomicBoolean itemsSetterCalled = new AtomicBoolean(false);
        PagedFetcher<Page, String> fetcher = PagedFetcher.<Page, String>withPageFetcher(
                cursor -> {
                    // The very first fetch must be issued with a null cursor.
                    assertNull(cursor);
                    return firstPage;
                })
                .cursorExtractor(Page::getCursor)
                .itemsExtractor(Page::getItems)
                .itemsSetter((page, items) -> itemsSetterCalled.set(true));

        // when
        Page result = fetcher.fetchAll();

        // then
        assertSame(firstPage, result);
        assertEquals(List.of("a", "b"), result.getItems());
        assertFalse("itemsSetter must not be called when there is no next page", itemsSetterCalled.get());
    }

    /** An empty-string cursor is treated the same as null: no pagination, setter never runs. */
    @Test
    public void testFetchAllWhenThereIsNoPaginationAndEmptyCursor() {
        // given
        Page firstPage = new Page("", new ArrayList<>(List.of("x")));

        AtomicBoolean itemsSetterCalled = new AtomicBoolean(false);

        PagedFetcher<Page, String> fetcher = PagedFetcher
                .<Page, String>withPageFetcher(cursor -> {
                    // The very first fetch must be issued with a null cursor.
                    assertNull(cursor);
                    return firstPage;
                })
                .cursorExtractor(Page::getCursor)
                .itemsExtractor(Page::getItems)
                .itemsSetter((page, items) -> itemsSetterCalled.set(true));

        // when
        Page result = fetcher.fetchAll();

        // then
        assertSame(firstPage, result);
        assertEquals(List.of("x"), result.getItems());
        assertFalse("itemsSetter must not be called when there is no next page", itemsSetterCalled.get());
    }

    /** Three chained pages: all items are merged in order and written back onto page 1. */
    @Test
    public void testFetchAllWhenMultiPages() {
        // given
        Page page1 = new Page("c1", new ArrayList<>(List.of("p1a", "p1b")));
        Page page2 = new Page("c2", new ArrayList<>(List.of("p2a")));
        Page page3 = new Page(null, new ArrayList<>(List.of("p3a", "p3b")));

        // Simulated server: maps the request cursor (null for page 1) to a page.
        Map<String, Page> pagesByCursor = new HashMap<>();
        pagesByCursor.put(null, page1);
        pagesByCursor.put("c1", page2);
        pagesByCursor.put("c2", page3);

        PagedFetcher<Page, String> fetcher = PagedFetcher
                .<Page, String>withPageFetcher(pagesByCursor::get)
                .cursorExtractor(Page::getCursor)
                .itemsExtractor(Page::getItems)
                .itemsSetter((page, items) -> {
                    // The merged list must be written onto the FIRST page only.
                    assertSame(page1, page);
                    page.setItems(items);
                });

        // when
        Page result = fetcher.fetchAll();

        // then
        assertSame("Result must be the first page object", page1, result);
        assertEquals(List.of("p1a", "p1b", "p2a", "p3a", "p3b"), result.getItems());
    }

    /** A null item list on the first page is tolerated; later pages still contribute. */
    @Test
    public void testFetchAllFirstPageItemsNullSecondWithItems() {
        // given
        Page page1 = new Page("next", null);
        Page page2 = new Page(null, new ArrayList<>(List.of("x", "y")));

        Map<String, Page> pages = new HashMap<>();
        pages.put(null, page1);
        pages.put("next", page2);

        PagedFetcher<Page, String> fetcher = PagedFetcher
                .<Page, String>withPageFetcher(pages::get)
                .cursorExtractor(Page::getCursor)
                .itemsExtractor(Page::getItems)
                .itemsSetter(Page::setItems);

        // when
        Page result = fetcher.fetchAll();

        // then
        assertSame(page1, result);
        assertEquals(List.of("x", "y"), result.getItems());
    }
}
|
||||
|
|
@ -1162,21 +1162,19 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
|
|||
|
||||
long reservedIpAddressesAmount = ipDedicatedAccountId == null ? 1L : 0L;
|
||||
try (CheckedReservation publicIpAddressReservation = new CheckedReservation(account, Resource.ResourceType.public_ip, reservedIpAddressesAmount, reservationDao, _resourceLimitMgr)) {
|
||||
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByVlan(ipVO.getVlanId());
|
||||
ipVO.setAllocatedTime(new Date());
|
||||
ipVO.setAllocatedToAccountId(account.getAccountId());
|
||||
ipVO.setAllocatedInDomainId(account.getDomainId());
|
||||
ipVO.setState(State.Reserved);
|
||||
if (displayIp != null) {
|
||||
ipVO.setDisplay(displayIp);
|
||||
}
|
||||
ipVO = _ipAddressDao.persist(ipVO);
|
||||
if (reservedIpAddressesAmount > 0) {
|
||||
_resourceLimitMgr.incrementResourceCount(account.getId(), Resource.ResourceType.public_ip);
|
||||
}
|
||||
return ipVO;
|
||||
|
||||
List<AccountVlanMapVO> maps = _accountVlanMapDao.listAccountVlanMapsByVlan(ipVO.getVlanId());
|
||||
ipVO.setAllocatedTime(new Date());
|
||||
ipVO.setAllocatedToAccountId(account.getAccountId());
|
||||
ipVO.setAllocatedInDomainId(account.getDomainId());
|
||||
ipVO.setState(State.Reserved);
|
||||
if (displayIp != null) {
|
||||
ipVO.setDisplay(displayIp);
|
||||
}
|
||||
ipVO = _ipAddressDao.persist(ipVO);
|
||||
if (reservedIpAddressesAmount > 0) {
|
||||
_resourceLimitMgr.incrementResourceCount(account.getId(), Resource.ResourceType.public_ip);
|
||||
}
|
||||
return ipVO;
|
||||
} catch (ResourceAllocationException ex) {
|
||||
logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + account);
|
||||
throw new AccountLimitException("Maximum number of public IP addresses for account: " + account.getAccountName() + " has been exceeded.");
|
||||
|
|
|
|||
|
|
@ -396,7 +396,8 @@ public class CommandSetupHelper {
|
|||
}
|
||||
|
||||
final LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs, routerPublicIp, _routerControlHelper.getRouterIpInNetwork(guestNetworkId, router.getId()),
|
||||
router.getPrivateIpAddress(), _itMgr.toNicTO(nicProfile, router.getHypervisorType()), router.getVpcId(), maxconn, offering.isKeepAliveEnabled());
|
||||
router.getPrivateIpAddress(), _itMgr.toNicTO(nicProfile, router.getHypervisorType()), router.getVpcId(), maxconn, offering.isKeepAliveEnabled(),
|
||||
NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
|
||||
cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key());
|
||||
cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key());
|
||||
|
|
|
|||
|
|
@ -1690,7 +1690,7 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
|
|||
} else {
|
||||
loadBalancingData.append("maxconn=").append(offering.getConcurrentConnections());
|
||||
}
|
||||
|
||||
loadBalancingData.append(",idletimeout=").append(NetworkOrchestrationService.NETWORK_LB_HAPROXY_IDLE_TIMEOUT.value());
|
||||
loadBalancingData.append(",sourcePortStart=").append(firewallRuleVO.getSourcePortStart())
|
||||
.append(",sourcePortEnd=").append(firewallRuleVO.getSourcePortEnd());
|
||||
if (firewallRuleVO instanceof LoadBalancerVO) {
|
||||
|
|
|
|||
|
|
@ -1670,25 +1670,25 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis
|
|||
vpc.setUseRouterIpResolver(Boolean.TRUE.equals(useVrIpResolver));
|
||||
|
||||
try (CheckedReservation vpcReservation = new CheckedReservation(owner, ResourceType.vpc, null, null, 1L, reservationDao, _resourceLimitMgr)) {
|
||||
if (vpc.getCidr() == null && cidrSize != null) {
|
||||
// Allocate a CIDR for VPC
|
||||
Ipv4GuestSubnetNetworkMap subnet = routedIpv4Manager.getOrCreateIpv4SubnetForVpc(vpc, cidrSize);
|
||||
if (subnet != null) {
|
||||
vpc.setCidr(subnet.getSubnet());
|
||||
} else {
|
||||
throw new CloudRuntimeException("Failed to allocate a CIDR with requested size for VPC.");
|
||||
if (vpc.getCidr() == null && cidrSize != null) {
|
||||
// Allocate a CIDR for VPC
|
||||
Ipv4GuestSubnetNetworkMap subnet = routedIpv4Manager.getOrCreateIpv4SubnetForVpc(vpc, cidrSize);
|
||||
if (subnet != null) {
|
||||
vpc.setCidr(subnet.getSubnet());
|
||||
} else {
|
||||
throw new CloudRuntimeException("Failed to allocate a CIDR with requested size for VPC.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Vpc newVpc = createVpc(displayVpc, vpc);
|
||||
// assign Ipv4 subnet to Routed VPC
|
||||
if (routedIpv4Manager.isRoutedVpc(vpc)) {
|
||||
routedIpv4Manager.assignIpv4SubnetToVpc(newVpc);
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(bgpPeerIds)) {
|
||||
routedIpv4Manager.persistBgpPeersForVpc(newVpc.getId(), bgpPeerIds);
|
||||
}
|
||||
return newVpc;
|
||||
Vpc newVpc = createVpc(displayVpc, vpc);
|
||||
// assign Ipv4 subnet to Routed VPC
|
||||
if (routedIpv4Manager.isRoutedVpc(vpc)) {
|
||||
routedIpv4Manager.assignIpv4SubnetToVpc(newVpc);
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(bgpPeerIds)) {
|
||||
routedIpv4Manager.persistBgpPeersForVpc(newVpc.getId(), bgpPeerIds);
|
||||
}
|
||||
return newVpc;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -277,40 +277,39 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C
|
|||
}
|
||||
|
||||
try (CheckedReservation projectReservation = new CheckedReservation(owner, ResourceType.project, null, null, 1L, reservationDao, _resourceLimitMgr)) {
|
||||
final Account ownerFinal = owner;
|
||||
User finalUser = user;
|
||||
Project project = Transaction.execute(new TransactionCallback<Project>() {
|
||||
@Override
|
||||
public Project doInTransaction(TransactionStatus status) {
|
||||
|
||||
final Account ownerFinal = owner;
|
||||
User finalUser = user;
|
||||
Project project = Transaction.execute(new TransactionCallback<Project>() {
|
||||
@Override
|
||||
public Project doInTransaction(TransactionStatus status) {
|
||||
//Create an account associated with the project
|
||||
StringBuilder acctNm = new StringBuilder("PrjAcct-");
|
||||
acctNm.append(name).append("-").append(ownerFinal.getDomainId());
|
||||
|
||||
//Create an account associated with the project
|
||||
StringBuilder acctNm = new StringBuilder("PrjAcct-");
|
||||
acctNm.append(name).append("-").append(ownerFinal.getDomainId());
|
||||
Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.Type.PROJECT, null, domainId, null, null, UUID.randomUUID().toString());
|
||||
|
||||
Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.Type.PROJECT, null, domainId, null, null, UUID.randomUUID().toString());
|
||||
Project project = _projectDao.persist(new ProjectVO(name, displayText, ownerFinal.getDomainId(), projectAccount.getId()));
|
||||
|
||||
Project project = _projectDao.persist(new ProjectVO(name, displayText, ownerFinal.getDomainId(), projectAccount.getId()));
|
||||
|
||||
//assign owner to the project
|
||||
assignAccountToProject(project, ownerFinal.getId(), ProjectAccount.Role.Admin,
|
||||
Optional.ofNullable(finalUser).map(User::getId).orElse(null), null);
|
||||
//assign owner to the project
|
||||
assignAccountToProject(project, ownerFinal.getId(), ProjectAccount.Role.Admin,
|
||||
Optional.ofNullable(finalUser).map(User::getId).orElse(null), null);
|
||||
|
||||
if (project != null) {
|
||||
CallContext.current().setEventDetails("Project ID: " + project.getUuid());
|
||||
CallContext.current().putContextParameter(Project.class, project.getUuid());
|
||||
}
|
||||
|
||||
//Increment resource count
|
||||
_resourceLimitMgr.incrementResourceCount(ownerFinal.getId(), ResourceType.project);
|
||||
//Increment resource count
|
||||
_resourceLimitMgr.incrementResourceCount(ownerFinal.getId(), ResourceType.project);
|
||||
|
||||
return project;
|
||||
}
|
||||
});
|
||||
return project;
|
||||
}
|
||||
});
|
||||
|
||||
messageBus.publish(_name, ProjectManager.MESSAGE_CREATE_TUNGSTEN_PROJECT_EVENT, PublishScope.LOCAL, project);
|
||||
messageBus.publish(_name, ProjectManager.MESSAGE_CREATE_TUNGSTEN_PROJECT_EVENT, PublishScope.LOCAL, project);
|
||||
|
||||
return project;
|
||||
return project;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -604,16 +603,16 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C
|
|||
|
||||
boolean shouldIncrementResourceCount = projectRole != null && Role.Admin == projectRole;
|
||||
try (CheckedReservation cr = new CheckedReservation(userAccount, ResourceType.project, shouldIncrementResourceCount ? 1L : 0L, reservationDao, _resourceLimitMgr)) {
|
||||
if (assignUserToProject(project, user.getId(), user.getAccountId(), projectRole,
|
||||
Optional.ofNullable(role).map(ProjectRole::getId).orElse(null)) != null) {
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(userAccount.getId(), ResourceType.project);
|
||||
if (assignUserToProject(project, user.getId(), user.getAccountId(), projectRole,
|
||||
Optional.ofNullable(role).map(ProjectRole::getId).orElse(null)) != null) {
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(userAccount.getId(), ResourceType.project);
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("Failed to add user to project: {}", project);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("Failed to add user to project: {}", project);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -671,13 +670,13 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C
|
|||
boolean shouldIncrementResourceCount = Role.Admin == newAccRole;
|
||||
|
||||
try (CheckedReservation checkedReservation = new CheckedReservation(account, ResourceType.project, shouldIncrementResourceCount ? 1L : 0L, reservationDao, _resourceLimitMgr)) {
|
||||
futureOwner.setAccountRole(newAccRole);
|
||||
_projectAccountDao.update(futureOwner.getId(), futureOwner);
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(accountId, ResourceType.project);
|
||||
} else {
|
||||
_resourceLimitMgr.decrementResourceCount(accountId, ResourceType.project);
|
||||
}
|
||||
futureOwner.setAccountRole(newAccRole);
|
||||
_projectAccountDao.update(futureOwner.getId(), futureOwner);
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(accountId, ResourceType.project);
|
||||
} else {
|
||||
_resourceLimitMgr.decrementResourceCount(accountId, ResourceType.project);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -721,16 +720,16 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C
|
|||
}
|
||||
|
||||
try (CheckedReservation checkedReservation = new CheckedReservation(futureOwnerAccount, ResourceType.project, null, null, 1L, reservationDao, _resourceLimitMgr)) {
|
||||
//unset the role for the old owner
|
||||
ProjectAccountVO currentOwner = _projectAccountDao.findByProjectIdAccountId(projectId, currentOwnerAccount.getId());
|
||||
currentOwner.setAccountRole(Role.Regular);
|
||||
_projectAccountDao.update(currentOwner.getId(), currentOwner);
|
||||
_resourceLimitMgr.decrementResourceCount(currentOwnerAccount.getId(), ResourceType.project);
|
||||
//unset the role for the old owner
|
||||
ProjectAccountVO currentOwner = _projectAccountDao.findByProjectIdAccountId(projectId, currentOwnerAccount.getId());
|
||||
currentOwner.setAccountRole(Role.Regular);
|
||||
_projectAccountDao.update(currentOwner.getId(), currentOwner);
|
||||
_resourceLimitMgr.decrementResourceCount(currentOwnerAccount.getId(), ResourceType.project);
|
||||
|
||||
//set new owner
|
||||
futureOwner.setAccountRole(Role.Admin);
|
||||
_projectAccountDao.update(futureOwner.getId(), futureOwner);
|
||||
_resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project);
|
||||
//set new owner
|
||||
futureOwner.setAccountRole(Role.Admin);
|
||||
_projectAccountDao.update(futureOwner.getId(), futureOwner);
|
||||
_resourceLimitMgr.incrementResourceCount(futureOwnerAccount.getId(), ResourceType.project);
|
||||
}
|
||||
} else {
|
||||
logger.trace("Future owner {}is already the owner of the project {}", newOwnerName, project);
|
||||
|
|
@ -877,16 +876,16 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager, C
|
|||
|
||||
boolean shouldIncrementResourceCount = projectRoleType != null && Role.Admin == projectRoleType;
|
||||
try (CheckedReservation cr = new CheckedReservation(account, ResourceType.project, shouldIncrementResourceCount ? 1L : 0L, reservationDao, _resourceLimitMgr)) {
|
||||
if (assignAccountToProject(project, account.getId(), projectRoleType, null,
|
||||
Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) {
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(account.getId(), ResourceType.project);
|
||||
if (assignAccountToProject(project, account.getId(), projectRoleType, null,
|
||||
Optional.ofNullable(projectRole).map(ProjectRole::getId).orElse(null)) != null) {
|
||||
if (shouldIncrementResourceCount) {
|
||||
_resourceLimitMgr.incrementResourceCount(account.getId(), ResourceType.project);
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("Failed to add account {} to project {}", accountName, project);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("Failed to add account {} to project {}", accountName, project);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1172,7 +1172,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
@Override
|
||||
public boolean deleteHost(final long hostId, final boolean isForced, final boolean isForceDeleteStorage) {
|
||||
try {
|
||||
final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.DeleteHost);
|
||||
final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.DeleteHost, isForced, isForceDeleteStorage);
|
||||
if (result != null) {
|
||||
return result;
|
||||
}
|
||||
|
|
@ -3904,13 +3904,18 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean executeUserRequest(final long hostId, final ResourceState.Event event) {
|
||||
public boolean executeUserRequest(final long hostId, final ResourceState.Event event) throws AgentUnavailableException {
|
||||
return executeUserRequest(hostId, event, false, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeUserRequest(final long hostId, final ResourceState.Event event, final boolean isForced, final boolean isForceDeleteStorage) throws AgentUnavailableException {
|
||||
if (event == ResourceState.Event.AdminAskMaintenance) {
|
||||
return doMaintain(hostId);
|
||||
} else if (event == ResourceState.Event.AdminCancelMaintenance) {
|
||||
return doCancelMaintenance(hostId);
|
||||
} else if (event == ResourceState.Event.DeleteHost) {
|
||||
return doDeleteHost(hostId, false, false);
|
||||
return doDeleteHost(hostId, isForced, isForceDeleteStorage);
|
||||
} else if (event == ResourceState.Event.Unmanaged) {
|
||||
return doUmanageHost(hostId);
|
||||
} else if (event == ResourceState.Event.UpdatePassword) {
|
||||
|
|
@ -4030,6 +4035,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
}
|
||||
|
||||
public Boolean propagateResourceEvent(final long agentId, final ResourceState.Event event) throws AgentUnavailableException {
|
||||
return propagateResourceEvent(agentId, event, false, false);
|
||||
}
|
||||
|
||||
public Boolean propagateResourceEvent(final long agentId, final ResourceState.Event event, final boolean isForced, final boolean isForceDeleteStorage) throws AgentUnavailableException {
|
||||
final String msPeer = getPeerName(agentId);
|
||||
if (msPeer == null) {
|
||||
return null;
|
||||
|
|
@ -4037,7 +4046,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
|
||||
logger.debug("Propagating resource request event:" + event.toString() + " to agent:" + agentId);
|
||||
final Command[] cmds = new Command[1];
|
||||
cmds[0] = new PropagateResourceEventCommand(agentId, event);
|
||||
cmds[0] = new PropagateResourceEventCommand(agentId, event, isForced, isForceDeleteStorage);
|
||||
|
||||
final String AnsStr = _clusterMgr.execute(msPeer, agentId, _gson.toJson(cmds), true);
|
||||
if (AnsStr == null) {
|
||||
|
|
@ -4050,6 +4059,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
|
|||
logger.debug("Result for agent change is " + answers[0].getResult());
|
||||
}
|
||||
|
||||
if (!answers[0].getResult()) {
|
||||
final String details = answers[0].getDetails();
|
||||
if (details != null && !details.isEmpty()) {
|
||||
throw new CloudRuntimeException(String.format("Failed to propagate resource event %s for host %d on peer %s: %s", event, agentId, msPeer, details));
|
||||
}
|
||||
}
|
||||
|
||||
return answers[0].getResult();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -361,7 +361,7 @@ public class ImageStoreUploadMonitorImpl extends ManagerBase implements ImageSto
|
|||
boolean success = true;
|
||||
Long currentSize = answer.getVirtualSize() != 0 ? answer.getVirtualSize() : answer.getPhysicalSize();
|
||||
Long lastSize = volume.getSize() != null ? volume.getSize() : 0L;
|
||||
if (!checkAndUpdateSecondaryStorageResourceLimit(volume.getAccountId(), volume.getSize(), currentSize)) {
|
||||
if (!checkAndUpdateSecondaryStorageResourceLimit(volume.getAccountId(), lastSize, currentSize)) {
|
||||
volumeDataStore.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOAD_ERROR);
|
||||
volumeDataStore.setState(State.Failed);
|
||||
volumeDataStore.setErrorString("Storage Limit Reached");
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
// under the License.
|
||||
package com.cloud.storage;
|
||||
|
||||
import static com.cloud.configuration.ConfigurationManagerImpl.SystemVMUseLocalStorage;
|
||||
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
|
||||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
|
|
@ -149,6 +150,7 @@ import org.apache.commons.collections.CollectionUtils;
|
|||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.lang.time.DateUtils;
|
||||
import org.apache.commons.lang3.ArrayUtils;
|
||||
import org.apache.commons.lang3.BooleanUtils;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
|
|
@ -181,7 +183,6 @@ import com.cloud.capacity.dao.CapacityDao;
|
|||
import com.cloud.cluster.ClusterManagerListener;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.ConfigurationManager;
|
||||
import com.cloud.configuration.ConfigurationManagerImpl;
|
||||
import com.cloud.configuration.Resource.ResourceType;
|
||||
import com.cloud.cpu.CPU;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
|
|
@ -822,6 +823,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
return createLocalStorage(host, pInfo);
|
||||
}
|
||||
|
||||
private boolean isLocalStorageEnabledForZone(DataCenterVO zone) {
|
||||
return zone.isLocalStorageEnabled() || BooleanUtils.toBoolean(SystemVMUseLocalStorage.valueIn(zone.getId()));
|
||||
}
|
||||
|
||||
@DB
|
||||
@Override
|
||||
public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws ConnectionException {
|
||||
|
|
@ -829,12 +834,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
if (dc == null) {
|
||||
return null;
|
||||
}
|
||||
boolean useLocalStorageForSystemVM = false;
|
||||
Boolean isLocal = ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dc.getId());
|
||||
if (isLocal != null) {
|
||||
useLocalStorageForSystemVM = isLocal.booleanValue();
|
||||
}
|
||||
if (!(dc.isLocalStorageEnabled() || useLocalStorageForSystemVM)) {
|
||||
if (!isLocalStorageEnabledForZone(dc)) {
|
||||
return null;
|
||||
}
|
||||
DataStore store = null;
|
||||
|
|
@ -1038,6 +1038,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
|
|||
if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
|
||||
throw new PermissionDeniedException(String.format("Cannot perform this operation, Zone is currently disabled: %s", zone));
|
||||
}
|
||||
// Check if it's local storage and if it's enabled on the zone
|
||||
if (isFileScheme && !isLocalStorageEnabledForZone(zone)) {
|
||||
throw new InvalidParameterValueException("Local storage is not enabled for zone: " + zone);
|
||||
}
|
||||
|
||||
Map<String, Object> params = new HashMap<>();
|
||||
params.put("zoneId", zone.getId());
|
||||
|
|
|
|||
|
|
@ -2806,13 +2806,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
|
||||
try (CheckedReservation primaryStorageReservation = new CheckedReservation(owner, ResourceType.primary_storage, resourceLimitStorageTags, requiredPrimaryStorageSpace, reservationDao, _resourceLimitMgr)) {
|
||||
|
||||
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
|
||||
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
|
||||
|
||||
if (asyncExecutionContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
|
||||
return safelyOrchestrateAttachVolume(vmId, volumeId, deviceId);
|
||||
} else {
|
||||
return getVolumeAttachJobResult(vmId, volumeId, deviceId);
|
||||
}
|
||||
if (asyncExecutionContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
|
||||
return safelyOrchestrateAttachVolume(vmId, volumeId, deviceId);
|
||||
} else {
|
||||
return getVolumeAttachJobResult(vmId, volumeId, deviceId);
|
||||
}
|
||||
|
||||
} catch (ResourceAllocationException e) {
|
||||
logger.error("primary storage resource limit check failed", e);
|
||||
|
|
@ -4445,12 +4445,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
try {
|
||||
_resourceLimitMgr.checkVolumeResourceLimit(newAccount, true, volume.getSize(), _diskOfferingDao.findById(volume.getDiskOfferingId()), reservations);
|
||||
|
||||
Transaction.execute(new TransactionCallbackNoReturn() {
|
||||
@Override
|
||||
public void doInTransactionWithoutResult(TransactionStatus status) {
|
||||
updateVolumeAccount(oldAccount, volume, newAccount);
|
||||
}
|
||||
});
|
||||
Transaction.execute(new TransactionCallbackNoReturn() {
|
||||
@Override
|
||||
public void doInTransactionWithoutResult(TransactionStatus status) {
|
||||
updateVolumeAccount(oldAccount, volume, newAccount);
|
||||
}
|
||||
});
|
||||
|
||||
return volume;
|
||||
|
||||
|
|
|
|||
|
|
@ -280,7 +280,7 @@ public class DownloadListener implements Listener {
|
|||
}
|
||||
|
||||
private Long getSizeFromDB() {
|
||||
Long lastSize = 0L;
|
||||
Long lastSize = null;
|
||||
if (DataObjectType.TEMPLATE.equals(object.getType())) {
|
||||
TemplateDataStoreVO t = _templateDataStoreDao.findByStoreTemplate(object.getDataStore().getId(), object.getId());
|
||||
lastSize = t.getSize();
|
||||
|
|
@ -288,7 +288,7 @@ public class DownloadListener implements Listener {
|
|||
VolumeVO v = _volumeDao.findById(object.getId());
|
||||
lastSize = v.getSize();
|
||||
}
|
||||
return lastSize;
|
||||
return lastSize == null ? 0L : lastSize;
|
||||
}
|
||||
|
||||
private Boolean checkAndUpdateResourceLimits(DownloadAnswer answer) {
|
||||
|
|
|
|||
|
|
@ -35,7 +35,6 @@ import javax.inject.Inject;
|
|||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.cloudstack.acl.SecurityChecker;
|
||||
import com.cloud.api.ApiDBUtils;
|
||||
import org.apache.cloudstack.annotation.AnnotationService;
|
||||
import org.apache.cloudstack.annotation.dao.AnnotationDao;
|
||||
import org.apache.cloudstack.api.ApiCommandResourceType;
|
||||
|
|
@ -94,6 +93,7 @@ import com.cloud.agent.api.Answer;
|
|||
import com.cloud.agent.api.Command;
|
||||
import com.cloud.agent.api.DeleteSnapshotsDirCommand;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.api.ApiDBUtils;
|
||||
import com.cloud.api.commands.ListRecurringSnapshotScheduleCmd;
|
||||
import com.cloud.api.query.MutualExclusiveIdsManagerBase;
|
||||
import com.cloud.configuration.Config;
|
||||
|
|
@ -2049,17 +2049,17 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
|
||||
try (CheckedReservation volumeSnapshotReservation = new CheckedReservation(owner, ResourceType.snapshot, null, null, 1L, reservationDao, _resourceLimitMgr);
|
||||
CheckedReservation storageReservation = new CheckedReservation(owner, storeResourceType, null, null, volume.getSize(), reservationDao, _resourceLimitMgr)) {
|
||||
SnapshotVO snapshotVO = new SnapshotVO(volume.getDataCenterId(), volume.getAccountId(), volume.getDomainId(), volume.getId(), volume.getDiskOfferingId(), snapshotName,
|
||||
(short)snapshotType.ordinal(), snapshotType.name(), volume.getSize(), volume.getMinIops(), volume.getMaxIops(), hypervisorType, locationType);
|
||||
SnapshotVO snapshotVO = new SnapshotVO(volume.getDataCenterId(), volume.getAccountId(), volume.getDomainId(), volume.getId(), volume.getDiskOfferingId(), snapshotName,
|
||||
(short)snapshotType.ordinal(), snapshotType.name(), volume.getSize(), volume.getMinIops(), volume.getMaxIops(), hypervisorType, locationType);
|
||||
|
||||
SnapshotVO snapshot = _snapshotDao.persist(snapshotVO);
|
||||
if (snapshot == null) {
|
||||
throw new CloudRuntimeException(String.format("Failed to create snapshot for volume: %s", volume));
|
||||
}
|
||||
CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid());
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), storeResourceType, volume.getSize());
|
||||
return snapshot;
|
||||
SnapshotVO snapshot = _snapshotDao.persist(snapshotVO);
|
||||
if (snapshot == null) {
|
||||
throw new CloudRuntimeException(String.format("Failed to create snapshot for volume: %s", volume));
|
||||
}
|
||||
CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid());
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
|
||||
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), storeResourceType, volume.getSize());
|
||||
return snapshot;
|
||||
} catch (ResourceAllocationException e) {
|
||||
if (snapshotType != Type.MANUAL) {
|
||||
String msg = String.format("Snapshot resource limit exceeded for account id : %s. Failed to create recurring snapshots", owner.getId());
|
||||
|
|
|
|||
|
|
@ -375,10 +375,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
if (template != null) {
|
||||
CallContext.current().putContextParameter(VirtualMachineTemplate.class, template.getUuid());
|
||||
return template;
|
||||
} else {
|
||||
throw new CloudRuntimeException("Failed to create ISO");
|
||||
}
|
||||
}
|
||||
|
||||
throw new CloudRuntimeException("Failed to create ISO");
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -407,7 +407,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
TemplateProfile profile = adapter.prepare(cmd);
|
||||
VMTemplateVO template = adapter.create(profile);
|
||||
|
||||
// Secondary storage resource usage will be recalculated in com.cloud.template.HypervisorTemplateAdapter.createTemplateAsyncCallBack
|
||||
// Secondary storage resource usage will be incremented in com.cloud.template.HypervisorTemplateAdapter.createTemplateAsyncCallBack
|
||||
// for HypervisorTemplateAdapter
|
||||
_resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template);
|
||||
if (secondaryStorageUsage > 0) {
|
||||
|
|
@ -420,10 +420,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
vnfTemplateManager.persistVnfTemplate(template.getId(), (RegisterVnfTemplateCmd) cmd);
|
||||
}
|
||||
return template;
|
||||
} else {
|
||||
throw new CloudRuntimeException("Failed to create a Template");
|
||||
}
|
||||
}
|
||||
throw new CloudRuntimeException("Failed to create a Template");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1034,12 +1033,14 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
|
|||
logger.debug("There is Template {} in secondary storage {} in zone {} , don't need to copy", template, dstSecStore, dataCenterVOs.get(destZoneId));
|
||||
continue;
|
||||
}
|
||||
try (CheckedReservation secondaryStorageReservation = new CheckedReservation(templateOwner, ResourceType.secondary_storage, null, null, template.getSize(), reservationDao, _resourceLimitMgr)) {
|
||||
if (!copy(userId, template, srcSecStore, dataCenterVOs.get(destZoneId))) {
|
||||
failedZones.add(dataCenterVOs.get(destZoneId).getName());
|
||||
continue;
|
||||
if (template.getSize() != null) {
|
||||
try (CheckedReservation secondaryStorageReservation = new CheckedReservation(templateOwner, ResourceType.secondary_storage, null, null, template.getSize(), reservationDao, _resourceLimitMgr)) {
|
||||
if (!copy(userId, template, srcSecStore, dataCenterVOs.get(destZoneId))) {
|
||||
failedZones.add(dataCenterVOs.get(destZoneId).getName());
|
||||
continue;
|
||||
}
|
||||
_resourceLimitMgr.incrementResourceCount(templateOwner.getId(), ResourceType.secondary_storage, template.getSize());
|
||||
}
|
||||
_resourceLimitMgr.incrementResourceCount(templateOwner.getId(), ResourceType.secondary_storage, template.getSize());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1312,46 +1312,45 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
|
||||
@Override
|
||||
public void validateCustomParameters(ServiceOfferingVO serviceOffering, Map<String, String> customParameters) {
|
||||
//TODO need to validate custom cpu, and memory against min/max CPU/Memory ranges from service_offering_details table
|
||||
if (customParameters.size() != 0) {
|
||||
Map<String, String> offeringDetails = serviceOfferingDetailsDao.listDetailsKeyPairs(serviceOffering.getId());
|
||||
if (serviceOffering.getCpu() == null) {
|
||||
int minCPU = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MIN_CPU_NUMBER), 1);
|
||||
int maxCPU = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MAX_CPU_NUMBER), Integer.MAX_VALUE);
|
||||
int cpuNumber = NumbersUtil.parseInt(customParameters.get(UsageEventVO.DynamicParameters.cpuNumber.name()), -1);
|
||||
Integer maxCPUCores = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.value() == 0 ? Integer.MAX_VALUE: ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.value();
|
||||
if (cpuNumber < minCPU || cpuNumber > maxCPU || cpuNumber > maxCPUCores) {
|
||||
throw new InvalidParameterValueException(String.format("Invalid CPU cores value, specify a value between %d and %d", minCPU, Math.min(maxCPUCores, maxCPU)));
|
||||
}
|
||||
} else if (customParameters.containsKey(UsageEventVO.DynamicParameters.cpuNumber.name())) {
|
||||
throw new InvalidParameterValueException("The CPU cores of this offering id:" + serviceOffering.getUuid()
|
||||
+ " is not customizable. This is predefined in the Template.");
|
||||
}
|
||||
|
||||
if (serviceOffering.getSpeed() == null) {
|
||||
String cpuSpeed = customParameters.get(UsageEventVO.DynamicParameters.cpuSpeed.name());
|
||||
if ((cpuSpeed == null) || (NumbersUtil.parseInt(cpuSpeed, -1) <= 0)) {
|
||||
throw new InvalidParameterValueException("Invalid CPU speed value, specify a value between 1 and " + Integer.MAX_VALUE);
|
||||
}
|
||||
} else if (!serviceOffering.isCustomCpuSpeedSupported() && customParameters.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) {
|
||||
throw new InvalidParameterValueException("The CPU speed of this offering id:" + serviceOffering.getUuid()
|
||||
+ " is not customizable. This is predefined in the Template.");
|
||||
}
|
||||
|
||||
if (serviceOffering.getRamSize() == null) {
|
||||
int minMemory = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MIN_MEMORY), 32);
|
||||
int maxMemory = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MAX_MEMORY), Integer.MAX_VALUE);
|
||||
int memory = NumbersUtil.parseInt(customParameters.get(UsageEventVO.DynamicParameters.memory.name()), -1);
|
||||
Integer maxRAMSize = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.value() == 0 ? Integer.MAX_VALUE: ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.value();
|
||||
if (memory < minMemory || memory > maxMemory || memory > maxRAMSize) {
|
||||
throw new InvalidParameterValueException(String.format("Invalid memory value, specify a value between %d and %d", minMemory, Math.min(maxRAMSize, maxMemory)));
|
||||
}
|
||||
} else if (customParameters.containsKey(UsageEventVO.DynamicParameters.memory.name())) {
|
||||
throw new InvalidParameterValueException("The memory of this offering id:" + serviceOffering.getUuid() + " is not customizable. This is predefined in the Template.");
|
||||
}
|
||||
} else {
|
||||
if (MapUtils.isEmpty(customParameters) && serviceOffering.isDynamic()) {
|
||||
throw new InvalidParameterValueException("Need to specify custom parameter values cpu, cpu speed and memory when using custom offering");
|
||||
}
|
||||
Map<String, String> offeringDetails = serviceOfferingDetailsDao.listDetailsKeyPairs(serviceOffering.getId());
|
||||
if (serviceOffering.getCpu() == null) {
|
||||
int minCPU = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MIN_CPU_NUMBER), 1);
|
||||
int maxCPU = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MAX_CPU_NUMBER), Integer.MAX_VALUE);
|
||||
int cpuNumber = NumbersUtil.parseInt(customParameters.get(UsageEventVO.DynamicParameters.cpuNumber.name()), -1);
|
||||
int maxCPUCores = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.value() == 0 ? Integer.MAX_VALUE: ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES.value();
|
||||
if (cpuNumber < minCPU || cpuNumber > maxCPU || cpuNumber > maxCPUCores) {
|
||||
throw new InvalidParameterValueException(String.format("Invalid CPU cores value, specify a value between %d and %d", minCPU, Math.min(maxCPUCores, maxCPU)));
|
||||
}
|
||||
} else if (customParameters.containsKey(UsageEventVO.DynamicParameters.cpuNumber.name())) {
|
||||
throw new InvalidParameterValueException("The CPU cores of this offering id:" + serviceOffering.getUuid()
|
||||
+ " is not customizable. This is predefined in the Template.");
|
||||
}
|
||||
|
||||
if (serviceOffering.getSpeed() == null) {
|
||||
String cpuSpeed = customParameters.get(UsageEventVO.DynamicParameters.cpuSpeed.name());
|
||||
if ((cpuSpeed == null) || (NumbersUtil.parseInt(cpuSpeed, -1) <= 0)) {
|
||||
throw new InvalidParameterValueException("Invalid CPU speed value, specify a value between 1 and " + Integer.MAX_VALUE);
|
||||
}
|
||||
} else if (!serviceOffering.isCustomCpuSpeedSupported() && customParameters.containsKey(UsageEventVO.DynamicParameters.cpuSpeed.name())) {
|
||||
throw new InvalidParameterValueException(String.format("The CPU speed of this offering id:%s"
|
||||
+ " is not customizable. This is predefined as %d MHz.",
|
||||
serviceOffering.getUuid(), serviceOffering.getSpeed()));
|
||||
}
|
||||
|
||||
if (serviceOffering.getRamSize() == null) {
|
||||
int minMemory = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MIN_MEMORY), 32);
|
||||
int maxMemory = NumbersUtil.parseInt(offeringDetails.get(ApiConstants.MAX_MEMORY), Integer.MAX_VALUE);
|
||||
int memory = NumbersUtil.parseInt(customParameters.get(UsageEventVO.DynamicParameters.memory.name()), -1);
|
||||
int maxRAMSize = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.value() == 0 ? Integer.MAX_VALUE: ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE.value();
|
||||
if (memory < minMemory || memory > maxMemory || memory > maxRAMSize) {
|
||||
throw new InvalidParameterValueException(String.format("Invalid memory value, specify a value between %d and %d", minMemory, Math.min(maxRAMSize, maxMemory)));
|
||||
}
|
||||
} else if (customParameters.containsKey(UsageEventVO.DynamicParameters.memory.name())) {
|
||||
throw new InvalidParameterValueException("The memory of this offering id:" + serviceOffering.getUuid() + " is not customizable. This is predefined in the Template.");
|
||||
}
|
||||
}
|
||||
|
||||
private UserVm upgradeStoppedVirtualMachine(Long vmId, Long svcOffId, Map<String, String> customParameters) throws ResourceAllocationException {
|
||||
|
|
@ -1383,30 +1382,30 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
_resourceLimitMgr.checkVmResourceLimitsForServiceOfferingChange(owner, vmInstance.isDisplay(), (long) currentCpu, (long) newCpu,
|
||||
(long) currentMemory, (long) newMemory, currentServiceOffering, newServiceOffering, template, reservations);
|
||||
}
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
_resourceLimitMgr.checkVmResourceLimitsForServiceOfferingChange(owner, vmInstance.isDisplay(), (long) currentCpu, (long) newCpu,
|
||||
(long) currentMemory, (long) newMemory, currentServiceOffering, newServiceOffering, template, reservations);
|
||||
}
|
||||
|
||||
// Check that the specified service offering ID is valid
|
||||
_itMgr.checkIfCanUpgrade(vmInstance, newServiceOffering);
|
||||
// Check that the specified service offering ID is valid
|
||||
_itMgr.checkIfCanUpgrade(vmInstance, newServiceOffering);
|
||||
|
||||
// Check if the new service offering can be applied to vm instance
|
||||
_accountMgr.checkAccess(owner, newServiceOffering, _dcDao.findById(vmInstance.getDataCenterId()));
|
||||
// Check if the new service offering can be applied to vm instance
|
||||
_accountMgr.checkAccess(owner, newServiceOffering, _dcDao.findById(vmInstance.getDataCenterId()));
|
||||
|
||||
// resize and migrate the root volume if required
|
||||
DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId());
|
||||
changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters, vmInstance.getDataCenterId());
|
||||
// resize and migrate the root volume if required
|
||||
DiskOfferingVO newDiskOffering = _diskOfferingDao.findById(newServiceOffering.getDiskOfferingId());
|
||||
changeDiskOfferingForRootVolume(vmId, newDiskOffering, customParameters, vmInstance.getDataCenterId());
|
||||
|
||||
_itMgr.upgradeVmDb(vmId, newServiceOffering, currentServiceOffering);
|
||||
_itMgr.upgradeVmDb(vmId, newServiceOffering, currentServiceOffering);
|
||||
|
||||
// Increment or decrement CPU and Memory count accordingly.
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
_resourceLimitMgr.updateVmResourceCountForServiceOfferingChange(owner.getAccountId(), vmInstance.isDisplay(), (long) currentCpu, (long) newCpu,
|
||||
(long) currentMemory, (long) newMemory, currentServiceOffering, newServiceOffering, template);
|
||||
}
|
||||
// Increment or decrement CPU and Memory count accordingly.
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
_resourceLimitMgr.updateVmResourceCountForServiceOfferingChange(owner.getAccountId(), vmInstance.isDisplay(), (long) currentCpu, (long) newCpu,
|
||||
(long) currentMemory, (long) newMemory, currentServiceOffering, newServiceOffering, template);
|
||||
}
|
||||
|
||||
return _vmDao.findById(vmInstance.getId());
|
||||
return _vmDao.findById(vmInstance.getId());
|
||||
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
|
|
@ -2419,34 +2418,34 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
// First check that the maximum number of UserVMs, CPU and Memory limit for the given
|
||||
// accountId will not be exceeded
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
resourceLimitService.checkVmResourceLimit(account, vm.isDisplayVm(), serviceOffering, template, reservations);
|
||||
}
|
||||
// First check that the maximum number of UserVMs, CPU and Memory limit for the given
|
||||
// accountId will not be exceeded
|
||||
if (!VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
resourceLimitService.checkVmResourceLimit(account, vm.isDisplayVm(), serviceOffering, template, reservations);
|
||||
}
|
||||
|
||||
_haMgr.cancelDestroy(vm, vm.getHostId());
|
||||
_haMgr.cancelDestroy(vm, vm.getHostId());
|
||||
|
||||
try {
|
||||
if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) {
|
||||
logger.debug("Unable to recover the vm {} because it is not in the correct state. current state: {}", vm, vm.getState());
|
||||
try {
|
||||
if (!_itMgr.stateTransitTo(vm, VirtualMachine.Event.RecoveryRequested, null)) {
|
||||
logger.debug("Unable to recover the vm {} because it is not in the correct state. current state: {}", vm, vm.getState());
|
||||
throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState()));
|
||||
}
|
||||
} catch (NoTransitionException e) {
|
||||
throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState()));
|
||||
}
|
||||
} catch (NoTransitionException e) {
|
||||
throw new InvalidParameterValueException(String.format("Unable to recover the vm %s because it is not in the correct state. current state: %s", vm, vm.getState()));
|
||||
}
|
||||
|
||||
// Recover the VM's disks
|
||||
List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
|
||||
for (VolumeVO volume : volumes) {
|
||||
if (volume.getVolumeType().equals(Volume.Type.ROOT)) {
|
||||
recoverRootVolume(volume, vmId);
|
||||
break;
|
||||
// Recover the VM's disks
|
||||
List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
|
||||
for (VolumeVO volume : volumes) {
|
||||
if (volume.getVolumeType().equals(Volume.Type.ROOT)) {
|
||||
recoverRootVolume(volume, vmId);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//Update Resource Count for the given account
|
||||
resourceCountIncrement(account.getId(), vm.isDisplayVm(), serviceOffering, template);
|
||||
//Update Resource Count for the given account
|
||||
resourceCountIncrement(account.getId(), vm.isDisplayVm(), serviceOffering, template);
|
||||
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
|
|
@ -2874,10 +2873,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
|
|||
Map<String, String> customParameters = new HashMap<>();
|
||||
customParameters.put(VmDetailConstants.CPU_NUMBER, String.valueOf(newCpu));
|
||||
customParameters.put(VmDetailConstants.MEMORY, String.valueOf(newMemory));
|
||||
if (svcOffering.isCustomCpuSpeedSupported()) {
|
||||
if (details.containsKey(VmDetailConstants.CPU_SPEED)) {
|
||||
customParameters.put(VmDetailConstants.CPU_SPEED, details.get(VmDetailConstants.CPU_SPEED));
|
||||
}
|
||||
validateCustomParameters(svcOffering, customParameters);
|
||||
} else {
|
||||
if (details.containsKey(VmDetailConstants.CPU_NUMBER) || details.containsKey(VmDetailConstants.MEMORY) ||
|
||||
details.containsKey(VmDetailConstants.CPU_SPEED)) {
|
||||
throw new InvalidParameterValueException("CPU number, Memory and CPU speed cannot be updated for a " +
|
||||
"non-dynamic offering");
|
||||
}
|
||||
}
|
||||
if (VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -758,11 +758,17 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme
|
|||
"Instance Snapshot reverting failed because the Instance is not in Running or Stopped state.");
|
||||
}
|
||||
|
||||
if (userVm.getState() == VirtualMachine.State.Running && vmSnapshotVo.getType() == VMSnapshot.Type.Disk || userVm.getState() == VirtualMachine.State.Stopped
|
||||
&& vmSnapshotVo.getType() == VMSnapshot.Type.DiskAndMemory) {
|
||||
if (userVm.getState() == VirtualMachine.State.Running && vmSnapshotVo.getType() == VMSnapshot.Type.Disk) {
|
||||
throw new InvalidParameterValueException(
|
||||
"Reverting to the Instance Snapshot is not allowed for running Instances as this would result in a Instance state change. For running Instances only Snapshots with memory can be reverted. In order to revert to a Snapshot without memory you need to first stop the Instance."
|
||||
+ " Snapshot");
|
||||
"Reverting to the Instance Snapshot is not allowed for running Instances as this would result in an Instance state change. " +
|
||||
"For running Instances only Snapshots with memory can be reverted. " +
|
||||
"In order to revert to a Snapshot without memory you need to first stop the Instance.");
|
||||
}
|
||||
|
||||
if (userVm.getState() == VirtualMachine.State.Stopped && vmSnapshotVo.getType() == VMSnapshot.Type.DiskAndMemory) {
|
||||
throw new InvalidParameterValueException(
|
||||
"Reverting to the Instance Snapshot is not allowed for stopped Instances when the Snapshot contains memory as this would result in an Instance state change. " +
|
||||
"In order to revert to a Snapshot with memory you need to first start the Instance.");
|
||||
}
|
||||
|
||||
// if snapshot is not created, error out
|
||||
|
|
@ -815,20 +821,36 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme
|
|||
}
|
||||
|
||||
/**
|
||||
* If snapshot was taken with a different service offering than actual used in vm, should change it back to it.
|
||||
* We also call <code>changeUserVmServiceOffering</code> in case the service offering is dynamic in order to
|
||||
* perform resource limit validation, as the amount of CPUs or memory may have been changed.
|
||||
* @param vmSnapshotVo vm snapshot
|
||||
* Check if service offering change is needed for user vm when reverting to vm snapshot.
|
||||
* Service offering change is needed when snapshot was taken with a different service offering than actual used in vm.
|
||||
* Service offering change is also needed when service offering is dynamic and the amount of cpu, memory or cpu speed
|
||||
* has been changed since snapshot was taken.
|
||||
* @param userVm
|
||||
* @param vmSnapshotVo
|
||||
* @return true if service offering change is needed; false otherwise
|
||||
*/
|
||||
protected void updateUserVmServiceOffering(UserVm userVm, VMSnapshotVO vmSnapshotVo) {
|
||||
protected boolean userVmServiceOfferingNeedsChange(UserVm userVm, VMSnapshotVO vmSnapshotVo) {
|
||||
if (vmSnapshotVo.getServiceOfferingId() != userVm.getServiceOfferingId()) {
|
||||
changeUserVmServiceOffering(userVm, vmSnapshotVo);
|
||||
return;
|
||||
return true;
|
||||
}
|
||||
ServiceOfferingVO serviceOffering = _serviceOfferingDao.findById(userVm.getServiceOfferingId());
|
||||
if (serviceOffering.isDynamic()) {
|
||||
changeUserVmServiceOffering(userVm, vmSnapshotVo);
|
||||
|
||||
ServiceOfferingVO currentServiceOffering = _serviceOfferingDao.findByIdIncludingRemoved(userVm.getId(), userVm.getServiceOfferingId());
|
||||
if (currentServiceOffering.isDynamic()) {
|
||||
Map<String, String> vmDetails = getVmMapDetails(vmSnapshotVo);
|
||||
ServiceOfferingVO newServiceOffering = _serviceOfferingDao.getComputeOffering(currentServiceOffering, vmDetails);
|
||||
|
||||
int newCpu = newServiceOffering.getCpu();
|
||||
int newMemory = newServiceOffering.getRamSize();
|
||||
int newSpeed = newServiceOffering.getSpeed();
|
||||
int currentCpu = currentServiceOffering.getCpu();
|
||||
int currentMemory = currentServiceOffering.getRamSize();
|
||||
int currentSpeed = currentServiceOffering.getSpeed();
|
||||
|
||||
if (newCpu != currentCpu || newMemory != currentMemory || newSpeed != currentSpeed) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -948,8 +970,10 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme
|
|||
Transaction.execute(new TransactionCallbackWithExceptionNoReturn<CloudRuntimeException>() {
|
||||
@Override
|
||||
public void doInTransactionWithoutResult(TransactionStatus status) throws CloudRuntimeException {
|
||||
if (userVmServiceOfferingNeedsChange(userVm, vmSnapshotVo)) {
|
||||
changeUserVmServiceOffering(userVm, vmSnapshotVo);
|
||||
}
|
||||
revertCustomServiceOfferingDetailsFromVmSnapshot(userVm, vmSnapshotVo);
|
||||
updateUserVmServiceOffering(userVm, vmSnapshotVo);
|
||||
}
|
||||
});
|
||||
return userVm;
|
||||
|
|
|
|||
|
|
@ -991,18 +991,12 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup);
|
||||
resourceLimitMgr.incrementResourceCount(vm.getAccountId(), Resource.ResourceType.backup_storage, backup.getSize());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
if (e instanceof ResourceAllocationException) {
|
||||
ResourceAllocationException rae = (ResourceAllocationException)e;
|
||||
if (isScheduledBackup && (Resource.ResourceType.backup.equals(rae.getResourceType()) ||
|
||||
Resource.ResourceType.backup_storage.equals(rae.getResourceType()))) {
|
||||
sendExceededBackupLimitAlert(owner.getUuid(), rae.getResourceType());
|
||||
}
|
||||
throw rae;
|
||||
} else if (e instanceof CloudRuntimeException) {
|
||||
throw (CloudRuntimeException)e;
|
||||
} catch (ResourceAllocationException e) {
|
||||
if (isScheduledBackup && (Resource.ResourceType.backup.equals(e.getResourceType()) ||
|
||||
Resource.ResourceType.backup_storage.equals(e.getResourceType()))) {
|
||||
sendExceededBackupLimitAlert(owner.getUuid(), e.getResourceType());
|
||||
}
|
||||
throw new CloudRuntimeException("Failed to create backup for VM with ID: " + vm.getUuid(), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1055,7 +1049,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
* @param vmId The ID of the VM associated with the backups
|
||||
* @param backupScheduleId Backup schedule ID of the backups
|
||||
*/
|
||||
protected void deleteOldestBackupFromScheduleIfRequired(Long vmId, long backupScheduleId) {
|
||||
protected void deleteOldestBackupFromScheduleIfRequired(Long vmId, long backupScheduleId) throws ResourceAllocationException {
|
||||
BackupScheduleVO backupScheduleVO = backupScheduleDao.findById(backupScheduleId);
|
||||
if (backupScheduleVO == null || backupScheduleVO.getMaxBackups() == 0) {
|
||||
logger.info("The schedule does not have a retention specified and, hence, not deleting any backups from it.", vmId);
|
||||
|
|
@ -1079,7 +1073,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
* @param amountOfBackupsToDelete Number of backups to be deleted from the list of backups
|
||||
* @param backupScheduleId ID of the backup schedule associated with the backups
|
||||
*/
|
||||
protected void deleteExcessBackups(List<BackupVO> backups, int amountOfBackupsToDelete, long backupScheduleId) {
|
||||
protected void deleteExcessBackups(List<BackupVO> backups, int amountOfBackupsToDelete, long backupScheduleId) throws ResourceAllocationException {
|
||||
logger.debug("Deleting the [{}] oldest backups from the schedule [ID: {}].", amountOfBackupsToDelete, backupScheduleId);
|
||||
|
||||
for (int i = 0; i < amountOfBackupsToDelete; i++) {
|
||||
|
|
@ -1677,7 +1671,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_DELETE, eventDescription = "deleting VM backup", async = true)
|
||||
public boolean deleteBackup(final Long backupId, final Boolean forced) {
|
||||
public boolean deleteBackup(final Long backupId, final Boolean forced) throws ResourceAllocationException {
|
||||
final BackupVO backup = backupDao.findByIdIncludingRemoved(backupId);
|
||||
if (backup == null) {
|
||||
throw new CloudRuntimeException("Backup " + backupId + " does not exist");
|
||||
|
|
@ -1702,7 +1696,7 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
return deleteCheckedBackup(forced, backupProvider, backup, vm);
|
||||
}
|
||||
|
||||
private boolean deleteCheckedBackup(Boolean forced, BackupProvider backupProvider, BackupVO backup, VMInstanceVO vm) {
|
||||
private boolean deleteCheckedBackup(Boolean forced, BackupProvider backupProvider, BackupVO backup, VMInstanceVO vm) throws ResourceAllocationException {
|
||||
Account owner = accountManager.getAccount(backup.getAccountId());
|
||||
long backupSize = backup.getSize() != null ? backup.getSize() : 0L;
|
||||
try (CheckedReservation backupReservation = new CheckedReservation(owner, Resource.ResourceType.backup,
|
||||
|
|
@ -1722,11 +1716,6 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager {
|
|||
}
|
||||
}
|
||||
throw new CloudRuntimeException("Failed to delete the backup");
|
||||
} catch (Exception e) {
|
||||
if (e instanceof CloudRuntimeException) {
|
||||
throw (CloudRuntimeException) e;
|
||||
}
|
||||
throw new CloudRuntimeException("Failed to delete the backup due to: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -163,13 +163,6 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
(cmd.getQuota() * Resource.ResourceType.bytesToGiB));
|
||||
}
|
||||
return bucket;
|
||||
} catch (Exception e) {
|
||||
if (e instanceof ResourceAllocationException) {
|
||||
throw (ResourceAllocationException)e;
|
||||
} else if (e instanceof CloudRuntimeException) {
|
||||
throw (CloudRuntimeException)e;
|
||||
}
|
||||
throw new CloudRuntimeException(String.format("Failed to create bucket due to: %s", e.getMessage()), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -236,7 +229,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_BUCKET_DELETE, eventDescription = "deleting bucket")
|
||||
public boolean deleteBucket(long bucketId, Account caller) {
|
||||
public boolean deleteBucket(long bucketId, Account caller) throws ResourceAllocationException {
|
||||
Bucket bucket = _bucketDao.findById(bucketId);
|
||||
if (bucket == null) {
|
||||
throw new InvalidParameterValueException("Unable to find bucket with ID: " + bucketId);
|
||||
|
|
@ -247,7 +240,7 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
return deleteCheckedBucket(objectStore, bucket, objectStoreVO);
|
||||
}
|
||||
|
||||
private boolean deleteCheckedBucket(ObjectStoreEntity objectStore, Bucket bucket, ObjectStoreVO objectStoreVO) {
|
||||
private boolean deleteCheckedBucket(ObjectStoreEntity objectStore, Bucket bucket, ObjectStoreVO objectStoreVO) throws ResourceAllocationException {
|
||||
Account owner = _accountMgr.getAccount(bucket.getAccountId());
|
||||
try (CheckedReservation bucketReservation = new CheckedReservation(owner, Resource.ResourceType.bucket,
|
||||
bucket.getId(), null, -1L, reservationDao, resourceLimitManager);
|
||||
|
|
@ -265,11 +258,6 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
return true;
|
||||
}
|
||||
return false;
|
||||
} catch (Exception e) {
|
||||
if (e instanceof CloudRuntimeException) {
|
||||
throw (CloudRuntimeException) e;
|
||||
}
|
||||
throw new CloudRuntimeException(String.format("Failed to delete bucket due to: %s", e.getMessage()), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -284,16 +272,6 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
_accountMgr.checkAccess(caller, null, true, bucket);
|
||||
ObjectStoreVO objectStoreVO = _objectStoreDao.findById(bucket.getObjectStoreId());
|
||||
ObjectStoreEntity objectStore = (ObjectStoreEntity)_dataStoreMgr.getDataStore(objectStoreVO.getId(), DataStoreRole.Object);
|
||||
Integer quota = cmd.getQuota();
|
||||
Integer quotaDelta = null;
|
||||
|
||||
if (quota != null) {
|
||||
quotaDelta = quota - bucket.getQuota();
|
||||
if (quotaDelta > 0) {
|
||||
Account owner = _accountMgr.getActiveAccountById(bucket.getAccountId());
|
||||
resourceLimitManager.checkResourceLimit(owner, Resource.ResourceType.object_storage, (quotaDelta * Resource.ResourceType.bytesToGiB));
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (cmd.getEncryption() != null) {
|
||||
|
|
@ -319,16 +297,8 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
bucket.setPolicy(cmd.getPolicy());
|
||||
}
|
||||
|
||||
if (cmd.getQuota() != null) {
|
||||
objectStore.setQuota(bucketTO, cmd.getQuota());
|
||||
bucket.setQuota(cmd.getQuota());
|
||||
if (quotaDelta > 0) {
|
||||
resourceLimitManager.incrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, (quotaDelta * Resource.ResourceType.bytesToGiB));
|
||||
} else {
|
||||
resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, ((-quotaDelta) * Resource.ResourceType.bytesToGiB));
|
||||
}
|
||||
_objectStoreDao.updateAllocatedSize(objectStoreVO, (quotaDelta * Resource.ResourceType.bytesToGiB));
|
||||
}
|
||||
updateBucketQuota(cmd, bucket, objectStore, objectStoreVO, bucketTO);
|
||||
|
||||
_bucketDao.update(bucket.getId(), bucket);
|
||||
} catch (Exception e) {
|
||||
throw new CloudRuntimeException("Error while updating bucket: " +bucket.getName() +". "+e.getMessage());
|
||||
|
|
@ -337,6 +307,31 @@ public class BucketApiServiceImpl extends ManagerBase implements BucketApiServic
|
|||
return true;
|
||||
}
|
||||
|
||||
private void updateBucketQuota(UpdateBucketCmd cmd, BucketVO bucket, ObjectStoreEntity objectStore, ObjectStoreVO objectStoreVO, BucketTO bucketTO) throws ResourceAllocationException {
|
||||
Integer quota = cmd.getQuota();
|
||||
if (quota == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
int quotaDelta = quota - bucket.getQuota();
|
||||
objectStore.setQuota(bucketTO, quota);
|
||||
bucket.setQuota(quota);
|
||||
|
||||
long diff = quotaDelta * Resource.ResourceType.bytesToGiB;
|
||||
|
||||
if (quotaDelta < 0) {
|
||||
resourceLimitManager.decrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, Math.abs(diff));
|
||||
_objectStoreDao.updateAllocatedSize(objectStoreVO, diff);
|
||||
return;
|
||||
}
|
||||
|
||||
Account owner = _accountMgr.getActiveAccountById(bucket.getAccountId());
|
||||
try (CheckedReservation objectStorageReservation = new CheckedReservation(owner, Resource.ResourceType.object_storage, diff, reservationDao, resourceLimitManager)) {
|
||||
resourceLimitManager.incrementResourceCount(bucket.getAccountId(), Resource.ResourceType.object_storage, diff);
|
||||
_objectStoreDao.updateAllocatedSize(objectStoreVO, diff);
|
||||
}
|
||||
}
|
||||
|
||||
public void getBucketUsage() {
|
||||
//ToDo track usage one last time when object store or bucket is removed
|
||||
List<ObjectStoreVO> objectStores = _objectStoreDao.listObjectStores();
|
||||
|
|
|
|||
|
|
@ -212,20 +212,20 @@ public class VolumeImportUnmanageManagerImpl implements VolumeImportUnmanageServ
|
|||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
// 6. check resource limitation
|
||||
checkResourceLimitForImportVolume(owner, volume, diskOffering, reservations);
|
||||
// 6. check resource limitation
|
||||
checkResourceLimitForImportVolume(owner, volume, diskOffering, reservations);
|
||||
|
||||
// 7. create records
|
||||
String volumeName = StringUtils.isNotBlank(cmd.getName()) ? cmd.getName().trim() : volumePath;
|
||||
VolumeVO volumeVO = importVolumeInternal(volume, diskOffering, owner, pool, volumeName);
|
||||
// 7. create records
|
||||
String volumeName = StringUtils.isNotBlank(cmd.getName()) ? cmd.getName().trim() : volumePath;
|
||||
VolumeVO volumeVO = importVolumeInternal(volume, diskOffering, owner, pool, volumeName);
|
||||
|
||||
// 8. Update resource count
|
||||
updateResourceLimitForVolumeImport(volumeVO);
|
||||
// 8. Update resource count
|
||||
updateResourceLimitForVolumeImport(volumeVO);
|
||||
|
||||
// 9. Publish event
|
||||
publicUsageEventForVolumeImportAndUnmanage(volumeVO, true);
|
||||
// 9. Publish event
|
||||
publicUsageEventForVolumeImportAndUnmanage(volumeVO, true);
|
||||
|
||||
return responseGenerator.createVolumeResponse(ResponseObject.ResponseView.Full, volumeVO);
|
||||
return responseGenerator.createVolumeResponse(ResponseObject.ResponseView.Full, volumeVO);
|
||||
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
|
|
|
|||
|
|
@ -212,6 +212,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
private static final List<Storage.StoragePoolType> forceConvertToPoolAllowedTypes =
|
||||
Arrays.asList(Storage.StoragePoolType.NetworkFilesystem, Storage.StoragePoolType.Filesystem,
|
||||
Storage.StoragePoolType.SharedMountPoint);
|
||||
private static final String DETAIL_VDDK_TRANSPORTS = "vddk.transports";
|
||||
private static final String DETAIL_VDDK_THUMBPRINT = "vddk.thumbprint";
|
||||
|
||||
ConfigKey<Boolean> ConvertVmwareInstanceToKvmExtraParamsAllowed = new ConfigKey<>(Boolean.class,
|
||||
"convert.vmware.instance.to.kvm.extra.params.allowed",
|
||||
|
|
@ -1498,7 +1500,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
String hostName, Account caller, Account owner, long userId,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap,
|
||||
Map<String, Long> nicNetworkMap, Map<String, Network.IpAddresses> nicIpAddressMap,
|
||||
Map<String, String> details, Boolean migrateAllowed, List<String> managedVms, boolean forced) {
|
||||
Map<String, String> details, Boolean migrateAllowed, List<String> managedVms, boolean forced) throws ResourceAllocationException {
|
||||
UserVm userVm = null;
|
||||
for (HostVO host : hosts) {
|
||||
HashMap<String, UnmanagedInstanceTO> unmanagedInstances = getUnmanagedInstancesForHost(host, instanceName, managedVms);
|
||||
|
|
@ -1542,11 +1544,18 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
|
||||
template.setGuestOSId(guestOSHypervisor.getGuestOsId());
|
||||
}
|
||||
userVm = importVirtualMachineInternal(unmanagedInstance, instanceName, zone, cluster, host,
|
||||
template, displayName, hostName, CallContext.current().getCallingAccount(), owner, userId,
|
||||
serviceOffering, dataDiskOfferingMap,
|
||||
nicNetworkMap, nicIpAddressMap, null,
|
||||
details, migrateAllowed, forced, true);
|
||||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
checkVmResourceLimitsForUnmanagedInstanceImport(owner, unmanagedInstance, serviceOffering, template, reservations);
|
||||
userVm = importVirtualMachineInternal(unmanagedInstance, instanceName, zone, cluster, host,
|
||||
template, displayName, hostName, CallContext.current().getCallingAccount(), owner, userId,
|
||||
serviceOffering, dataDiskOfferingMap,
|
||||
nicNetworkMap, nicIpAddressMap, null,
|
||||
details, migrateAllowed, forced, true);
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (userVm != null) {
|
||||
|
|
@ -1556,6 +1565,36 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
return userVm;
|
||||
}
|
||||
|
||||
protected void checkVmResourceLimitsForUnmanagedInstanceImport(Account owner, UnmanagedInstanceTO unmanagedInstance, ServiceOfferingVO serviceOffering, VMTemplateVO template, List<Reserver> reservations) throws ResourceAllocationException {
|
||||
// When importing an unmanaged instance, the amount of CPUs and memory is obtained from the hypervisor unless powered off
|
||||
// and not using a dynamic offering, unlike the external VM import that always obtains it from the compute offering
|
||||
Integer cpu = serviceOffering.getCpu();
|
||||
Integer memory = serviceOffering.getRamSize();
|
||||
|
||||
if (serviceOffering.isDynamic() || !UnmanagedInstanceTO.PowerState.PowerOff.equals(unmanagedInstance.getPowerState())) {
|
||||
cpu = unmanagedInstance.getCpuCores();
|
||||
memory = unmanagedInstance.getMemory();
|
||||
}
|
||||
|
||||
if (cpu == null || cpu == 0) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("CPU cores [%s] is not valid for importing VM [%s].", cpu, unmanagedInstance.getName()));
|
||||
}
|
||||
if (memory == null || memory == 0) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Memory [%s] is not valid for importing VM [%s].", memory, unmanagedInstance.getName()));
|
||||
}
|
||||
|
||||
List<String> resourceLimitHostTags = resourceLimitService.getResourceLimitHostTags(serviceOffering, template);
|
||||
|
||||
CheckedReservation vmReservation = new CheckedReservation(owner, Resource.ResourceType.user_vm, resourceLimitHostTags, 1L, reservationDao, resourceLimitService);
|
||||
reservations.add(vmReservation);
|
||||
|
||||
CheckedReservation cpuReservation = new CheckedReservation(owner, Resource.ResourceType.cpu, resourceLimitHostTags, cpu.longValue(), reservationDao, resourceLimitService);
|
||||
reservations.add(cpuReservation);
|
||||
|
||||
CheckedReservation memReservation = new CheckedReservation(owner, Resource.ResourceType.memory, resourceLimitHostTags, memory.longValue(), reservationDao, resourceLimitService);
|
||||
reservations.add(memReservation);
|
||||
}
|
||||
|
||||
private Pair<UnmanagedInstanceTO, Boolean> getSourceVmwareUnmanagedInstance(String vcenter, String datacenterName, String username,
|
||||
String password, String clusterName, String sourceHostName,
|
||||
String sourceVM, ServiceOfferingVO serviceOffering) {
|
||||
|
|
@ -1612,7 +1651,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
Account caller, Account owner, long userId,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap,
|
||||
Map<String, Long> nicNetworkMap, Map<String, Network.IpAddresses> nicIpAddressMap,
|
||||
Map<String, String> details, ImportVmCmd cmd, boolean forced) {
|
||||
Map<String, String> details, ImportVmCmd cmd, boolean forced) throws ResourceAllocationException {
|
||||
Long existingVcenterId = cmd.getExistingVcenterId();
|
||||
String vcenter = cmd.getVcenter();
|
||||
String datacenterName = cmd.getDatacenterName();
|
||||
|
|
@ -1626,6 +1665,8 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
String extraParams = cmd.getExtraParams();
|
||||
boolean forceConvertToPool = cmd.getForceConvertToPool();
|
||||
Long guestOsId = cmd.getGuestOsId();
|
||||
boolean forceMsToImportVmFiles = Boolean.TRUE.equals(cmd.getForceMsToImportVmFiles());
|
||||
boolean useVddk = cmd.getUseVddk();
|
||||
|
||||
if ((existingVcenterId == null && vcenter == null) || (existingVcenterId != null && vcenter != null)) {
|
||||
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
|
||||
|
|
@ -1635,8 +1676,14 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
|
||||
"Please set all the information for a vCenter IP/Name, datacenter, username and password");
|
||||
}
|
||||
if (forceMsToImportVmFiles && useVddk) {
|
||||
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
|
||||
String.format("Parameters %s and %s are mutually exclusive",
|
||||
ApiConstants.FORCE_MS_TO_IMPORT_VM_FILES, ApiConstants.USE_VDDK));
|
||||
}
|
||||
|
||||
checkConversionStoragePool(convertStoragePoolId, forceConvertToPool);
|
||||
validateSelectedConversionStoragePoolForVddk(useVddk, convertStoragePoolId, serviceOffering, dataDiskOfferingMap);
|
||||
|
||||
checkExtraParamsAllowed(extraParams);
|
||||
|
||||
|
|
@ -1658,10 +1705,18 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
DataStoreTO temporaryConvertLocation = null;
|
||||
String ovfTemplateOnConvertLocation = null;
|
||||
ImportVmTask importVMTask = null;
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
HostVO convertHost = selectKVMHostForConversionInCluster(destinationCluster, convertInstanceHostId);
|
||||
HostVO importHost = selectKVMHostForImportingInCluster(destinationCluster, importInstanceHostId);
|
||||
CheckConvertInstanceAnswer conversionSupportAnswer = checkConversionSupportOnHost(convertHost, sourceVMName, false);
|
||||
HostVO convertHost = selectKVMHostForConversionInCluster(destinationCluster, convertInstanceHostId, useVddk);
|
||||
HostVO importHost = (useVddk && importInstanceHostId == null)
|
||||
? convertHost
|
||||
: selectKVMHostForImportingInCluster(destinationCluster, importInstanceHostId);
|
||||
|
||||
boolean isOvfExportSupported = false;
|
||||
CheckConvertInstanceAnswer conversionSupportAnswer = checkConversionSupportOnHost(convertHost, sourceVMName, false, useVddk, details);
|
||||
if (!useVddk) {
|
||||
isOvfExportSupported = conversionSupportAnswer.isOvfExportSupported();
|
||||
}
|
||||
logger.debug("The host {} is selected to execute the conversion of the " +
|
||||
"instance {} from VMware to KVM ", convertHost, sourceVMName);
|
||||
|
||||
|
|
@ -1680,14 +1735,18 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
Pair<UnmanagedInstanceTO, Boolean> sourceInstanceDetails = getSourceVmwareUnmanagedInstance(vcenter, datacenterName, username, password, clusterName, sourceHostName, sourceVMName, serviceOffering);
|
||||
sourceVMwareInstance = sourceInstanceDetails.first();
|
||||
isClonedInstance = sourceInstanceDetails.second();
|
||||
|
||||
// Ensure that the configured resource limits will not be exceeded before beginning the conversion process
|
||||
checkVmResourceLimitsForUnmanagedInstanceImport(owner, sourceVMwareInstance, serviceOffering, template, reservations);
|
||||
|
||||
boolean isWindowsVm = sourceVMwareInstance.getOperatingSystem().toLowerCase().contains("windows");
|
||||
if (isWindowsVm) {
|
||||
checkConversionSupportOnHost(convertHost, sourceVMName, true);
|
||||
checkConversionSupportOnHost(convertHost, sourceVMName, true, useVddk, details);
|
||||
}
|
||||
|
||||
checkNetworkingBeforeConvertingVmwareInstance(zone, owner, displayName, hostName, sourceVMwareInstance, nicNetworkMap, nicIpAddressMap, forced);
|
||||
UnmanagedInstanceTO convertedInstance;
|
||||
if (cmd.getForceMsToImportVmFiles() || !conversionSupportAnswer.isOvfExportSupported()) {
|
||||
if (!useVddk && (forceMsToImportVmFiles || !isOvfExportSupported)) {
|
||||
// Uses MS for OVF export to temporary conversion location
|
||||
int noOfThreads = UnmanagedVMsManager.ThreadsOnMSToImportVMwareVMFiles.value();
|
||||
importVmTasksManager.updateImportVMTaskStep(importVMTask, zone, owner, convertHost, importHost, null, ConvertingInstance);
|
||||
|
|
@ -1699,12 +1758,12 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
serviceOffering, dataDiskOfferingMap, temporaryConvertLocation,
|
||||
ovfTemplateOnConvertLocation, forceConvertToPool, extraParams);
|
||||
} else {
|
||||
// Uses KVM Host for OVF export to temporary conversion location, through ovftool
|
||||
// Uses KVM Host for direct conversion using VDDK, or for OVF export to temporary conversion location through ovftool
|
||||
importVmTasksManager.updateImportVMTaskStep(importVMTask, zone, owner, convertHost, importHost, null, ConvertingInstance);
|
||||
convertedInstance = convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(
|
||||
convertedInstance = convertVmwareInstanceToKVMUsingVDDKOrAfterExportingOVFToConvertLocation(
|
||||
sourceVMName, sourceVMwareInstance, convertHost, importHost,
|
||||
convertStoragePools, serviceOffering, dataDiskOfferingMap,
|
||||
temporaryConvertLocation, vcenter, username, password, datacenterName, forceConvertToPool, extraParams);
|
||||
temporaryConvertLocation, vcenter, username, password, datacenterName, forceConvertToPool, extraParams, useVddk, details);
|
||||
}
|
||||
|
||||
sanitizeConvertedInstance(convertedInstance, sourceVMwareInstance);
|
||||
|
|
@ -1732,6 +1791,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
if (temporaryConvertLocation != null && StringUtils.isNotBlank(ovfTemplateOnConvertLocation)) {
|
||||
removeTemplate(temporaryConvertLocation, ovfTemplateOnConvertLocation);
|
||||
}
|
||||
ReservationHelper.closeAll(reservations);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1759,6 +1819,45 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
}
|
||||
}
|
||||
|
||||
protected void validateSelectedConversionStoragePoolForVddk(boolean useVddk, Long convertStoragePoolId,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap) {
|
||||
if (!useVddk || convertStoragePoolId == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
StoragePoolVO selectedStoragePool = primaryDataStoreDao.findById(convertStoragePoolId);
|
||||
if (selectedStoragePool == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (serviceOffering.getDiskOfferingId() != null) {
|
||||
DiskOfferingVO rootDiskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId());
|
||||
if (rootDiskOffering == null) {
|
||||
throw new InvalidParameterValueException(String.format("Cannot find disk offering with ID %s that belongs to the service offering %s",
|
||||
serviceOffering.getDiskOfferingId(), serviceOffering.getName()));
|
||||
}
|
||||
if (!volumeApiService.doesStoragePoolSupportDiskOffering(selectedStoragePool, rootDiskOffering)) {
|
||||
throw new InvalidParameterValueException(String.format("The root disk offering '%s' is not supported by the selected conversion storage pool '%s'. " +
|
||||
"When using VDDK, all selected disk offerings must be compatible with the conversion storage pool, as it will become the primary storage for the imported volumes.",
|
||||
rootDiskOffering.getName(), selectedStoragePool.getName()));
|
||||
}
|
||||
}
|
||||
|
||||
if (MapUtils.isNotEmpty(dataDiskOfferingMap)) {
|
||||
for (Long diskOfferingId : dataDiskOfferingMap.values()) {
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findById(diskOfferingId);
|
||||
if (diskOffering == null) {
|
||||
throw new InvalidParameterValueException(String.format("Cannot find disk offering with ID %s", diskOfferingId));
|
||||
}
|
||||
if (!volumeApiService.doesStoragePoolSupportDiskOffering(selectedStoragePool, diskOffering)) {
|
||||
throw new InvalidParameterValueException(String.format("The data disk offering '%s' is not supported by the selected conversion storage pool '%s'. " +
|
||||
"When using VDDK, all selected disk offerings must be compatible with the conversion storage pool, as it will become the primary storage for the imported volumes.",
|
||||
diskOffering.getName(), selectedStoragePool.getName()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void checkNetworkingBeforeConvertingVmwareInstance(DataCenter zone, Account owner, String displayName,
|
||||
String hostName, UnmanagedInstanceTO sourceVMwareInstance,
|
||||
Map<String, Long> nicNetworkMap,
|
||||
|
|
@ -1921,7 +2020,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
throw new CloudRuntimeException(err);
|
||||
}
|
||||
|
||||
HostVO selectKVMHostForConversionInCluster(Cluster destinationCluster, Long convertInstanceHostId) {
|
||||
HostVO selectKVMHostForConversionInCluster(Cluster destinationCluster, Long convertInstanceHostId, boolean useVddk) {
|
||||
if (convertInstanceHostId != null) {
|
||||
HostVO selectedHost = hostDao.findById(convertInstanceHostId);
|
||||
String err = null;
|
||||
|
|
@ -1955,24 +2054,58 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
// Auto select host with conversion capability
|
||||
List<HostVO> hosts = hostDao.listByClusterHypervisorTypeAndHostCapability(destinationCluster.getId(), destinationCluster.getHypervisorType(), Host.HOST_INSTANCE_CONVERSION);
|
||||
if (CollectionUtils.isNotEmpty(hosts)) {
|
||||
return hosts.get(new Random().nextInt(hosts.size()));
|
||||
if (useVddk) {
|
||||
List<HostVO> vddkHosts = filterHostsWithVddkSupport(hosts);
|
||||
if (CollectionUtils.isNotEmpty(vddkHosts)) {
|
||||
hosts = vddkHosts;
|
||||
}
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(hosts)) {
|
||||
return hosts.get(new Random().nextInt(hosts.size()));
|
||||
}
|
||||
}
|
||||
|
||||
// Try without host capability check
|
||||
hosts = hostDao.listByClusterAndHypervisorType(destinationCluster.getId(), destinationCluster.getHypervisorType());
|
||||
if (CollectionUtils.isNotEmpty(hosts)) {
|
||||
return hosts.get(new Random().nextInt(hosts.size()));
|
||||
if (useVddk) {
|
||||
List<HostVO> vddkHosts = filterHostsWithVddkSupport(hosts);
|
||||
if (CollectionUtils.isNotEmpty(vddkHosts)) {
|
||||
hosts = vddkHosts;
|
||||
}
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(hosts)) {
|
||||
return hosts.get(new Random().nextInt(hosts.size()));
|
||||
}
|
||||
}
|
||||
|
||||
String err = String.format("Could not find any suitable %s host in cluster %s to perform the instance conversion",
|
||||
destinationCluster.getHypervisorType(), destinationCluster);
|
||||
String err = useVddk
|
||||
? String.format("Could not find any suitable %s host in cluster %s with '%s' configured to perform the VDDK-based instance conversion",
|
||||
destinationCluster.getHypervisorType(), destinationCluster, Host.HOST_VDDK_SUPPORT)
|
||||
: String.format("Could not find any suitable %s host in cluster %s to perform the instance conversion",
|
||||
destinationCluster.getHypervisorType(), destinationCluster);
|
||||
logger.error(err);
|
||||
throw new CloudRuntimeException(err);
|
||||
}
|
||||
|
||||
private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHost, String sourceVM, boolean checkWindowsGuestConversionSupport) {
|
||||
logger.debug(String.format("Checking the %s conversion support on the host %s", checkWindowsGuestConversionSupport? "windows guest" : "", convertHost));
|
||||
CheckConvertInstanceCommand cmd = new CheckConvertInstanceCommand(checkWindowsGuestConversionSupport);
|
||||
private List<HostVO> filterHostsWithVddkSupport(List<HostVO> hosts) {
|
||||
return hosts.stream().filter(h -> {
|
||||
hostDao.loadDetails(h);
|
||||
return Boolean.parseBoolean(h.getDetail(Host.HOST_VDDK_SUPPORT));
|
||||
}).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private CheckConvertInstanceAnswer checkConversionSupportOnHost(HostVO convertHost, String sourceVM,
|
||||
boolean checkWindowsGuestConversionSupport,
|
||||
boolean useVddk, Map<String, String> details) {
|
||||
logger.debug(String.format("Checking the %s%s conversion support on the host %s",
|
||||
useVddk ? "VDDK " : "",
|
||||
checkWindowsGuestConversionSupport ? "windows guest " : "",
|
||||
convertHost));
|
||||
CheckConvertInstanceCommand cmd = new CheckConvertInstanceCommand(checkWindowsGuestConversionSupport, useVddk);
|
||||
if (MapUtils.isNotEmpty(details)) {
|
||||
cmd.setVddkLibDir(StringUtils.trimToNull(details.get(Host.HOST_VDDK_LIB_DIR)));
|
||||
}
|
||||
int timeoutSeconds = 60;
|
||||
cmd.setWait(timeoutSeconds);
|
||||
|
||||
|
|
@ -2006,7 +2139,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} using OVF {} on conversion datastore",
|
||||
sourceVM, convertHost, ovfTemplateDirConvertLocation);
|
||||
|
||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM);
|
||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVM, sourceVMwareInstance.getClusterName(), sourceVMwareInstance.getHostName());
|
||||
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
||||
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
||||
Hypervisor.HypervisorType.KVM, temporaryConvertLocation,
|
||||
|
|
@ -2021,15 +2154,16 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
remoteInstanceTO, destinationStoragePools, temporaryConvertLocation, forceConvertToPool);
|
||||
}
|
||||
|
||||
private UnmanagedInstanceTO convertVmwareInstanceToKVMAfterExportingOVFToConvertLocation(
|
||||
private UnmanagedInstanceTO convertVmwareInstanceToKVMUsingVDDKOrAfterExportingOVFToConvertLocation(
|
||||
String sourceVM, UnmanagedInstanceTO sourceVMwareInstance, HostVO convertHost,
|
||||
HostVO importHost, List<StoragePoolVO> convertStoragePools,
|
||||
ServiceOfferingVO serviceOffering, Map<String, Long> dataDiskOfferingMap,
|
||||
DataStoreTO temporaryConvertLocation, String vcenterHost, String vcenterUsername,
|
||||
String vcenterPassword, String datacenterName, boolean forceConvertToPool, String extraParams) {
|
||||
String vcenterPassword, String datacenterName, boolean forceConvertToPool, String extraParams,
|
||||
boolean useVddk, Map<String, String> details) {
|
||||
logger.debug("Delegating the conversion of instance {} from VMware to KVM to the host {} after OVF export through ovftool", sourceVM, convertHost);
|
||||
|
||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), sourceVMwareInstance.getPath(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName);
|
||||
RemoteInstanceTO remoteInstanceTO = new RemoteInstanceTO(sourceVMwareInstance.getName(), sourceVMwareInstance.getPath(), vcenterHost, vcenterUsername, vcenterPassword, datacenterName, sourceVMwareInstance.getClusterName(), sourceVMwareInstance.getHostName());
|
||||
List<String> destinationStoragePools = selectInstanceConversionStoragePools(convertStoragePools, sourceVMwareInstance.getDisks(), serviceOffering, dataDiskOfferingMap);
|
||||
ConvertInstanceCommand cmd = new ConvertInstanceCommand(remoteInstanceTO,
|
||||
Hypervisor.HypervisorType.KVM, temporaryConvertLocation, null, false, true, sourceVM);
|
||||
|
|
@ -2044,10 +2178,22 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
if (StringUtils.isNotBlank(extraParams)) {
|
||||
cmd.setExtraParams(extraParams);
|
||||
}
|
||||
cmd.setUseVddk(useVddk);
|
||||
applyVddkOverridesFromDetails(cmd, details);
|
||||
return convertAndImportToKVM(cmd, convertHost, importHost, sourceVM,
|
||||
remoteInstanceTO, destinationStoragePools, temporaryConvertLocation, forceConvertToPool);
|
||||
}
|
||||
|
||||
private void applyVddkOverridesFromDetails(ConvertInstanceCommand cmd, Map<String, String> details) {
|
||||
if (MapUtils.isEmpty(details)) {
|
||||
return;
|
||||
}
|
||||
|
||||
cmd.setVddkLibDir(StringUtils.trimToNull(details.get(Host.HOST_VDDK_LIB_DIR)));
|
||||
cmd.setVddkTransports(StringUtils.trimToNull(details.get(DETAIL_VDDK_TRANSPORTS)));
|
||||
cmd.setVddkThumbprint(StringUtils.trimToNull(details.get(DETAIL_VDDK_THUMBPRINT)));
|
||||
}
|
||||
|
||||
private UnmanagedInstanceTO convertAndImportToKVM(ConvertInstanceCommand convertInstanceCommand, HostVO convertHost, HostVO importHost,
|
||||
String sourceVM,
|
||||
RemoteInstanceTO remoteInstanceTO,
|
||||
|
|
@ -2593,6 +2739,7 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
checkVmResourceLimitsForExternalKvmVmImport(owner, serviceOffering, (VMTemplateVO) template, details, reservations);
|
||||
checkVolumeResourceLimitsForExternalKvmVmImport(owner, rootDisk, dataDisks, diskOffering, dataDiskOfferingMap, reservations);
|
||||
|
||||
// Check NICs and supplied networks
|
||||
|
|
@ -2757,101 +2904,138 @@ public class UnmanagedVMsManagerImpl implements UnmanagedVMsManager {
|
|||
profiles.add(nicProfile);
|
||||
networkNicMap.put(network.getUuid(), profiles);
|
||||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
try {
|
||||
checkVmResourceLimitsForExternalKvmVmImport(owner, serviceOffering, (VMTemplateVO) template, details, reservations);
|
||||
userVm = userVmManager.importVM(zone, null, template, null, displayName, owner,
|
||||
null, caller, true, null, owner.getAccountId(), userId,
|
||||
serviceOffering, null, null, hostName,
|
||||
Hypervisor.HypervisorType.KVM, allDetails, powerState, networkNicMap);
|
||||
} catch (InsufficientCapacityException ice) {
|
||||
|
||||
if (userVm == null) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName));
|
||||
}
|
||||
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId());
|
||||
List<String> resourceLimitStorageTags = resourceLimitService.getResourceLimitStorageTagsForResourceCountOperation(true, diskOffering);
|
||||
CheckedReservation volumeReservation = new CheckedReservation(owner, Resource.ResourceType.volume, resourceLimitStorageTags,
|
||||
CollectionUtils.isNotEmpty(resourceLimitStorageTags) ? 1L : 0L, reservationDao, resourceLimitService);
|
||||
reservations.add(volumeReservation);
|
||||
|
||||
String rootVolumeName = String.format("ROOT-%s", userVm.getId());
|
||||
DiskProfile diskProfile = volumeManager.allocateRawVolume(Volume.Type.ROOT, rootVolumeName, diskOffering, null, null, null, userVm, template, owner, null, false);
|
||||
|
||||
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(userVm, template, serviceOffering, owner, null);
|
||||
ServiceOfferingVO dummyOffering = serviceOfferingDao.findById(userVm.getId(), serviceOffering.getId());
|
||||
profile.setServiceOffering(dummyOffering);
|
||||
DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList();
|
||||
final DataCenterDeployment plan = new DataCenterDeployment(zone.getId(), null, null, hostId, poolId, null);
|
||||
DeployDestination dest = null;
|
||||
try {
|
||||
dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null);
|
||||
} catch (Exception e) {
|
||||
logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e);
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()));
|
||||
}
|
||||
if(dest == null) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. Suitable deployment destination not found", userVm.getInstanceName()));
|
||||
}
|
||||
|
||||
Map<Volume, StoragePool> storage = dest.getStorageForDisks();
|
||||
Volume volume = volumeDao.findById(diskProfile.getVolumeId());
|
||||
StoragePool storagePool = storage.get(volume);
|
||||
CheckVolumeCommand checkVolumeCommand = new CheckVolumeCommand();
|
||||
checkVolumeCommand.setSrcFile(diskPath);
|
||||
StorageFilerTO storageTO = new StorageFilerTO(storagePool);
|
||||
checkVolumeCommand.setStorageFilerTO(storageTO);
|
||||
Answer answer = agentManager.easySend(dest.getHost().getId(), checkVolumeCommand);
|
||||
if (!(answer instanceof CheckVolumeAnswer)) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new CloudRuntimeException("Disk not found or is invalid");
|
||||
}
|
||||
CheckVolumeAnswer checkVolumeAnswer = (CheckVolumeAnswer) answer;
|
||||
try {
|
||||
checkVolume(checkVolumeAnswer.getVolumeDetails());
|
||||
} catch (CloudRuntimeException e) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw e;
|
||||
}
|
||||
if (!checkVolumeAnswer.getResult()) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new CloudRuntimeException("Disk not found or is invalid");
|
||||
}
|
||||
diskProfile.setSize(checkVolumeAnswer.getSize());
|
||||
|
||||
CheckedReservation primaryStorageReservation = new CheckedReservation(owner, Resource.ResourceType.primary_storage, resourceLimitStorageTags,
|
||||
CollectionUtils.isNotEmpty(resourceLimitStorageTags) ? diskProfile.getSize() : 0L, reservationDao, resourceLimitService);
|
||||
reservations.add(primaryStorageReservation);
|
||||
|
||||
List<Pair<DiskProfile, StoragePool>> diskProfileStoragePoolList = new ArrayList<>();
|
||||
try {
|
||||
long deviceId = 1L;
|
||||
if(ImportSource.SHARED == importSource) {
|
||||
diskProfileStoragePoolList.add(importKVMSharedDisk(userVm, diskOffering, Volume.Type.ROOT,
|
||||
template, deviceId, poolId, diskPath, diskProfile));
|
||||
} else if(ImportSource.LOCAL == importSource) {
|
||||
diskProfileStoragePoolList.add(importKVMLocalDisk(userVm, diskOffering, Volume.Type.ROOT,
|
||||
template, deviceId, hostId, diskPath, diskProfile));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e);
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage())));
|
||||
}
|
||||
networkOrchestrationService.importNic(macAddress, 0, network, true, userVm, requestedIpPair, zone, true);
|
||||
publishVMUsageUpdateResourceCount(userVm, dummyOffering, template);
|
||||
return userVm;
|
||||
|
||||
} catch (InsufficientCapacityException ice) { // This will be thrown by com.cloud.vm.UserVmService.importVM
|
||||
logger.error(String.format("Failed to import vm name: %s", instanceName), ice);
|
||||
throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ice.getMessage());
|
||||
}
|
||||
if (userVm == null) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import vm name: %s", instanceName));
|
||||
}
|
||||
|
||||
DiskOfferingVO diskOffering = diskOfferingDao.findById(serviceOffering.getDiskOfferingId());
|
||||
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
List<String> resourceLimitStorageTags = resourceLimitService.getResourceLimitStorageTagsForResourceCountOperation(true, diskOffering);
|
||||
try {
|
||||
CheckedReservation volumeReservation = new CheckedReservation(owner, Resource.ResourceType.volume, resourceLimitStorageTags,
|
||||
CollectionUtils.isNotEmpty(resourceLimitStorageTags) ? 1L : 0L, reservationDao, resourceLimitService);
|
||||
reservations.add(volumeReservation);
|
||||
|
||||
String rootVolumeName = String.format("ROOT-%s", userVm.getId());
|
||||
DiskProfile diskProfile = volumeManager.allocateRawVolume(Volume.Type.ROOT, rootVolumeName, diskOffering, null, null, null, userVm, template, owner, null, false);
|
||||
|
||||
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(userVm, template, serviceOffering, owner, null);
|
||||
ServiceOfferingVO dummyOffering = serviceOfferingDao.findById(userVm.getId(), serviceOffering.getId());
|
||||
profile.setServiceOffering(dummyOffering);
|
||||
DeploymentPlanner.ExcludeList excludeList = new DeploymentPlanner.ExcludeList();
|
||||
final DataCenterDeployment plan = new DataCenterDeployment(zone.getId(), null, null, hostId, poolId, null);
|
||||
DeployDestination dest = null;
|
||||
try {
|
||||
dest = deploymentPlanningManager.planDeployment(profile, plan, excludeList, null);
|
||||
} catch (Exception e) {
|
||||
logger.warn("Import failed for Vm: {} while finding deployment destination", userVm, e);
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s while finding deployment destination", userVm.getInstanceName()));
|
||||
}
|
||||
if(dest == null) {
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Import failed for Vm: %s. Suitable deployment destination not found", userVm.getInstanceName()));
|
||||
}
|
||||
|
||||
Map<Volume, StoragePool> storage = dest.getStorageForDisks();
|
||||
Volume volume = volumeDao.findById(diskProfile.getVolumeId());
|
||||
StoragePool storagePool = storage.get(volume);
|
||||
CheckVolumeCommand checkVolumeCommand = new CheckVolumeCommand();
|
||||
checkVolumeCommand.setSrcFile(diskPath);
|
||||
StorageFilerTO storageTO = new StorageFilerTO(storagePool);
|
||||
checkVolumeCommand.setStorageFilerTO(storageTO);
|
||||
Answer answer = agentManager.easySend(dest.getHost().getId(), checkVolumeCommand);
|
||||
if (!(answer instanceof CheckVolumeAnswer)) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new CloudRuntimeException("Disk not found or is invalid");
|
||||
}
|
||||
CheckVolumeAnswer checkVolumeAnswer = (CheckVolumeAnswer) answer;
|
||||
try {
|
||||
checkVolume(checkVolumeAnswer.getVolumeDetails());
|
||||
} catch (CloudRuntimeException e) {
|
||||
} catch (ResourceAllocationException e) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw e;
|
||||
}
|
||||
if (!checkVolumeAnswer.getResult()) {
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new CloudRuntimeException("Disk not found or is invalid");
|
||||
}
|
||||
diskProfile.setSize(checkVolumeAnswer.getSize());
|
||||
|
||||
CheckedReservation primaryStorageReservation = new CheckedReservation(owner, Resource.ResourceType.primary_storage, resourceLimitStorageTags,
|
||||
CollectionUtils.isNotEmpty(resourceLimitStorageTags) ? diskProfile.getSize() : 0L, reservationDao, resourceLimitService);
|
||||
reservations.add(primaryStorageReservation);
|
||||
|
||||
List<Pair<DiskProfile, StoragePool>> diskProfileStoragePoolList = new ArrayList<>();
|
||||
try {
|
||||
long deviceId = 1L;
|
||||
if(ImportSource.SHARED == importSource) {
|
||||
diskProfileStoragePoolList.add(importKVMSharedDisk(userVm, diskOffering, Volume.Type.ROOT,
|
||||
template, deviceId, poolId, diskPath, diskProfile));
|
||||
} else if(ImportSource.LOCAL == importSource) {
|
||||
diskProfileStoragePoolList.add(importKVMLocalDisk(userVm, diskOffering, Volume.Type.ROOT,
|
||||
template, deviceId, hostId, diskPath, diskProfile));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error(String.format("Failed to import volumes while importing vm: %s", instanceName), e);
|
||||
cleanupFailedImportVM(userVm);
|
||||
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to import volumes while importing vm: %s. %s", instanceName, StringUtils.defaultString(e.getMessage())));
|
||||
}
|
||||
networkOrchestrationService.importNic(macAddress, 0, network, true, userVm, requestedIpPair, zone, true);
|
||||
publishVMUsageUpdateResourceCount(userVm, dummyOffering, template);
|
||||
return userVm;
|
||||
|
||||
} finally {
|
||||
ReservationHelper.closeAll(reservations);
|
||||
}
|
||||
}
|
||||
|
||||
protected void checkVmResourceLimitsForExternalKvmVmImport(Account owner, ServiceOfferingVO serviceOffering, VMTemplateVO template, Map<String, String> details, List<Reserver> reservations) throws ResourceAllocationException {
|
||||
// When importing an external VM, the amount of CPUs and memory is always obtained from the compute offering,
|
||||
// unlike the unmanaged instance import that obtains it from the hypervisor unless the VM is powered off and the offering is fixed
|
||||
Integer cpu = serviceOffering.getCpu();
|
||||
Integer memory = serviceOffering.getRamSize();
|
||||
|
||||
if (serviceOffering.isDynamic()) {
|
||||
cpu = getDetailAsInteger(VmDetailConstants.CPU_NUMBER, details);
|
||||
memory = getDetailAsInteger(VmDetailConstants.MEMORY, details);
|
||||
}
|
||||
|
||||
List<String> resourceLimitHostTags = resourceLimitService.getResourceLimitHostTags(serviceOffering, template);
|
||||
|
||||
CheckedReservation vmReservation = new CheckedReservation(owner, Resource.ResourceType.user_vm, resourceLimitHostTags, 1L, reservationDao, resourceLimitService);
|
||||
reservations.add(vmReservation);
|
||||
|
||||
CheckedReservation cpuReservation = new CheckedReservation(owner, Resource.ResourceType.cpu, resourceLimitHostTags, cpu.longValue(), reservationDao, resourceLimitService);
|
||||
reservations.add(cpuReservation);
|
||||
|
||||
CheckedReservation memReservation = new CheckedReservation(owner, Resource.ResourceType.memory, resourceLimitHostTags, memory.longValue(), reservationDao, resourceLimitService);
|
||||
reservations.add(memReservation);
|
||||
}
|
||||
|
||||
protected Integer getDetailAsInteger(String key, Map<String, String> details) {
|
||||
String detail = details.get(key);
|
||||
if (detail == null) {
|
||||
throw new InvalidParameterValueException(String.format("Detail '%s' must be provided.", key));
|
||||
}
|
||||
try {
|
||||
return Integer.valueOf(detail);
|
||||
} catch (NumberFormatException e) {
|
||||
throw new InvalidParameterValueException(String.format("Please provide a valid integer value for detail '%s'.", key));
|
||||
}
|
||||
}
|
||||
|
||||
private void checkVolume(Map<VolumeOnStorageTO.Detail, String> volumeDetails) {
|
||||
if (MapUtils.isEmpty(volumeDetails)) {
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -16,6 +16,25 @@
|
|||
// under the License.
|
||||
package com.cloud.hypervisor;
|
||||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.agent.api.to.NicTO;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.configuration.ConfigurationManagerImpl;
|
||||
|
|
@ -34,23 +53,6 @@ import com.cloud.storage.dao.GuestOSHypervisorDao;
|
|||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class KVMGuruTest {
|
||||
|
|
@ -111,8 +113,15 @@ public class KVMGuruTest {
|
|||
private static final String detail2Key = "detail2";
|
||||
private static final String detail2Value = "value2";
|
||||
|
||||
private ConfigKey<Integer> originalVmServiceOfferingMaxCpuCores;
|
||||
private ConfigKey<Integer> originalVmServiceOfferingMaxRAMSize;
|
||||
|
||||
@Before
|
||||
public void setup() throws UnsupportedEncodingException {
|
||||
// Preserve the original value for restoration in tearDown
|
||||
originalVmServiceOfferingMaxCpuCores = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES;
|
||||
originalVmServiceOfferingMaxRAMSize = ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE;
|
||||
|
||||
Mockito.when(vmTO.isLimitCpuUse()).thenReturn(true);
|
||||
Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm);
|
||||
Mockito.when(vm.getHostId()).thenReturn(hostId);
|
||||
|
|
@ -134,6 +143,13 @@ public class KVMGuruTest {
|
|||
Arrays.asList(detail1, detail2));
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() {
|
||||
// Restore the original value
|
||||
ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_CPU_CORES = originalVmServiceOfferingMaxCpuCores;
|
||||
ConfigurationManagerImpl.VM_SERVICE_OFFERING_MAX_RAM_SIZE = originalVmServiceOfferingMaxRAMSize;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetVmQuotaPercentage() {
|
||||
guru.setVmQuotaPercentage(vmTO, vmProfile);
|
||||
|
|
|
|||
|
|
@ -554,6 +554,7 @@ public class VpcManagerImplTest {
|
|||
doReturn(ipv4GuestSubnetNetworkMap).when(routedIpv4Manager).getOrCreateIpv4SubnetForVpc(any(), anyInt());
|
||||
List<Long> bgpPeerIds = Arrays.asList(11L, 12L);
|
||||
try (MockedConstruction<CheckedReservation> mockCheckedReservation = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
|
||||
manager.createVpc(zoneId, vpcOfferingId, vpcOwnerId, vpcName, vpcName, null, vpcDomain,
|
||||
ip4Dns[0], ip4Dns[1], null, null, true, 1500, 24, null, bgpPeerIds, false);
|
||||
} catch (ResourceAllocationException e) {
|
||||
|
|
|
|||
|
|
@ -318,6 +318,11 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
|
|||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeUserRequest(long hostId, Event event, boolean isForced, boolean isForceDeleteStorage) throws AgentUnavailableException {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see com.cloud.resource.ResourceManager#resourceStateTransitTo(com.cloud.host.Host, com.cloud.resource.ResourceState.Event, long)
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -1184,4 +1184,31 @@ public class ResourceManagerImplTest {
|
|||
Mockito.verify(host).setStorageAccessGroups("group1,group2");
|
||||
Mockito.verify(hostDao).update(hostId, host);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void executeUserRequestDeleteHostPassesForcedFlags() throws Exception {
|
||||
Mockito.doReturn(true).when(resourceManager).doDeleteHost(anyLong(), anyBoolean(), anyBoolean());
|
||||
|
||||
resourceManager.executeUserRequest(hostId, ResourceState.Event.DeleteHost, true, true);
|
||||
|
||||
Mockito.verify(resourceManager).doDeleteHost(hostId, true, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void executeUserRequestDeleteHostPassesNonForcedFlags() throws Exception {
|
||||
Mockito.doReturn(true).when(resourceManager).doDeleteHost(anyLong(), anyBoolean(), anyBoolean());
|
||||
|
||||
resourceManager.executeUserRequest(hostId, ResourceState.Event.DeleteHost, false, false);
|
||||
|
||||
Mockito.verify(resourceManager).doDeleteHost(hostId, false, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void executeUserRequestDefaultOverloadPassesFalseForDeleteHost() throws Exception {
|
||||
Mockito.doReturn(true).when(resourceManager).doDeleteHost(anyLong(), anyBoolean(), anyBoolean());
|
||||
|
||||
resourceManager.executeUserRequest(hostId, ResourceState.Event.DeleteHost);
|
||||
|
||||
Mockito.verify(resourceManager).doDeleteHost(hostId, false, false);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -107,8 +107,8 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
|
|||
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
|
||||
import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper;
|
||||
import org.apache.cloudstack.storage.template.VnfTemplateManager;
|
||||
|
||||
import org.apache.cloudstack.test.utils.SpringUtils;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
|
|
|||
|
|
@ -61,20 +61,12 @@ import java.util.Map;
|
|||
import java.util.TimeZone;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedConstruction;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.springframework.test.util.ReflectionTestUtils;
|
||||
|
||||
import com.cloud.network.as.AutoScaleManager;
|
||||
import com.cloud.network.dao.FirewallRulesDao;
|
||||
import com.cloud.network.dao.IPAddressDao;
|
||||
import com.cloud.network.dao.IPAddressVO;
|
||||
import com.cloud.network.dao.LoadBalancerVMMapDao;
|
||||
import com.cloud.network.dao.LoadBalancerVMMapVO;
|
||||
import org.apache.cloudstack.acl.ControlledEntity;
|
||||
import org.apache.cloudstack.acl.SecurityChecker;
|
||||
import org.apache.cloudstack.api.ApiCommandResourceType;
|
||||
|
|
@ -112,6 +104,19 @@ import org.apache.cloudstack.storage.template.VnfTemplateManager;
|
|||
import org.apache.cloudstack.userdata.UserDataManager;
|
||||
import org.apache.cloudstack.vm.UnmanagedVMsManager;
|
||||
import org.apache.cloudstack.vm.lease.VMLeaseManager;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedConstruction;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.springframework.test.util.ReflectionTestUtils;
|
||||
|
||||
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
|
||||
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
|
||||
|
|
@ -141,12 +146,6 @@ import com.cloud.host.dao.HostDao;
|
|||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.network.Network;
|
||||
import com.cloud.network.NetworkModel;
|
||||
import com.cloud.network.as.AutoScaleManager;
|
||||
import com.cloud.network.dao.FirewallRulesDao;
|
||||
import com.cloud.network.dao.IPAddressDao;
|
||||
import com.cloud.network.dao.IPAddressVO;
|
||||
import com.cloud.network.dao.LoadBalancerVMMapDao;
|
||||
import com.cloud.network.dao.LoadBalancerVMMapVO;
|
||||
import com.cloud.network.dao.NetworkDao;
|
||||
import com.cloud.network.dao.NetworkVO;
|
||||
import com.cloud.network.dao.PhysicalNetworkDao;
|
||||
|
|
@ -167,6 +166,7 @@ import com.cloud.resourcelimit.CheckedReservation;
|
|||
import com.cloud.server.ManagementService;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.service.dao.ServiceOfferingDetailsDao;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.GuestOSVO;
|
||||
import com.cloud.storage.ScopeType;
|
||||
|
|
@ -463,6 +463,9 @@ public class UserVmManagerImplTest {
|
|||
|
||||
MockedStatic<UnmanagedVMsManager> unmanagedVMsManagerMockedStatic;
|
||||
|
||||
@Mock
|
||||
ServiceOfferingDetailsDao serviceOfferingDetailsDao;
|
||||
|
||||
private static final long vmId = 1l;
|
||||
private static final long zoneId = 2L;
|
||||
private static final long accountId = 3L;
|
||||
|
|
@ -4360,4 +4363,96 @@ public class UserVmManagerImplTest {
|
|||
method.setAccessible(true);
|
||||
method.invoke(userVmManagerImpl, vmId);
|
||||
}
|
||||
|
||||
private ServiceOfferingVO getMockedServiceOffering(boolean custom, boolean customSpeed) {
|
||||
ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class);
|
||||
when(serviceOffering.getUuid()).thenReturn("offering-uuid");
|
||||
when(serviceOffering.isDynamic()).thenReturn(custom);
|
||||
when(serviceOffering.isCustomCpuSpeedSupported()).thenReturn(customSpeed);
|
||||
if (custom) {
|
||||
when(serviceOffering.getCpu()).thenReturn(null);
|
||||
when(serviceOffering.getRamSize()).thenReturn(null);
|
||||
}
|
||||
if (customSpeed) {
|
||||
when(serviceOffering.getSpeed()).thenReturn(null);
|
||||
} else {
|
||||
when(serviceOffering.isCustomCpuSpeedSupported()).thenReturn(false);
|
||||
when(serviceOffering.getSpeed()).thenReturn(1000);
|
||||
}
|
||||
return serviceOffering;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void customOfferingNeedsCustomizationThrowsException() {
|
||||
ServiceOfferingVO serviceOffering = getMockedServiceOffering(true, true);
|
||||
InvalidParameterValueException ex = Assert.assertThrows(InvalidParameterValueException.class, () ->
|
||||
userVmManagerImpl.validateCustomParameters(serviceOffering, Collections.emptyMap()));
|
||||
assertEquals("Need to specify custom parameter values cpu, cpu speed and memory when using custom offering", ex.getMessage());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void cpuSpeedCustomizationNotAllowedThrowsException() {
|
||||
ServiceOfferingVO serviceOffering = getMockedServiceOffering(true, false);
|
||||
|
||||
Map<String, String> customParameters = new HashMap<>();
|
||||
customParameters.put(VmDetailConstants.CPU_NUMBER, "1");
|
||||
customParameters.put(VmDetailConstants.CPU_SPEED, "2500");
|
||||
|
||||
InvalidParameterValueException ex = Assert.assertThrows(InvalidParameterValueException.class, () ->
|
||||
userVmManagerImpl.validateCustomParameters(serviceOffering, customParameters));
|
||||
Assert.assertTrue(ex.getMessage().startsWith("The CPU speed of this offering"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void cpuSpeedCustomizationAllowedDoesNotThrowException() {
|
||||
ServiceOfferingVO serviceOffering = getMockedServiceOffering(true, true);
|
||||
|
||||
when(serviceOfferingDetailsDao.listDetailsKeyPairs(anyLong())).thenReturn(
|
||||
Map.of(ApiConstants.MIN_CPU_NUMBER, "1",
|
||||
ApiConstants.MAX_CPU_NUMBER, "4",
|
||||
ApiConstants.MIN_MEMORY, "256",
|
||||
ApiConstants.MAX_MEMORY, "8192"));
|
||||
|
||||
Map<String, String> customParameters = new HashMap<>();
|
||||
customParameters.put(VmDetailConstants.CPU_NUMBER, "1");
|
||||
customParameters.put(VmDetailConstants.CPU_SPEED, "2500");
|
||||
customParameters.put(VmDetailConstants.MEMORY, "256");
|
||||
|
||||
userVmManagerImpl.validateCustomParameters(serviceOffering, customParameters);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void verifyVmLimits_fixedOffering_throwsException() {
|
||||
when(userVmVoMock.getId()).thenReturn(1L);
|
||||
when(userVmVoMock.getServiceOfferingId()).thenReturn(1L);
|
||||
when(accountDao.findById(anyLong())).thenReturn(callerAccount);
|
||||
ServiceOfferingVO serviceOffering = getMockedServiceOffering(false, false);
|
||||
when(_serviceOfferingDao.findById(anyLong())).thenReturn(serviceOffering);
|
||||
when(_serviceOfferingDao.findByIdIncludingRemoved(anyLong(), anyLong())).thenReturn(serviceOffering);
|
||||
|
||||
Map<String, String> customParameters = new HashMap<>();
|
||||
customParameters.put(VmDetailConstants.CPU_SPEED, "2500");
|
||||
|
||||
InvalidParameterValueException ex = Assert.assertThrows(InvalidParameterValueException.class, () ->
|
||||
userVmManagerImpl.verifyVmLimits(userVmVoMock, customParameters));
|
||||
assertEquals("CPU number, Memory and CPU speed cannot be updated for a non-dynamic offering", ex.getMessage());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void verifyVmLimits_constrainedOffering_throwsException() {
|
||||
when(userVmVoMock.getId()).thenReturn(1L);
|
||||
when(userVmVoMock.getServiceOfferingId()).thenReturn(1L);
|
||||
when(accountDao.findById(anyLong())).thenReturn(callerAccount);
|
||||
ServiceOfferingVO serviceOffering = getMockedServiceOffering(true, false);
|
||||
when(_serviceOfferingDao.findById(anyLong())).thenReturn(serviceOffering);
|
||||
when(_serviceOfferingDao.findByIdIncludingRemoved(anyLong(), anyLong())).thenReturn(serviceOffering);
|
||||
|
||||
Map<String, String> customParameters = new HashMap<>();
|
||||
customParameters.put(VmDetailConstants.CPU_NUMBER, "1");
|
||||
customParameters.put(VmDetailConstants.CPU_SPEED, "2500");
|
||||
|
||||
InvalidParameterValueException ex = Assert.assertThrows(InvalidParameterValueException.class, () ->
|
||||
userVmManagerImpl.verifyVmLimits(userVmVoMock, customParameters));
|
||||
Assert.assertTrue(ex.getMessage().startsWith("The CPU speed of this offering"));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -231,6 +231,7 @@ public class VMSnapshotManagerTest {
|
|||
when(vmSnapshotVO.getId()).thenReturn(VM_SNAPSHOT_ID);
|
||||
when(serviceOffering.isDynamic()).thenReturn(false);
|
||||
when(_serviceOfferingDao.findById(SERVICE_OFFERING_ID)).thenReturn(serviceOffering);
|
||||
when(_serviceOfferingDao.findByIdIncludingRemoved(TEST_VM_ID, SERVICE_OFFERING_ID)).thenReturn(serviceOffering);
|
||||
|
||||
for (ResourceDetail detail : Arrays.asList(userVmDetailCpuNumber, vmSnapshotDetailCpuNumber)) {
|
||||
when(detail.getName()).thenReturn(VmDetailConstants.CPU_NUMBER);
|
||||
|
|
@ -360,20 +361,51 @@ public class VMSnapshotManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateUserVmServiceOfferingSameServiceOffering() {
|
||||
_vmSnapshotMgr.updateUserVmServiceOffering(userVm, vmSnapshotVO);
|
||||
verify(_vmSnapshotMgr, never()).changeUserVmServiceOffering(userVm, vmSnapshotVO);
|
||||
public void testUserVmServiceOfferingNeedsChangeWhenSnapshotOfferingDiffers() {
|
||||
when(userVm.getServiceOfferingId()).thenReturn(SERVICE_OFFERING_DIFFERENT_ID);
|
||||
when(vmSnapshotVO.getServiceOfferingId()).thenReturn(SERVICE_OFFERING_ID);
|
||||
|
||||
assertTrue(_vmSnapshotMgr.userVmServiceOfferingNeedsChange(userVm, vmSnapshotVO));
|
||||
|
||||
verify(_serviceOfferingDao, never()).findByIdIncludingRemoved(anyLong(), anyLong());
|
||||
verify(_serviceOfferingDao, never()).getComputeOffering(any(ServiceOfferingVO.class), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateUserVmServiceOfferingDifferentServiceOffering() throws ConcurrentOperationException, ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException {
|
||||
when(userVm.getServiceOfferingId()).thenReturn(SERVICE_OFFERING_DIFFERENT_ID);
|
||||
when(_userVmManager.upgradeVirtualMachine(eq(TEST_VM_ID), eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture())).thenReturn(true);
|
||||
_vmSnapshotMgr.updateUserVmServiceOffering(userVm, vmSnapshotVO);
|
||||
public void testUserVmServiceOfferingNeedsChangeWhenSameNonDynamicOffering() {
|
||||
assertFalse(_vmSnapshotMgr.userVmServiceOfferingNeedsChange(userVm, vmSnapshotVO));
|
||||
|
||||
verify(_vmSnapshotMgr).changeUserVmServiceOffering(userVm, vmSnapshotVO);
|
||||
verify(_serviceOfferingDao).findByIdIncludingRemoved(TEST_VM_ID, SERVICE_OFFERING_ID);
|
||||
verify(_serviceOfferingDao, never()).getComputeOffering(any(ServiceOfferingVO.class), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUserVmServiceOfferingNeedsChangeWhenDynamicOfferingMatchesSnapshot() {
|
||||
when(serviceOffering.isDynamic()).thenReturn(true);
|
||||
when(serviceOffering.getCpu()).thenReturn(2);
|
||||
when(serviceOffering.getRamSize()).thenReturn(2048);
|
||||
when(serviceOffering.getSpeed()).thenReturn(1000);
|
||||
when(_serviceOfferingDao.getComputeOffering(eq(serviceOffering), any())).thenReturn(serviceOffering);
|
||||
|
||||
assertFalse(_vmSnapshotMgr.userVmServiceOfferingNeedsChange(userVm, vmSnapshotVO));
|
||||
|
||||
verify(_serviceOfferingDao).getComputeOffering(eq(serviceOffering), any());
|
||||
verify(_vmSnapshotMgr).getVmMapDetails(vmSnapshotVO);
|
||||
verify(_vmSnapshotMgr).upgradeUserVmServiceOffering(eq(userVm), eq(SERVICE_OFFERING_ID), mapDetailsCaptor.capture());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUserVmServiceOfferingNeedsChangeWhenDynamicCpuDiffersFromSnapshot() {
|
||||
when(serviceOffering.isDynamic()).thenReturn(true);
|
||||
when(serviceOffering.getCpu()).thenReturn(2);
|
||||
when(serviceOffering.getRamSize()).thenReturn(2048);
|
||||
when(serviceOffering.getSpeed()).thenReturn(1000);
|
||||
ServiceOfferingVO fromSnapshot = mock(ServiceOfferingVO.class);
|
||||
when(fromSnapshot.getCpu()).thenReturn(4);
|
||||
when(fromSnapshot.getRamSize()).thenReturn(2048);
|
||||
when(fromSnapshot.getSpeed()).thenReturn(1000);
|
||||
when(_serviceOfferingDao.getComputeOffering(eq(serviceOffering), any())).thenReturn(fromSnapshot);
|
||||
|
||||
assertTrue(_vmSnapshotMgr.userVmServiceOfferingNeedsChange(userVm, vmSnapshotVO));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
|||
|
|
@ -1547,7 +1547,7 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testDeleteBackupVmNotFound() {
|
||||
public void testDeleteBackupVmNotFound() throws ResourceAllocationException {
|
||||
Long backupId = 1L;
|
||||
Long vmId = 2L;
|
||||
Long zoneId = 3L;
|
||||
|
|
@ -1601,7 +1601,7 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void testDeleteBackupBlockedByPendingJobs() {
|
||||
public void testDeleteBackupBlockedByPendingJobs() throws ResourceAllocationException {
|
||||
Long backupId = 1L;
|
||||
Long vmId = 2L;
|
||||
|
||||
|
|
@ -1829,13 +1829,13 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenBackupScheduleIsNotFound() {
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenBackupScheduleIsNotFound() throws ResourceAllocationException {
|
||||
backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L);
|
||||
Mockito.verify(backupManager, Mockito.never()).deleteExcessBackups(Mockito.anyList(), Mockito.anyInt(), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenRetentionIsEqualToZero() {
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenRetentionIsEqualToZero() throws ResourceAllocationException {
|
||||
Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock);
|
||||
Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(0);
|
||||
backupManager.deleteOldestBackupFromScheduleIfRequired(1L, 1L);
|
||||
|
|
@ -1843,7 +1843,7 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenAmountOfBackupsToBeDeletedIsLessThanOne() {
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestSkipDeletionWhenAmountOfBackupsToBeDeletedIsLessThanOne() throws ResourceAllocationException {
|
||||
List<BackupVO> backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class));
|
||||
Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock);
|
||||
Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(2);
|
||||
|
|
@ -1853,7 +1853,7 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestDeleteBackupsWhenRequired() {
|
||||
public void deleteOldestBackupFromScheduleIfRequiredTestDeleteBackupsWhenRequired() throws ResourceAllocationException {
|
||||
List<BackupVO> backups = List.of(Mockito.mock(BackupVO.class), Mockito.mock(BackupVO.class));
|
||||
Mockito.when(backupScheduleDao.findById(1L)).thenReturn(backupScheduleVOMock);
|
||||
Mockito.when(backupScheduleVOMock.getMaxBackups()).thenReturn(1);
|
||||
|
|
@ -1864,7 +1864,7 @@ public class BackupManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void deleteExcessBackupsTestEnsureBackupsAreDeletedWhenMethodIsCalled() {
|
||||
public void deleteExcessBackupsTestEnsureBackupsAreDeletedWhenMethodIsCalled() throws ResourceAllocationException {
|
||||
try (MockedStatic<ActionEventUtils> actionEventUtils = Mockito.mockStatic(ActionEventUtils.class)) {
|
||||
List<BackupVO> backups = List.of(Mockito.mock(BackupVO.class),
|
||||
Mockito.mock(BackupVO.class),
|
||||
|
|
|
|||
|
|
@ -199,7 +199,7 @@ public class BucketApiServiceImplTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testDeleteBucket() {
|
||||
public void testDeleteBucket() throws ResourceAllocationException {
|
||||
Long bucketId = 1L;
|
||||
Long objectStoreId = 3L;
|
||||
String bucketName = "bucket1";
|
||||
|
|
|
|||
|
|
@ -39,22 +39,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.BDDMockito;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedConstruction;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.api.ResponseGenerator;
|
||||
import org.apache.cloudstack.api.ResponseObject;
|
||||
|
|
@ -73,10 +57,26 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.framework.config.ConfigKey;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.resourcelimit.Reserver;
|
||||
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.BDDMockito;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedConstruction;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
|
|
@ -110,6 +110,7 @@ import com.cloud.exception.AgentUnavailableException;
|
|||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.exception.PermissionDeniedException;
|
||||
import com.cloud.exception.UnsupportedServiceException;
|
||||
import com.cloud.host.Host;
|
||||
|
|
@ -182,7 +183,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
private UnmanagedVMsManagerImpl unmanagedVMsManager = new UnmanagedVMsManagerImpl();
|
||||
private UnmanagedVMsManagerImpl unmanagedVMsManager;
|
||||
|
||||
@Mock
|
||||
private UserVmManager userVmManager;
|
||||
|
|
@ -264,6 +265,14 @@ public class UnmanagedVMsManagerImplTest {
|
|||
private ConfigKey<Boolean> configKeyMockParamsAllowed;
|
||||
@Mock
|
||||
private ConfigKey<String> configKeyMockParamsAllowedList;
|
||||
@Mock
|
||||
private Account accountMock;
|
||||
@Mock
|
||||
private ServiceOfferingVO serviceOfferingMock;
|
||||
@Mock
|
||||
private VMTemplateVO templateMock;
|
||||
@Mock
|
||||
private UnmanagedInstanceTO unmanagedInstanceMock;
|
||||
|
||||
private static final long virtualMachineId = 1L;
|
||||
|
||||
|
|
@ -390,6 +399,11 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(vmDao.findById(virtualMachineId)).thenReturn(virtualMachine);
|
||||
when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Running);
|
||||
|
||||
when(unmanagedInstanceMock.getCpuCores()).thenReturn(8);
|
||||
when(unmanagedInstanceMock.getMemory()).thenReturn(4096);
|
||||
when(serviceOfferingMock.getCpu()).thenReturn(4);
|
||||
when(serviceOfferingMock.getRamSize()).thenReturn(2048);
|
||||
}
|
||||
|
||||
@NotNull
|
||||
|
|
@ -467,7 +481,9 @@ public class UnmanagedVMsManagerImplTest {
|
|||
ImportUnmanagedInstanceCmd importUnmanageInstanceCmd = Mockito.mock(ImportUnmanagedInstanceCmd.class);
|
||||
when(importUnmanageInstanceCmd.getName()).thenReturn("SomeInstance");
|
||||
when(importUnmanageInstanceCmd.getDomainId()).thenReturn(null);
|
||||
unmanagedVMsManager.importUnmanagedInstance(importUnmanageInstanceCmd);
|
||||
try (MockedConstruction<CheckedReservation> mockCheckedReservation = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.importUnmanagedInstance(importUnmanageInstanceCmd);
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
|
|
@ -716,7 +732,17 @@ public class UnmanagedVMsManagerImplTest {
|
|||
}
|
||||
|
||||
private enum VcenterParameter {
|
||||
EXISTING, EXTERNAL, BOTH, NONE, EXISTING_INVALID, AGENT_UNAVAILABLE, CONVERT_FAILURE
|
||||
EXISTING,
|
||||
EXTERNAL,
|
||||
BOTH,
|
||||
NONE,
|
||||
EXISTING_INVALID,
|
||||
AGENT_UNAVAILABLE,
|
||||
CONVERT_FAILURE,
|
||||
FORCE_MS_AND_USE_VDDK,
|
||||
USE_VDDK_OVF_UNSUPPORTED,
|
||||
USE_VDDK_OVF_SUPPORTED,
|
||||
USE_VDDK_DETAILS_OVERRIDES
|
||||
}
|
||||
|
||||
private void baseTestImportVmFromVmwareToKvm(VcenterParameter vcenterParameter, boolean selectConvertHost,
|
||||
|
|
@ -753,6 +779,34 @@ public class UnmanagedVMsManagerImplTest {
|
|||
when(importVmCmd.getConvertInstanceHostId()).thenReturn(null);
|
||||
when(importVmCmd.getImportInstanceHostId()).thenReturn(null);
|
||||
when(importVmCmd.getConvertStoragePoolId()).thenReturn(null);
|
||||
when(importVmCmd.getExistingVcenterId()).thenReturn(null);
|
||||
when(importVmCmd.getVcenter()).thenReturn(null);
|
||||
when(importVmCmd.getDatacenterName()).thenReturn(null);
|
||||
when(importVmCmd.getUsername()).thenReturn(null);
|
||||
when(importVmCmd.getPassword()).thenReturn(null);
|
||||
when(importVmCmd.getDetails()).thenReturn(new HashMap<>());
|
||||
|
||||
boolean forceMsToImportVmFiles = false;
|
||||
boolean useVddk = false;
|
||||
boolean ovfExportSupported = false;
|
||||
if (VcenterParameter.FORCE_MS_AND_USE_VDDK == vcenterParameter) {
|
||||
forceMsToImportVmFiles = true;
|
||||
useVddk = true;
|
||||
} else if (VcenterParameter.USE_VDDK_OVF_UNSUPPORTED == vcenterParameter) {
|
||||
useVddk = true;
|
||||
} else if (VcenterParameter.USE_VDDK_OVF_SUPPORTED == vcenterParameter) {
|
||||
useVddk = true;
|
||||
ovfExportSupported = true;
|
||||
} else if (VcenterParameter.USE_VDDK_DETAILS_OVERRIDES == vcenterParameter) {
|
||||
useVddk = true;
|
||||
ovfExportSupported = true;
|
||||
when(importVmCmd.getDetails()).thenReturn(Map.of(
|
||||
"vddk.lib.dir", "/opt/vmware-vddk/override",
|
||||
"vddk.transports", "nbd:nbdssl",
|
||||
"vddk.thumbprint", "AA:BB:CC:DD:EE"));
|
||||
}
|
||||
when(importVmCmd.getForceMsToImportVmFiles()).thenReturn(forceMsToImportVmFiles);
|
||||
when(importVmCmd.getUseVddk()).thenReturn(useVddk);
|
||||
|
||||
NetworkVO networkVO = Mockito.mock(NetworkVO.class);
|
||||
when(networkVO.getGuestType()).thenReturn(Network.GuestType.L2);
|
||||
|
|
@ -814,11 +868,6 @@ public class UnmanagedVMsManagerImplTest {
|
|||
when(datacenterVO.getPassword()).thenReturn(password);
|
||||
when(importVmCmd.getExistingVcenterId()).thenReturn(existingDatacenterId);
|
||||
when(vmwareDatacenterDao.findById(existingDatacenterId)).thenReturn(datacenterVO);
|
||||
} else if (VcenterParameter.EXTERNAL == vcenterParameter) {
|
||||
when(importVmCmd.getVcenter()).thenReturn(vcenterHost);
|
||||
when(importVmCmd.getDatacenterName()).thenReturn(datacenter);
|
||||
when(importVmCmd.getUsername()).thenReturn(username);
|
||||
when(importVmCmd.getPassword()).thenReturn(password);
|
||||
}
|
||||
|
||||
if (VcenterParameter.BOTH == vcenterParameter) {
|
||||
|
|
@ -832,8 +881,20 @@ public class UnmanagedVMsManagerImplTest {
|
|||
when(vmwareDatacenterDao.findById(existingDatacenterId)).thenReturn(null);
|
||||
}
|
||||
|
||||
if (VcenterParameter.FORCE_MS_AND_USE_VDDK == vcenterParameter
|
||||
|| VcenterParameter.USE_VDDK_OVF_UNSUPPORTED == vcenterParameter
|
||||
|| VcenterParameter.USE_VDDK_OVF_SUPPORTED == vcenterParameter
|
||||
|| VcenterParameter.USE_VDDK_DETAILS_OVERRIDES == vcenterParameter) {
|
||||
Mockito.doReturn((Long) null).when(importVmCmd).getExistingVcenterId();
|
||||
Mockito.doReturn(vcenterHost).when(importVmCmd).getVcenter();
|
||||
Mockito.doReturn(datacenter).when(importVmCmd).getDatacenterName();
|
||||
Mockito.doReturn(username).when(importVmCmd).getUsername();
|
||||
Mockito.doReturn(password).when(importVmCmd).getPassword();
|
||||
}
|
||||
|
||||
CheckConvertInstanceAnswer checkConvertInstanceAnswer = mock(CheckConvertInstanceAnswer.class);
|
||||
when(checkConvertInstanceAnswer.getResult()).thenReturn(vcenterParameter != VcenterParameter.CONVERT_FAILURE);
|
||||
when(checkConvertInstanceAnswer.isOvfExportSupported()).thenReturn(ovfExportSupported);
|
||||
if (VcenterParameter.AGENT_UNAVAILABLE != vcenterParameter) {
|
||||
when(agentManager.send(Mockito.eq(convertHostId), Mockito.any(CheckConvertInstanceCommand.class))).thenReturn(checkConvertInstanceAnswer);
|
||||
}
|
||||
|
|
@ -856,9 +917,29 @@ public class UnmanagedVMsManagerImplTest {
|
|||
MockedConstruction<CheckedReservation> mockCheckedReservation = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.importVm(importVmCmd);
|
||||
verify(vmwareGuru).getHypervisorVMOutOfBandAndCloneIfRequired(Mockito.eq(host), Mockito.eq(vmName), anyMap());
|
||||
verify(vmwareGuru).createVMTemplateOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap(), any(DataStoreTO.class), anyInt());
|
||||
if (VcenterParameter.USE_VDDK_OVF_SUPPORTED == vcenterParameter) {
|
||||
verify(vmwareGuru, Mockito.never()).createVMTemplateOutOfBand(anyString(), anyString(), anyMap(), any(DataStoreTO.class), anyInt());
|
||||
verify(agentManager).send(Mockito.eq(convertHostId), Mockito.<com.cloud.agent.api.Command>argThat(command ->
|
||||
command instanceof ConvertInstanceCommand && ((ConvertInstanceCommand) command).isUseVddk()));
|
||||
verify(vmwareGuru, Mockito.never()).removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString());
|
||||
} else if (VcenterParameter.USE_VDDK_DETAILS_OVERRIDES == vcenterParameter) {
|
||||
verify(vmwareGuru, Mockito.never()).createVMTemplateOutOfBand(anyString(), anyString(), anyMap(), any(DataStoreTO.class), anyInt());
|
||||
verify(agentManager).send(Mockito.eq(convertHostId), Mockito.<com.cloud.agent.api.Command>argThat(command -> {
|
||||
if (!(command instanceof ConvertInstanceCommand)) {
|
||||
return false;
|
||||
}
|
||||
ConvertInstanceCommand convertCmd = (ConvertInstanceCommand) command;
|
||||
return convertCmd.isUseVddk()
|
||||
&& "/opt/vmware-vddk/override".equals(convertCmd.getVddkLibDir())
|
||||
&& "nbd:nbdssl".equals(convertCmd.getVddkTransports())
|
||||
&& "AA:BB:CC:DD:EE".equals(convertCmd.getVddkThumbprint());
|
||||
}));
|
||||
verify(vmwareGuru, Mockito.never()).removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString());
|
||||
} else {
|
||||
verify(vmwareGuru).createVMTemplateOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap(), any(DataStoreTO.class), anyInt());
|
||||
verify(vmwareGuru).removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString());
|
||||
}
|
||||
verify(vmwareGuru).removeClonedHypervisorVMOutOfBand(Mockito.eq(host), Mockito.eq(vmName), anyMap());
|
||||
verify(vmwareGuru).removeVMTemplateOutOfBand(any(DataStoreTO.class), anyString());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -952,6 +1033,49 @@ public class UnmanagedVMsManagerImplTest {
|
|||
baseTestImportVmFromVmwareToKvm(VcenterParameter.CONVERT_FAILURE, false, false);
|
||||
}
|
||||
|
||||
@Test(expected = ServerApiException.class)
|
||||
public void testImportVmFromVmwareToKvmForceMsMutuallyExclusiveWithUseVddk() throws OperationTimedoutException, AgentUnavailableException {
|
||||
baseTestImportVmFromVmwareToKvm(VcenterParameter.FORCE_MS_AND_USE_VDDK, false, false);
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void testValidateSelectedConversionStoragePoolForVddkFailsWhenPoolDoesNotSupportDiskOfferings() {
|
||||
long poolId = 11L;
|
||||
StoragePoolVO selectedPool = mock(StoragePoolVO.class);
|
||||
ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class);
|
||||
DiskOfferingVO rootDiskOffering = mock(DiskOfferingVO.class);
|
||||
DiskOfferingVO dataDiskOffering = mock(DiskOfferingVO.class);
|
||||
|
||||
when(serviceOffering.getDiskOfferingId()).thenReturn(21L);
|
||||
when(primaryDataStoreDao.findById(poolId)).thenReturn(selectedPool);
|
||||
when(diskOfferingDao.findById(21L)).thenReturn(rootDiskOffering);
|
||||
when(diskOfferingDao.findById(22L)).thenReturn(dataDiskOffering);
|
||||
when(volumeApiService.doesStoragePoolSupportDiskOffering(selectedPool, rootDiskOffering)).thenReturn(true);
|
||||
when(volumeApiService.doesStoragePoolSupportDiskOffering(selectedPool, dataDiskOffering)).thenReturn(false);
|
||||
|
||||
unmanagedVMsManager.validateSelectedConversionStoragePoolForVddk(true, poolId,
|
||||
serviceOffering, Map.of("1000-2", 22L));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testValidateSelectedConversionStoragePoolForVddkPassesWhenPoolSupportsAllDiskOfferings() {
|
||||
long poolId = 12L;
|
||||
StoragePoolVO selectedPool = mock(StoragePoolVO.class);
|
||||
ServiceOfferingVO serviceOffering = mock(ServiceOfferingVO.class);
|
||||
DiskOfferingVO rootDiskOffering = mock(DiskOfferingVO.class);
|
||||
DiskOfferingVO dataDiskOffering = mock(DiskOfferingVO.class);
|
||||
|
||||
when(serviceOffering.getDiskOfferingId()).thenReturn(31L);
|
||||
when(primaryDataStoreDao.findById(poolId)).thenReturn(selectedPool);
|
||||
when(diskOfferingDao.findById(31L)).thenReturn(rootDiskOffering);
|
||||
when(diskOfferingDao.findById(32L)).thenReturn(dataDiskOffering);
|
||||
when(volumeApiService.doesStoragePoolSupportDiskOffering(selectedPool, rootDiskOffering)).thenReturn(true);
|
||||
when(volumeApiService.doesStoragePoolSupportDiskOffering(selectedPool, dataDiskOffering)).thenReturn(true);
|
||||
|
||||
unmanagedVMsManager.validateSelectedConversionStoragePoolForVddk(true, poolId,
|
||||
serviceOffering, Map.of("1000-2", 32L));
|
||||
}
|
||||
|
||||
private ClusterVO getClusterForTests() {
|
||||
ClusterVO cluster = mock(ClusterVO.class);
|
||||
when(cluster.getId()).thenReturn(1L);
|
||||
|
|
@ -1133,7 +1257,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
Assert.assertEquals(host, returnedHost);
|
||||
}
|
||||
|
||||
|
|
@ -1149,7 +1273,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
Assert.assertEquals(host, returnedHost);
|
||||
}
|
||||
|
||||
|
|
@ -1161,7 +1285,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
when(hostDao.listByClusterHypervisorTypeAndHostCapability(cluster.getId(),
|
||||
cluster.getHypervisorType(), Host.HOST_INSTANCE_CONVERSION)).thenReturn(List.of(host));
|
||||
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null);
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null, false);
|
||||
Assert.assertEquals(host, returnedHost);
|
||||
}
|
||||
|
||||
|
|
@ -1175,7 +1299,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.listByClusterAndHypervisorType(cluster.getId(), cluster.getHypervisorType())).thenReturn(List.of(host));
|
||||
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null);
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null, false);
|
||||
Assert.assertEquals(host, returnedHost);
|
||||
}
|
||||
|
||||
|
|
@ -1188,7 +1312,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.listByClusterAndHypervisorType(cluster.getId(), cluster.getHypervisorType())).thenReturn(List.of());
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null, false);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
|
|
@ -1203,7 +1327,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
|
|
@ -1217,7 +1341,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
|
|
@ -1230,7 +1354,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
|
|
@ -1242,7 +1366,7 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(host);
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
|
|
@ -1252,7 +1376,23 @@ public class UnmanagedVMsManagerImplTest {
|
|||
|
||||
when(hostDao.findById(hostId)).thenReturn(null);
|
||||
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId);
|
||||
unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, hostId, false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectKVMHostForConversionInClusterVddkAutoSelectsHostWithVddkSupport() {
|
||||
ClusterVO cluster = getClusterForTests();
|
||||
HostVO hostWithVddk = Mockito.mock(HostVO.class);
|
||||
HostVO hostWithoutVddk = Mockito.mock(HostVO.class);
|
||||
when(hostWithVddk.getDetail(Host.HOST_VDDK_SUPPORT)).thenReturn("true");
|
||||
when(hostWithoutVddk.getDetail(Host.HOST_VDDK_SUPPORT)).thenReturn(null);
|
||||
|
||||
when(hostDao.listByClusterHypervisorTypeAndHostCapability(cluster.getId(),
|
||||
cluster.getHypervisorType(), Host.HOST_INSTANCE_CONVERSION))
|
||||
.thenReturn(List.of(hostWithoutVddk, hostWithVddk));
|
||||
|
||||
HostVO returnedHost = unmanagedVMsManager.selectKVMHostForConversionInCluster(cluster, null, true);
|
||||
Assert.assertEquals(hostWithVddk, returnedHost);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -1372,4 +1512,102 @@ public class UnmanagedVMsManagerImplTest {
|
|||
Assert.assertFalse(params.containsKey(VmDetailConstants.CPU_SPEED));
|
||||
Assert.assertFalse(params.containsKey(VmDetailConstants.MEMORY));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkVmResourceLimitsForUnmanagedInstanceImportTestUsesInformationFromHypervisorWhenOfferingIsDynamic() throws Exception {
|
||||
when(serviceOfferingMock.isDynamic()).thenReturn(true);
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
|
||||
try (MockedConstruction<CheckedReservation> mockedConstruction = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.checkVmResourceLimitsForUnmanagedInstanceImport(accountMock, unmanagedInstanceMock, serviceOfferingMock, templateMock, reservations);
|
||||
|
||||
Assert.assertEquals(3, mockedConstruction.constructed().size());
|
||||
Assert.assertEquals(3, reservations.size());
|
||||
verify(unmanagedInstanceMock).getCpuCores();
|
||||
verify(unmanagedInstanceMock).getMemory();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkVmResourceLimitsForUnmanagedInstanceImportTestUsesInformationFromHypervisorWhenVmIsPoweredOn() throws Exception {
|
||||
when(unmanagedInstanceMock.getPowerState()).thenReturn(UnmanagedInstanceTO.PowerState.PowerOn);
|
||||
when(serviceOfferingMock.isDynamic()).thenReturn(false);
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
|
||||
try (MockedConstruction<CheckedReservation> mockedConstruction = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.checkVmResourceLimitsForUnmanagedInstanceImport(accountMock, unmanagedInstanceMock, serviceOfferingMock, templateMock, reservations);
|
||||
|
||||
Assert.assertEquals(3, mockedConstruction.constructed().size());
|
||||
Assert.assertEquals(3, reservations.size());
|
||||
verify(unmanagedInstanceMock).getCpuCores();
|
||||
verify(unmanagedInstanceMock).getMemory();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkVmResourceLimitsForUnmanagedInstanceImportTestUsesInformationFromOfferingWhenOfferingIsNotDynamicAndVmIsPoweredOff() throws Exception {
|
||||
when(unmanagedInstanceMock.getPowerState()).thenReturn(UnmanagedInstanceTO.PowerState.PowerOff);
|
||||
when(serviceOfferingMock.isDynamic()).thenReturn(false);
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
|
||||
try (MockedConstruction<CheckedReservation> mockedConstruction = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.checkVmResourceLimitsForUnmanagedInstanceImport(accountMock, unmanagedInstanceMock, serviceOfferingMock, templateMock, reservations);
|
||||
|
||||
Assert.assertEquals(3, mockedConstruction.constructed().size());
|
||||
Assert.assertEquals(3, reservations.size());
|
||||
verify(serviceOfferingMock).getCpu();
|
||||
verify(serviceOfferingMock).getRamSize();
|
||||
verify(unmanagedInstanceMock, Mockito.never()).getCpuCores();
|
||||
verify(unmanagedInstanceMock, Mockito.never()).getMemory();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkVmResourceLimitsForExternalKvmVmImportTestUsesInformationFromOfferingWhenOfferingIsNotDynamic() throws ResourceAllocationException {
|
||||
when(serviceOfferingMock.isDynamic()).thenReturn(false);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
|
||||
try (MockedConstruction<CheckedReservation> mockedConstruction = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.checkVmResourceLimitsForExternalKvmVmImport(accountMock, serviceOfferingMock, templateMock, details, reservations);
|
||||
|
||||
Assert.assertEquals(3, mockedConstruction.constructed().size());
|
||||
Assert.assertEquals(3, reservations.size());
|
||||
verify(serviceOfferingMock).getCpu();
|
||||
verify(serviceOfferingMock).getRamSize();
|
||||
verify(unmanagedVMsManager, Mockito.never()).getDetailAsInteger(VmDetailConstants.CPU_NUMBER, details);
|
||||
verify(unmanagedVMsManager, Mockito.never()).getDetailAsInteger(VmDetailConstants.MEMORY, details);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkVmResourceLimitsForExternalKvmVmImportTestUsesInformationFromDetailsWhenOfferingIsDynamic() throws ResourceAllocationException {
|
||||
when(serviceOfferingMock.isDynamic()).thenReturn(true);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(VmDetailConstants.CPU_NUMBER, "8");
|
||||
details.put(VmDetailConstants.MEMORY, "4096");
|
||||
List<Reserver> reservations = new ArrayList<>();
|
||||
|
||||
try (MockedConstruction<CheckedReservation> mockedConstruction = Mockito.mockConstruction(CheckedReservation.class)) {
|
||||
unmanagedVMsManager.checkVmResourceLimitsForExternalKvmVmImport(accountMock, serviceOfferingMock, templateMock, details, reservations);
|
||||
|
||||
Assert.assertEquals(3, mockedConstruction.constructed().size());
|
||||
Assert.assertEquals(3, reservations.size());
|
||||
verify(unmanagedVMsManager).getDetailAsInteger(VmDetailConstants.CPU_NUMBER, details);
|
||||
verify(unmanagedVMsManager).getDetailAsInteger(VmDetailConstants.MEMORY, details);
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void getDetailAsIntegerTestThrowsInvalidParameterValueExceptionWhenDetailIsNull() {
|
||||
Map<String, String> details = new HashMap<>();
|
||||
unmanagedVMsManager.getDetailAsInteger("non-existent", details);
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void getDetailAsIntegerTestThrowsInvalidParameterValueExceptionWhenValueIsInvalid() {
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put("key", "not-a-number");
|
||||
unmanagedVMsManager.getDetailAsInteger("key", details);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -584,6 +584,37 @@ class CsIP:
|
|||
"-A PREROUTING -m state --state NEW -i %s -s %s ! -d %s/32 -j ACL_OUTBOUND_%s" %
|
||||
(self.dev, guestNetworkCidr, self.address['gateway'], self.dev)])
|
||||
|
||||
# Process static routes for this interface
|
||||
static_routes = CsStaticRoutes("staticroutes", self.config)
|
||||
if static_routes:
|
||||
for item in static_routes.get_bag():
|
||||
if item == "id":
|
||||
continue
|
||||
static_route = static_routes.get_bag()[item]
|
||||
if static_route['revoke']:
|
||||
continue
|
||||
|
||||
# Check if this static route applies to this interface
|
||||
# Old style: ip_address field matches this interface's public_ip
|
||||
# New style (nexthop): gateway is in this interface's subnet
|
||||
applies_to_interface = False
|
||||
if 'ip_address' in static_route and static_route['ip_address'] == self.address['public_ip']:
|
||||
applies_to_interface = True
|
||||
elif 'gateway' in static_route:
|
||||
device = CsHelper.find_device_for_gateway(self.config, static_route['gateway'])
|
||||
if device == self.dev:
|
||||
applies_to_interface = True
|
||||
|
||||
if applies_to_interface:
|
||||
self.fw.append(["mangle", "",
|
||||
"-A PREROUTING -m state --state NEW -i %s -s %s ! -d %s/32 -j ACL_OUTBOUND_%s" %
|
||||
(self.dev, static_route['network'], self.address['public_ip'], self.dev)])
|
||||
self.fw.append(["filter", "front", "-A FORWARD -d %s -o %s -j ACL_INBOUND_%s" %
|
||||
(static_route['network'], self.dev, self.dev)])
|
||||
self.fw.append(["filter", "front",
|
||||
"-A FORWARD -d %s -o %s -m state --state RELATED,ESTABLISHED -j ACCEPT" %
|
||||
(static_route['network'], self.dev)])
|
||||
|
||||
if self.is_private_gateway():
|
||||
self.fw.append(["filter", "front", "-A FORWARD -d %s -o %s -j ACL_INBOUND_%s" %
|
||||
(self.address['network'], self.dev, self.dev)])
|
||||
|
|
@ -597,22 +628,6 @@ class CsIP:
|
|||
"-A PREROUTING -s %s -d %s -m state --state NEW -j MARK --set-xmark %s/0xffffffff" %
|
||||
(self.cl.get_vpccidr(), self.address['network'], hex(100 + int(self.dev[3:])))])
|
||||
|
||||
static_routes = CsStaticRoutes("staticroutes", self.config)
|
||||
if static_routes:
|
||||
for item in static_routes.get_bag():
|
||||
if item == "id":
|
||||
continue
|
||||
static_route = static_routes.get_bag()[item]
|
||||
if 'ip_address' in static_route and static_route['ip_address'] == self.address['public_ip'] and not static_route['revoke']:
|
||||
self.fw.append(["mangle", "",
|
||||
"-A PREROUTING -m state --state NEW -i %s -s %s ! -d %s/32 -j ACL_OUTBOUND_%s" %
|
||||
(self.dev, static_route['network'], static_route['ip_address'], self.dev)])
|
||||
self.fw.append(["filter", "front", "-A FORWARD -d %s -o %s -j ACL_INBOUND_%s" %
|
||||
(static_route['network'], self.dev, self.dev)])
|
||||
self.fw.append(["filter", "front",
|
||||
"-A FORWARD -d %s -o %s -m state --state RELATED,ESTABLISHED -j ACCEPT" %
|
||||
(static_route['network'], self.dev)])
|
||||
|
||||
if self.address["source_nat"]:
|
||||
self.fw.append(["nat", "front",
|
||||
"-A POSTROUTING -o %s -j SNAT --to-source %s" %
|
||||
|
|
|
|||
|
|
@ -25,8 +25,12 @@ import sys
|
|||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
from typing import Optional, TYPE_CHECKING
|
||||
from netaddr import *
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .CsConfig import CsConfig
|
||||
|
||||
PUBLIC_INTERFACES = {"router": "eth2", "vpcrouter": "eth1"}
|
||||
|
||||
STATE_COMMANDS = {"router": "ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"PRIMARY\"; else echo \"BACKUP\"; fi'",
|
||||
|
|
@ -270,3 +274,29 @@ def copy(src, dest):
|
|||
logging.error("Could not copy %s to %s" % (src, dest))
|
||||
else:
|
||||
logging.info("Copied %s to %s" % (src, dest))
|
||||
|
||||
|
||||
def find_device_for_gateway(config: 'CsConfig', gateway_ip: str) -> Optional[str]:
|
||||
"""
|
||||
Find which ethernet device the gateway IP belongs to by checking
|
||||
if the gateway is in any of the configured interface subnets.
|
||||
|
||||
Args:
|
||||
config: CsConfig instance containing network configuration
|
||||
gateway_ip: IP address of the gateway to locate
|
||||
|
||||
Returns:
|
||||
Device name (e.g., 'eth2') or None if not found
|
||||
"""
|
||||
try:
|
||||
interfaces = config.address().get_interfaces()
|
||||
for interface in interfaces:
|
||||
if not interface.is_added():
|
||||
continue
|
||||
if interface.ip_in_subnet(gateway_ip):
|
||||
return interface.get_device()
|
||||
logging.debug("No matching device found for gateway %s" % gateway_ip)
|
||||
return None
|
||||
except Exception as e:
|
||||
logging.error("Error finding device for gateway %s: %s" % (gateway_ip, e))
|
||||
return None
|
||||
|
|
|
|||
|
|
@ -244,6 +244,8 @@ class CsNetfilters(object):
|
|||
CsHelper.execute("nft add chain %s %s %s '{ %s }'" % (address_family, table, chain, chain_policy))
|
||||
if hook == "input" or hook == "output":
|
||||
CsHelper.execute("nft add rule %s %s %s icmp type { echo-request, echo-reply } accept" % (address_family, table, chain))
|
||||
elif hook == "forward":
|
||||
CsHelper.execute("nft add rule %s %s %s ct state established,related accept" % (address_family, table, chain))
|
||||
|
||||
def apply_nft_ipv4_rules(self, rules, type):
|
||||
if len(rules) == 0:
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@
|
|||
import logging
|
||||
from . import CsHelper
|
||||
from .CsDatabag import CsDataBag
|
||||
from .CsRoute import CsRoute
|
||||
|
||||
|
||||
class CsStaticRoutes(CsDataBag):
|
||||
|
|
@ -31,13 +32,46 @@ class CsStaticRoutes(CsDataBag):
|
|||
continue
|
||||
self.__update(self.dbag[item])
|
||||
|
||||
|
||||
|
||||
def __update(self, route):
|
||||
network = route['network']
|
||||
gateway = route['gateway']
|
||||
|
||||
if route['revoke']:
|
||||
command = "ip route del %s via %s" % (route['network'], route['gateway'])
|
||||
# Delete from main table
|
||||
command = "ip route del %s via %s" % (network, gateway)
|
||||
CsHelper.execute(command)
|
||||
|
||||
# Delete from PBR table if applicable
|
||||
device = CsHelper.find_device_for_gateway(self.config, gateway)
|
||||
if device:
|
||||
cs_route = CsRoute()
|
||||
table_name = cs_route.get_tablename(device)
|
||||
command = "ip route del %s via %s table %s" % (network, gateway, table_name)
|
||||
CsHelper.execute(command)
|
||||
logging.info("Deleted static route %s via %s from PBR table %s" % (network, gateway, table_name))
|
||||
else:
|
||||
command = "ip route show | grep %s | awk '{print $1, $3}'" % route['network']
|
||||
# Add to main table (existing logic)
|
||||
command = "ip route show | grep '^%s' | awk '{print $1, $3}'" % network
|
||||
result = CsHelper.execute(command)
|
||||
if not result:
|
||||
route_command = "ip route add %s via %s" % (route['network'], route['gateway'])
|
||||
route_command = "ip route add %s via %s" % (network, gateway)
|
||||
CsHelper.execute(route_command)
|
||||
logging.info("Added static route %s via %s to main table" % (network, gateway))
|
||||
|
||||
# Add to PBR table if applicable
|
||||
device = CsHelper.find_device_for_gateway(self.config, gateway)
|
||||
if device:
|
||||
cs_route = CsRoute()
|
||||
table_name = cs_route.get_tablename(device)
|
||||
# Check if route already exists in the PBR table
|
||||
check_command = "ip route show table %s | grep '^%s' | awk '{print $1, $3}'" % (table_name, network)
|
||||
result = CsHelper.execute(check_command)
|
||||
if not result:
|
||||
# Add route to the interface-specific table
|
||||
route_command = "ip route add %s via %s dev %s table %s" % (network, gateway, device, table_name)
|
||||
CsHelper.execute(route_command)
|
||||
logging.info("Added static route %s via %s to PBR table %s" % (network, gateway, table_name))
|
||||
else:
|
||||
logging.info("Static route %s via %s added to main table only (no matching interface found for PBR table)" % (network, gateway))
|
||||
|
|
|
|||
|
|
@ -28,6 +28,46 @@ def checkMaxconn(haproxyData, haCfgSections):
|
|||
|
||||
return True
|
||||
|
||||
def checkIdletimeout(haproxyData, haCfgSections):
|
||||
if "idletimeout" not in haproxyData:
|
||||
return True
|
||||
|
||||
# Normalize idletimeout value to string for comparison
|
||||
idle_value = str(haproxyData["idletimeout"]).strip()
|
||||
|
||||
# Safely get the defaults section and its timeout directives
|
||||
defaults_section = haCfgSections.get("defaults", {})
|
||||
timeout_lines = defaults_section.get("timeout", [])
|
||||
|
||||
# Extract client and server timeout values from the parsed "timeout" entries
|
||||
timeout_values = {}
|
||||
for tline in timeout_lines:
|
||||
tline = tline.strip()
|
||||
if not tline:
|
||||
continue
|
||||
parts = tline.split(None, 1)
|
||||
if len(parts) < 2:
|
||||
continue
|
||||
kind, value = parts[0].strip(), parts[1].strip()
|
||||
if kind in ("client", "server"):
|
||||
timeout_values[kind] = value
|
||||
|
||||
# Special handling for idletimeout == 0: there should be no client/server timeouts configured
|
||||
if idle_value == "0":
|
||||
if "client" in timeout_values or "server" in timeout_values:
|
||||
print("defaults timeout client or timeout server should be absent when idletimeout is 0")
|
||||
return False
|
||||
return True
|
||||
|
||||
# Non-zero idletimeout: both client and server timeouts must be present
|
||||
if "client" not in timeout_values or "server" not in timeout_values:
|
||||
print("defaults timeout client or timeout server missing")
|
||||
return False
|
||||
|
||||
if idle_value != timeout_values["client"] or idle_value != timeout_values["server"]:
|
||||
print("defaults timeout client or timeout server mismatch occurred")
|
||||
return False
|
||||
return True
|
||||
|
||||
def checkLoadBalance(haproxyData, haCfgSections):
|
||||
correct = True
|
||||
|
|
@ -120,9 +160,10 @@ def main():
|
|||
currSectionDict[lineSec[0]].append(lineSec[1] if len(lineSec) > 1 else '')
|
||||
|
||||
checkMaxConn = checkMaxconn(haproxyData[0], haCfgSections)
|
||||
checkIdleTimeout = checkIdletimeout(haproxyData[0], haCfgSections)
|
||||
checkLbRules = checkLoadBalance(haproxyData, haCfgSections)
|
||||
|
||||
if checkMaxConn and checkLbRules:
|
||||
if checkMaxConn and checkIdleTimeout and checkLbRules:
|
||||
print("All checks pass")
|
||||
exit(0)
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -398,141 +398,146 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
|
|||
self.apiclient.updatePod(cmd)
|
||||
self.debug("Enabled first pod for testing..")
|
||||
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
try:
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
id=routers[0].hostid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List host should return a valid data"
|
||||
)
|
||||
first_host = hosts[0]
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
id=routers[1].hostid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List host should return a valid data"
|
||||
)
|
||||
second_host = hosts[0]
|
||||
|
||||
# Checking if the cluster IDs of both routers are different?
|
||||
self.assertNotEqual(
|
||||
first_host.clusterid,
|
||||
second_host.clusterid,
|
||||
"Both the routers should be in different clusters"
|
||||
)
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)]
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
id=routers[0].hostid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List host should return a valid data"
|
||||
)
|
||||
first_host = hosts[0]
|
||||
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
id=routers[1].hostid,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List host should return a valid data"
|
||||
)
|
||||
second_host = hosts[0]
|
||||
|
||||
# Checking if the cluster IDs of both routers are different?
|
||||
self.assertNotEqual(
|
||||
first_host.clusterid,
|
||||
second_host.clusterid,
|
||||
"Both the routers should be in different clusters"
|
||||
)
|
||||
finally:
|
||||
try:
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
)
|
||||
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
except Exception as e:
|
||||
self.debug("Warning: Exception during pod re-enablement: %s" % e)
|
||||
return
|
||||
|
||||
# @attr(tags=["advanced", "advancedns"])
|
||||
|
|
@ -557,7 +562,7 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
|
|||
# 3. VM should be deployed and in Running state and on the specified
|
||||
# host
|
||||
# 4. There should be two routers (PRIMARY and BACKUP) for this network
|
||||
# ensure both routers should be on different storage pools
|
||||
# ensure both routers should be on different hosts
|
||||
|
||||
self.debug(
|
||||
"Checking if the current zone has multiple active pods in it..")
|
||||
|
|
@ -636,144 +641,150 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
|
|||
self.apiclient.updateCluster(cmd)
|
||||
self.debug("Enabled first cluster for testing..")
|
||||
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
try:
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
self.debug("Retrieving the list of hosts in the cluster")
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
clusterid=enabled_cluster.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List hosts should not return an empty response"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)],
|
||||
hostid=host.id
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.debug("Retrieving the list of hosts in the cluster")
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
clusterid=enabled_cluster.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
routers[0].hostid,
|
||||
routers[1].hostid,
|
||||
"Both the routers should be in different storage pools"
|
||||
)
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List hosts should not return an empty response"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)],
|
||||
hostid=host.id
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
|
||||
clusters = Cluster.list(
|
||||
self.apiclient,
|
||||
allocationstate="Disabled",
|
||||
podid=enabled_pod.id,
|
||||
listall=True
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
routers[0].hostid,
|
||||
routers[1].hostid,
|
||||
"Both the routers should be in different hosts"
|
||||
)
|
||||
finally:
|
||||
try:
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
)
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
|
||||
if clusters is not None:
|
||||
for cluster in clusters:
|
||||
cmd = updateCluster.updateClusterCmd()
|
||||
cmd.id = cluster.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateCluster(cmd)
|
||||
clusters = Cluster.list(
|
||||
self.apiclient,
|
||||
allocationstate="Disabled",
|
||||
podid=enabled_pod.id,
|
||||
listall=True
|
||||
)
|
||||
|
||||
if clusters is not None:
|
||||
for cluster in clusters:
|
||||
cmd = updateCluster.updateClusterCmd()
|
||||
cmd.id = cluster.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateCluster(cmd)
|
||||
except Exception as e:
|
||||
self.debug("Warning: Exception during resource re-enablement: %s" % e)
|
||||
return
|
||||
|
||||
|
||||
# @attr(tags=["advanced", "advancedns", "ssh"])
|
||||
@attr(tags=["TODO"])
|
||||
def test_RvR_multihosts(self):
|
||||
|
|
@ -874,140 +885,145 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
|
|||
self.apiclient.updateCluster(cmd)
|
||||
self.debug("Enabled first cluster for testing..")
|
||||
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
try:
|
||||
# Creating network using the network offering created
|
||||
self.debug("Creating network with network offering: %s" %
|
||||
self.network_offering.id)
|
||||
network = Network.create(
|
||||
self.apiclient,
|
||||
self.services["network"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
networkofferingid=self.network_offering.id,
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.debug("Created network with ID: %s" % network.id)
|
||||
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
networks = Network.list(
|
||||
self.apiclient,
|
||||
id=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"List networks should return a valid response for created network"
|
||||
)
|
||||
nw_response = networks[0]
|
||||
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
self.debug("Retrieving the list of hosts in the cluster")
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
clusterid=enabled_cluster.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List hosts should not return an empty response"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)],
|
||||
hostid=host.id
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
routers[0].hostid,
|
||||
routers[1].hostid,
|
||||
"Both the routers should be in different hosts"
|
||||
)
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
self.debug("Network state: %s" % nw_response.state)
|
||||
self.assertEqual(
|
||||
nw_response.state,
|
||||
"Allocated",
|
||||
"The network should be in allocated state after creation"
|
||||
)
|
||||
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
routers,
|
||||
None,
|
||||
"Routers should not be spawned when network is in allocated state"
|
||||
)
|
||||
|
||||
clusters = Cluster.list(
|
||||
self.apiclient,
|
||||
allocationstate="Disabled",
|
||||
podid=enabled_pod.id,
|
||||
listall=True
|
||||
self.debug("Retrieving the list of hosts in the cluster")
|
||||
hosts = Host.list(
|
||||
self.apiclient,
|
||||
clusterid=enabled_cluster.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"List hosts should not return an empty response"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
self.debug("Deploying VM in account: %s" % self.account.name)
|
||||
|
||||
# Spawn an instance in that network
|
||||
virtual_machine = VirtualMachine.create(
|
||||
self.apiclient,
|
||||
self.services["virtual_machine"],
|
||||
accountid=self.account.name,
|
||||
domainid=self.account.domainid,
|
||||
serviceofferingid=self.service_offering.id,
|
||||
networkids=[str(network.id)],
|
||||
hostid=host.id
|
||||
)
|
||||
self.debug("Deployed VM in network: %s" % network.id)
|
||||
|
||||
vms = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=virtual_machine.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(vms, list),
|
||||
True,
|
||||
"List Vms should return a valid list"
|
||||
)
|
||||
vm = vms[0]
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
"Running",
|
||||
"Vm should be in running state after deployment"
|
||||
)
|
||||
|
||||
self.debug("Listing routers for network: %s" % network.name)
|
||||
routers = Router.list(
|
||||
self.apiclient,
|
||||
networkid=network.id,
|
||||
listall=True
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"list router should return Primary and backup routers"
|
||||
)
|
||||
self.assertEqual(
|
||||
len(routers),
|
||||
2,
|
||||
"Length of the list router should be 2 (Backup & Primary)"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
routers[0].hostid,
|
||||
routers[1].hostid,
|
||||
"Both the routers should be in different hosts"
|
||||
)
|
||||
if clusters is not None:
|
||||
for cluster in clusters:
|
||||
cmd = updateCluster.updateClusterCmd()
|
||||
cmd.id = cluster.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateCluster(cmd)
|
||||
finally:
|
||||
try:
|
||||
self.debug("Enabling remaining pods if any..")
|
||||
pods = Pod.list(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
listall=True,
|
||||
allocationstate="Disabled"
|
||||
)
|
||||
|
||||
if pods is not None:
|
||||
for pod in pods:
|
||||
cmd = updatePod.updatePodCmd()
|
||||
cmd.id = pod.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updatePod(cmd)
|
||||
|
||||
clusters = Cluster.list(
|
||||
self.apiclient,
|
||||
allocationstate="Disabled",
|
||||
podid=enabled_pod.id,
|
||||
listall=True
|
||||
)
|
||||
if clusters is not None:
|
||||
for cluster in clusters:
|
||||
cmd = updateCluster.updateClusterCmd()
|
||||
cmd.id = cluster.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateCluster(cmd)
|
||||
except Exception as e:
|
||||
self.debug("Warning: Exception during resource re-enablement: %s" % e)
|
||||
return
|
||||
|
|
|
|||
|
|
@ -286,20 +286,25 @@ class TestDedicatePublicIPRange(cloudstackTestCase):
|
|||
cmd.allocationstate = 'Disabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
# Delete System VM and IP range, so System VM can get IP from original ranges
|
||||
self.debug("Destroying System VM: %s" % systemvm_id)
|
||||
cmd = destroySystemVm.destroySystemVmCmd()
|
||||
cmd.id = systemvm_id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
try:
|
||||
# Delete System VM and IP range, so System VM can get IP from original ranges
|
||||
self.debug("Destroying System VM: %s" % systemvm_id)
|
||||
cmd = destroySystemVm.destroySystemVmCmd()
|
||||
cmd.id = systemvm_id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
|
||||
domain_id = self.public_ip_range.vlan.domainid
|
||||
self.public_ip_range.delete(self.apiclient)
|
||||
domain_id = self.public_ip_range.vlan.domainid
|
||||
self.public_ip_range.delete(self.apiclient)
|
||||
|
||||
# Enable Zone
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
finally:
|
||||
# Enable Zone
|
||||
try:
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
except Exception as e:
|
||||
self.debug("Warning: Exception during zone re-enablement in base_system_vm: %s" % e)
|
||||
|
||||
# Wait for System VM to start and check System VM public IP
|
||||
systemvm_id = self.wait_for_system_vm_start(
|
||||
|
|
@ -399,18 +404,23 @@ class TestDedicatePublicIPRange(cloudstackTestCase):
|
|||
cmd.allocationstate = 'Disabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
|
||||
# Delete System VM and IP range, so System VM can get IP from original ranges
|
||||
if system_vms:
|
||||
for v in system_vms:
|
||||
self.debug("Destroying System VM: %s" % v.id)
|
||||
cmd = destroySystemVm.destroySystemVmCmd()
|
||||
cmd.id = v.id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
try:
|
||||
# Delete System VM and IP range, so System VM can get IP from original ranges
|
||||
if system_vms:
|
||||
for v in system_vms:
|
||||
self.debug("Destroying System VM: %s" % v.id)
|
||||
cmd = destroySystemVm.destroySystemVmCmd()
|
||||
cmd.id = v.id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
|
||||
self.public_ip_range.delete(self.apiclient)
|
||||
self.public_ip_range.delete(self.apiclient)
|
||||
|
||||
# Enable Zone
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
finally:
|
||||
# Enable Zone
|
||||
try:
|
||||
cmd = updateZone.updateZoneCmd()
|
||||
cmd.id = self.zone.id
|
||||
cmd.allocationstate = 'Enabled'
|
||||
self.apiclient.updateZone(cmd)
|
||||
except Exception as e:
|
||||
self.debug("Warning: Exception during zone re-enablement in delete_range: %s" % e)
|
||||
|
|
|
|||
|
|
@ -1245,6 +1245,8 @@
|
|||
"label.host.alerts": "Hosts in alert state",
|
||||
"label.host.name": "Host name",
|
||||
"label.host.ovftool.version": "OVFTool Version",
|
||||
"label.host.vddk.support": "VDDK Support",
|
||||
"label.host.vddk.version": "VDDK Version",
|
||||
"label.host.tag": "Host tag",
|
||||
"label.host.virtv2v.version": "Virt-v2v Version",
|
||||
"label.hostcontrolstate": "Compute Resource Status",
|
||||
|
|
@ -2407,6 +2409,7 @@
|
|||
"label.user.data.policy.tooltip": "User Data linked to the Template can be overridden by User Data provided during Instance deploy. Select the override policy as required.",
|
||||
"label.user.data": "User Data",
|
||||
"label.user.data.library": "User Data Library",
|
||||
"label.use.vddk": "Use VDDK",
|
||||
"label.ssh.port": "SSH port",
|
||||
"label.sshkeypair": "New SSH key pair",
|
||||
"label.sshkeypairs": "SSH key pairs",
|
||||
|
|
|
|||
|
|
@ -64,6 +64,22 @@
|
|||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item v-if="host.details && host.details['host.vddk.support']">
|
||||
<div>
|
||||
<strong>{{ $t('label.host.vddk.support') }}</strong>
|
||||
<div>
|
||||
{{ host.details['host.vddk.support'] }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item v-if="host.details && host.details['host.vddk.version']">
|
||||
<div>
|
||||
<strong>{{ $t('label.host.vddk.version') }}</strong>
|
||||
<div>
|
||||
{{ host.details['host.vddk.version'] }}
|
||||
</div>
|
||||
</div>
|
||||
</a-list-item>
|
||||
<a-list-item v-if="host.details && host.details['host.ovftool.version']">
|
||||
<div>
|
||||
<strong>{{ $t('label.host.ovftool.version') }}</strong>
|
||||
|
|
|
|||
|
|
@ -152,6 +152,12 @@
|
|||
</a-row>
|
||||
</a-radio-group>
|
||||
</a-form-item>
|
||||
<a-form-item name="usevddk" ref="usevddk" v-if="selectedVmwareVcenter">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.use.vddk')" :tooltip="apiParams.usevddk ? apiParams.usevddk.description : ''"/>
|
||||
</template>
|
||||
<a-switch v-model:checked="form.usevddk" @change="onUseVddkChange" />
|
||||
</a-form-item>
|
||||
<a-form-item name="forceconverttopool" ref="forceconverttopool" v-if="selectedVmwareVcenter">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.force.convert.to.pool')" :tooltip="apiParams.forceconverttopool.description"/>
|
||||
|
|
@ -170,7 +176,7 @@
|
|||
@handle-checkselectpair-change="updateSelectedKvmHostForConversion"
|
||||
/>
|
||||
</a-form-item>
|
||||
<a-form-item name="importhostid" ref="importhostid">
|
||||
<a-form-item name="importhostid" ref="importhostid" v-if="!form.usevddk">
|
||||
<check-box-select-pair
|
||||
layout="vertical"
|
||||
v-if="cluster.hypervisortype === 'KVM' && selectedVmwareVcenter"
|
||||
|
|
@ -184,12 +190,13 @@
|
|||
</a-form-item>
|
||||
<a-form-item name="convertstorageoption" ref="convertstorageoption">
|
||||
<check-box-select-pair
|
||||
:key="`convertstorageoption-${form.usevddk ? 'vddk' : 'default'}-${switches.forceConvertToPool ? 'pool' : 'tmp'}`"
|
||||
layout="vertical"
|
||||
v-if="cluster.hypervisortype === 'KVM' && selectedVmwareVcenter"
|
||||
:resourceKey="cluster.id"
|
||||
:selectOptions="storageOptionsForConversion"
|
||||
:checkBoxLabel="switches.forceConvertToPool ? $t('message.select.destination.storage.instance.conversion') : $t('message.select.temporary.storage.instance.conversion')"
|
||||
:defaultCheckBoxValue="false"
|
||||
:defaultCheckBoxValue="switches.forceConvertToPool"
|
||||
:reversed="false"
|
||||
@handle-checkselectpair-change="updateSelectedStorageOptionForConversion"
|
||||
/>
|
||||
|
|
@ -226,7 +233,7 @@
|
|||
:placeholder="$t('label.extra')"
|
||||
/>
|
||||
</a-form-item>
|
||||
<a-form-item name="forcemstoimportvmfiles" ref="forcemstoimportvmfiles" v-if="selectedVmwareVcenter">
|
||||
<a-form-item name="forcemstoimportvmfiles" ref="forcemstoimportvmfiles" v-if="selectedVmwareVcenter && !form.usevddk">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.force.ms.to.import.vm.files')" :tooltip="apiParams.forcemstoimportvmfiles.description"/>
|
||||
</template>
|
||||
|
|
@ -581,7 +588,8 @@ export default {
|
|||
selectedRootDiskSources: [],
|
||||
vmwareToKvmExtraParamsAllowed: false,
|
||||
vmwareToKvmExtraParamsSelected: false,
|
||||
vmwareToKvmExtraParams: ''
|
||||
vmwareToKvmExtraParams: '',
|
||||
userModifiedVddkSetting: false
|
||||
}
|
||||
},
|
||||
beforeCreate () {
|
||||
|
|
@ -778,6 +786,7 @@ export default {
|
|||
this.formRef = ref()
|
||||
this.form = reactive({
|
||||
rootdiskid: 0,
|
||||
usevddk: false,
|
||||
migrateallowed: this.switches.migrateAllowed,
|
||||
forced: this.switches.forced,
|
||||
forcemstoimportvmfiles: this.switches.forceMsToImportVmFiles,
|
||||
|
|
@ -1011,6 +1020,8 @@ export default {
|
|||
}).then(json => {
|
||||
this.kvmHostsForConversion = json.listhostsresponse.host || []
|
||||
this.kvmHostsForConversion = this.kvmHostsForConversion.filter(host => ['Enabled', 'Disabled'].includes(host.resourcestate))
|
||||
// Check if any host has VDDK support
|
||||
let hasVddkSupport = false
|
||||
this.kvmHostsForConversion.map(host => {
|
||||
host.name = host.name + ' [Pod=' + host.podname + '] [Cluster=' + host.clustername + ']'
|
||||
if (host.instanceconversionsupported !== null && host.instanceconversionsupported !== undefined && host.instanceconversionsupported) {
|
||||
|
|
@ -1024,7 +1035,29 @@ export default {
|
|||
if (host.details['host.ovftool.version']) {
|
||||
host.name = host.name + ' (ovftool=' + host.details['host.ovftool.version'] + ')'
|
||||
}
|
||||
// Check for VDDK support
|
||||
if (host.details['host.vddk.support'] === 'true' || host.details['host.vddk.support'] === true) {
|
||||
hasVddkSupport = true
|
||||
}
|
||||
|
||||
if (this.form.usevddk) {
|
||||
if (host.details['host.vddk.support'] === 'true' || host.details['host.vddk.support'] === true) {
|
||||
host.name = host.name + ' (VDDK=' + this.$t('label.supported') + ')'
|
||||
} else {
|
||||
host.name = host.name + ' (VDDK=' + this.$t('label.not.supported') + ')'
|
||||
}
|
||||
if (host.details['host.vddk.version']) {
|
||||
host.name = host.name + ' (vddk=' + host.details['host.vddk.version'] + ')'
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Enable usevddk by default if at least one host has VDDK support
|
||||
// Only auto-enable if user hasn't manually modified the setting
|
||||
if (hasVddkSupport && !this.form.usevddk && !this.userModifiedVddkSetting) {
|
||||
this.form.usevddk = true
|
||||
this.onUseVddkChange(true, false)
|
||||
}
|
||||
})
|
||||
},
|
||||
fetchKvmHostsForImporting () {
|
||||
|
|
@ -1052,6 +1085,11 @@ export default {
|
|||
}
|
||||
getAPI('listStoragePools', params).then(json => {
|
||||
this.storagePoolsForConversion = json.liststoragepoolsresponse.storagepool || []
|
||||
// Keep selected pool state aligned when the value is auto-populated by v-model.
|
||||
if (this.form.convertstoragepoolid) {
|
||||
const poolExists = this.storagePoolsForConversion.some(pool => pool.id === this.form.convertstoragepoolid)
|
||||
this.selectedStoragePoolForConversion = poolExists ? this.form.convertstoragepoolid : null
|
||||
}
|
||||
})
|
||||
} else if (this.selectedStorageOptionForConversion === 'local') {
|
||||
const kvmHost = this.kvmHostsForConversion.filter(x => x.id === this.selectedKvmHostForConversion)[0]
|
||||
|
|
@ -1061,6 +1099,10 @@ export default {
|
|||
status: 'Up'
|
||||
}).then(json => {
|
||||
this.storagePoolsForConversion = json.liststoragepoolsresponse.storagepool || []
|
||||
if (this.form.convertstoragepoolid) {
|
||||
const poolExists = this.storagePoolsForConversion.some(pool => pool.id === this.form.convertstoragepoolid)
|
||||
this.selectedStoragePoolForConversion = poolExists ? this.form.convertstoragepoolid : null
|
||||
}
|
||||
})
|
||||
}
|
||||
},
|
||||
|
|
@ -1115,6 +1157,34 @@ export default {
|
|||
},
|
||||
onForceConvertToPoolChange (val) {
|
||||
this.switches.forceConvertToPool = val
|
||||
this.form.forceconverttopool = val
|
||||
this.selectedStorageOptionForConversion = null
|
||||
this.selectedStoragePoolForConversion = null
|
||||
this.showStoragePoolsForConversion = false
|
||||
this.resetStorageOptionsForConversion()
|
||||
},
|
||||
onUseVddkChange (val, isUserChange = true) {
|
||||
if (isUserChange) {
|
||||
this.userModifiedVddkSetting = true
|
||||
}
|
||||
if (val) {
|
||||
this.form.forceconverttopool = true
|
||||
this.form.forcemstoimportvmfiles = false
|
||||
this.switches.forceConvertToPool = true
|
||||
this.switches.forceMsToImportVmFiles = false
|
||||
// Reset import host selection when VDDK is enabled
|
||||
this.selectedKvmHostForImporting = null
|
||||
// Refresh host list to show VDDK support details
|
||||
this.fetchKvmHostsForConversion()
|
||||
} else {
|
||||
this.form.forceconverttopool = false
|
||||
this.switches.forceConvertToPool = false
|
||||
this.selectedStorageOptionForConversion = null
|
||||
this.selectedStoragePoolForConversion = null
|
||||
this.showStoragePoolsForConversion = false
|
||||
// Refresh host list to remove VDDK support details
|
||||
this.fetchKvmHostsForConversion()
|
||||
}
|
||||
this.resetStorageOptionsForConversion()
|
||||
},
|
||||
updateSelectedRootDisk () {
|
||||
|
|
@ -1229,18 +1299,25 @@ export default {
|
|||
if (this.selectedKvmHostForImporting) {
|
||||
params.importinstancehostid = this.selectedKvmHostForImporting
|
||||
}
|
||||
if (this.selectedStoragePoolForConversion) {
|
||||
params.convertinstancepoolid = this.selectedStoragePoolForConversion
|
||||
const selectedPoolForConversion = values.convertstoragepoolid || this.selectedStoragePoolForConversion
|
||||
if (selectedPoolForConversion) {
|
||||
params.convertinstancepoolid = selectedPoolForConversion
|
||||
}
|
||||
if (this.vmwareToKvmExtraParams) {
|
||||
params.extraparams = this.vmwareToKvmExtraParams
|
||||
}
|
||||
params.forcemstoimportvmfiles = values.forcemstoimportvmfiles
|
||||
if (values.forceconverttopool) {
|
||||
if (values.usevddk) {
|
||||
params.usevddk = true
|
||||
params.forcemstoimportvmfiles = false
|
||||
} else {
|
||||
params.usevddk = false
|
||||
params.forcemstoimportvmfiles = values.forcemstoimportvmfiles
|
||||
}
|
||||
if (values.forceconverttopool !== undefined) {
|
||||
params.forceconverttopool = values.forceconverttopool
|
||||
}
|
||||
}
|
||||
var keys = ['hostname', 'domainid', 'projectid', 'account', 'migrateallowed', 'forced', 'forcemstoimportvmfiles', 'osid']
|
||||
var keys = ['hostname', 'domainid', 'projectid', 'account', 'migrateallowed', 'forced', 'osid']
|
||||
if (this.templateType !== 'auto') {
|
||||
keys.push('templateid')
|
||||
}
|
||||
|
|
@ -1354,6 +1431,11 @@ export default {
|
|||
this.templateType = this.defaultTemplateType()
|
||||
this.updateComputeOffering(undefined)
|
||||
this.switches = {}
|
||||
this.form.usevddk = false
|
||||
this.form.forceconverttopool = false
|
||||
this.form.forcemstoimportvmfiles = false
|
||||
this.userModifiedVddkSetting = false
|
||||
this.resetStorageOptionsForConversion()
|
||||
},
|
||||
closeAction () {
|
||||
this.$emit('close-action')
|
||||
|
|
|
|||
Loading…
Reference in New Issue