mirror of https://github.com/apache/cloudstack.git
merge forward 4.22 to main
commit f06ac512fa
@@ -77,6 +77,8 @@ import com.cloud.alert.Alert;
import com.cloud.capacity.Capacity;
import com.cloud.dc.Pod;
import com.cloud.dc.Vlan;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.ResourceUnavailableException;

@@ -97,6 +99,7 @@ import com.cloud.utils.Ternary;
import com.cloud.vm.InstanceGroup;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.Type;
import com.cloud.vm.VirtualMachineProfile;

/**
 * Hopefully this is temporary.

@@ -478,6 +481,19 @@ public interface ManagementService {

    Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(VirtualMachine vm, Long startIndex, Long pageSize, String keyword, List<VirtualMachine> vmList);

    /**
     * Apply affinity group constraints and other exclusion rules for VM migration.
     * This is a helper method that can be used independently for per-iteration affinity checks in DRS.
     *
     * @param vm The virtual machine to migrate
     * @param vmProfile The VM profile
     * @param plan The deployment plan
     * @param vmList List of VMs with current/simulated placements for affinity processing
     * @return ExcludeList containing hosts to avoid
     */
    ExcludeList applyAffinityConstraints(VirtualMachine vm, VirtualMachineProfile vmProfile,
            DeploymentPlan plan, List<VirtualMachine> vmList);
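The helper declared above is intended for per-iteration affinity checks, so a DRS loop can re-evaluate affinity after each simulated placement. The sketch below is illustrative only and is not part of this commit: the wrapper class, its managementService field and the candidateHosts list are assumptions, it is assumed to sit in the same package as ManagementService, and it relies on the planner-side ExcludeList#shouldAvoid(Host) check.

// Hedged sketch: filter DRS candidate hosts through the affinity helper declared above.
import java.util.List;
import java.util.stream.Collectors;

import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.host.Host;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;

public class DrsAffinityFilterSketch {

    private final ManagementService managementService; // assumed to be provided by the caller

    public DrsAffinityFilterSketch(ManagementService managementService) {
        this.managementService = managementService;
    }

    /** Returns only the candidate hosts that the affinity/exclusion rules allow for this VM. */
    public List<Host> allowedHosts(VirtualMachine vm, VirtualMachineProfile profile, DeploymentPlan plan,
            List<VirtualMachine> simulatedPlacements, List<Host> candidateHosts) {
        ExcludeList avoid = managementService.applyAffinityConstraints(vm, profile, plan, simulatedPlacements);
        return candidateHosts.stream()
                .filter(host -> !avoid.shouldAvoid(host))
                .collect(Collectors.toList());
    }
}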

    /**
     * List storage pools for live migrating of a volume. The API returns list of all pools in the cluster to which the
     * volume can be migrated. Current pool is not included in the list. In case of vSphere datastore cluster storage pools,

@@ -34,6 +34,7 @@ import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.user.Account;
import com.cloud.utils.StringUtils;

@APICommand(name = "createManagementNetworkIpRange",
        description = "Creates a Management network IP range.",

@@ -118,7 +119,7 @@ public class CreateManagementNetworkIpRangeCmd extends BaseAsyncCmd {
    }

    public String getVlan() {
        if (vlan == null || vlan.isEmpty()) {
        if (StringUtils.isBlank(vlan)) {
            vlan = "untagged";
        }
        return vlan;

@@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.admin.vlan;
import com.cloud.configuration.ConfigurationService;
import com.cloud.network.Network;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.StringUtils;

import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;

@@ -163,7 +164,7 @@ public class CreateVlanIpRangeCmd extends BaseCmd {
    }

    public String getVlan() {
        if ((vlan == null || vlan.isEmpty()) && !ConfigurationService.IsIpRangeForProvider(getProvider())) {
        if (StringUtils.isBlank(vlan) && !ConfigurationService.IsIpRangeForProvider(getProvider())) {
            vlan = "untagged";
        }
        return vlan;

@@ -22,10 +22,10 @@ package org.apache.cloudstack.cluster;
import com.cloud.host.Host;
import com.cloud.offering.ServiceOffering;
import com.cloud.org.Cluster;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.component.Adapter;
import com.cloud.vm.VirtualMachine;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.math3.stat.descriptive.moment.Mean;
import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

@@ -40,6 +40,9 @@ import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetricUs

public interface ClusterDrsAlgorithm extends Adapter {

    Mean MEAN_CALCULATOR = new Mean();
    StandardDeviation STDDEV_CALCULATOR = new StandardDeviation(false);

    /**
     * Determines whether a DRS operation is needed for a given cluster and host-VM
     * mapping.

@@ -59,79 +62,121 @@ public interface ClusterDrsAlgorithm extends Adapter {
    boolean needsDrs(Cluster cluster, List<Ternary<Long, Long, Long>> cpuList,
            List<Ternary<Long, Long, Long>> memoryList) throws ConfigurationException;


    /**
     * Determines the metrics for a given virtual machine and destination host in a DRS cluster.
     *
     * @param clusterId
     *         the ID of the cluster to check
     * @param vm
     *         the virtual machine to check
     * @param serviceOffering
     *         the service offering for the virtual machine
     * @param destHost
     *         the destination host for the virtual machine
     * @param hostCpuMap
     *         a map of host IDs to the Ternary of used, reserved and total CPU on each host
     * @param hostMemoryMap
     *         a map of host IDs to the Ternary of used, reserved and total memory on each host
     * @param requiresStorageMotion
     *         whether storage motion is required for the virtual machine
     * Calculates the metrics (improvement, cost, benefit) for migrating a VM to a destination host. Improvement is
     * calculated based on the change in cluster imbalance before and after the migration.
     *
     * @param cluster the cluster to check
     * @param vm the virtual machine to check
     * @param serviceOffering the service offering for the virtual machine
     * @param destHost the destination host for the virtual machine
     * @param hostCpuMap a map of host IDs to the Ternary of used, reserved and total CPU on each host
     * @param hostMemoryMap a map of host IDs to the Ternary of used, reserved and total memory on each host
     * @param requiresStorageMotion whether storage motion is required for the virtual machine
     * @param preImbalance the pre-calculated cluster imbalance before migration (null to calculate it)
     * @param baseMetricsArray pre-calculated array of all host metrics before migration
     * @param hostIdToIndexMap mapping from host ID to index in the metrics array
     * @return a ternary containing improvement, cost, benefit
     */
    Ternary<Double, Double, Double> getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering,
            Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
            Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
            Boolean requiresStorageMotion) throws ConfigurationException;
            Boolean requiresStorageMotion, Double preImbalance,
            double[] baseMetricsArray, Map<Long, Integer> hostIdToIndexMap) throws ConfigurationException;
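The three trailing parameters let a caller compute the cluster-wide inputs once per DRS iteration and reuse them while scoring every candidate destination host for a VM, instead of rebuilding them for each getMetrics call. A hedged sketch of that calling pattern follows; the method name, the candidateHosts list and the surrounding class are illustrative assumptions, not part of this interface.

// Hedged sketch: score each candidate host for one VM, reusing the precomputed inputs.
private Host pickBestDestination(ClusterDrsAlgorithm algorithm, Cluster cluster, VirtualMachine vm,
        ServiceOffering offering, List<Host> candidateHosts,
        Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
        Double preImbalance, double[] baseMetricsArray, Map<Long, Integer> hostIdToIndexMap)
        throws ConfigurationException {
    Host bestHost = null;
    double bestImprovement = 0.0;
    for (Host destHost : candidateHosts) {
        // preImbalance, baseMetricsArray and hostIdToIndexMap were built once by the caller
        // and are shared across every candidate host considered in this iteration.
        Ternary<Double, Double, Double> metrics = algorithm.getMetrics(cluster, vm, offering, destHost,
                hostCpuMap, hostMemoryMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);
        if (metrics.first() > bestImprovement) {
            bestImprovement = metrics.first();
            bestHost = destHost;
        }
    }
    return bestHost;
}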

    /**
     * Calculates the imbalance of the cluster after a virtual machine migration.
     * Calculates the cluster imbalance after migrating a VM to a destination host.
     *
     * @param serviceOffering
     *         the service offering for the virtual machine
     * @param vm
     *         the virtual machine being migrated
     * @param destHost
     *         the destination host for the virtual machine
     * @param hostCpuMap
     *         a map of host IDs to the Ternary of used, reserved and total CPU on each host
     * @param hostMemoryMap
     *         a map of host IDs to the Ternary of used, reserved and total memory on each host
     *
     * @return a pair containing the CPU and memory imbalance of the cluster after the migration
     * @param vm the virtual machine being migrated
     * @param destHost the destination host for the virtual machine
     * @param clusterId the cluster ID
     * @param vmMetric the VM's resource consumption metric
     * @param baseMetricsArray pre-calculated array of all host metrics before migration
     * @param hostIdToIndexMap mapping from host ID to index in the metrics array
     * @return the cluster imbalance after migration
     */
    default Double getImbalancePostMigration(ServiceOffering serviceOffering, VirtualMachine vm,
            Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
            Map<Long, Ternary<Long, Long, Long>> hostMemoryMap) throws ConfigurationException {
        Pair<Long, Map<Long, Ternary<Long, Long, Long>>> pair = getHostMetricsMapAndType(destHost.getClusterId(), serviceOffering, hostCpuMap, hostMemoryMap);
        long vmMetric = pair.first();
        Map<Long, Ternary<Long, Long, Long>> hostMetricsMap = pair.second();
    default Double getImbalancePostMigration(VirtualMachine vm,
            Host destHost, Long clusterId, long vmMetric, double[] baseMetricsArray,
            Map<Long, Integer> hostIdToIndexMap, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
            Map<Long, Ternary<Long, Long, Long>> hostMemoryMap) {
        // Create a copy of the base array and adjust only the two affected hosts
        double[] adjustedMetrics = new double[baseMetricsArray.length];
        System.arraycopy(baseMetricsArray, 0, adjustedMetrics, 0, baseMetricsArray.length);

        List<Double> list = new ArrayList<>();
        for (Long hostId : hostMetricsMap.keySet()) {
            list.add(getMetricValuePostMigration(destHost.getClusterId(), hostMetricsMap.get(hostId), vmMetric, hostId, destHost.getId(), vm.getHostId()));
        long destHostId = destHost.getId();
        long vmHostId = vm.getHostId();

        // Adjust source host (remove VM resources)
        Integer sourceIndex = hostIdToIndexMap.get(vmHostId);
        if (sourceIndex != null && sourceIndex < adjustedMetrics.length) {
            Map<Long, Ternary<Long, Long, Long>> sourceMetricsMap = getClusterDrsMetric(clusterId).equals("cpu") ? hostCpuMap : hostMemoryMap;
            Ternary<Long, Long, Long> sourceMetrics = sourceMetricsMap.get(vmHostId);
            if (sourceMetrics != null) {
                adjustedMetrics[sourceIndex] = getMetricValuePostMigration(clusterId, sourceMetrics, vmMetric, vmHostId, destHostId, vmHostId);
            }
        return getImbalance(list);
        }

    private Pair<Long, Map<Long, Ternary<Long, Long, Long>>> getHostMetricsMapAndType(Long clusterId,
            ServiceOffering serviceOffering, Map<Long, Ternary<Long, Long, Long>> hostCpuMap,
            Map<Long, Ternary<Long, Long, Long>> hostMemoryMap) throws ConfigurationException {
        // Adjust destination host (add VM resources)
        Integer destIndex = hostIdToIndexMap.get(destHostId);
        if (destIndex != null && destIndex < adjustedMetrics.length) {
            Map<Long, Ternary<Long, Long, Long>> destMetricsMap = getClusterDrsMetric(clusterId).equals("cpu") ? hostCpuMap : hostMemoryMap;
            Ternary<Long, Long, Long> destMetrics = destMetricsMap.get(destHostId);
            if (destMetrics != null) {
                adjustedMetrics[destIndex] = getMetricValuePostMigration(clusterId, destMetrics, vmMetric, destHostId, destHostId, vmHostId);
            }
        }

        return calculateImbalance(adjustedMetrics);
    }

    /**
     * Calculate imbalance from an array of metric values.
     * Imbalance is defined as standard deviation divided by mean.
     *
     * Uses reusable stateless calculator objects to avoid object creation overhead.
     * @param values array of metric values
     * @return calculated imbalance
     */
    private static double calculateImbalance(double[] values) {
        if (values == null || values.length == 0) {
            return 0.0;
        }

        double mean = MEAN_CALCULATOR.evaluate(values);
        if (mean == 0.0) {
            return 0.0; // Avoid division by zero
        }
        double stdDev = STDDEV_CALCULATOR.evaluate(values, mean);
        return stdDev / mean;
    }
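As a worked example of the standard-deviation-over-mean definition (illustrative numbers only, not from the commit): for host metrics {0.2, 0.4, 0.6} the mean is 0.4 and the population standard deviation is sqrt(((0.2 - 0.4)^2 + 0 + (0.6 - 0.4)^2) / 3) ~ 0.163, giving an imbalance of ~ 0.408, while a perfectly even cluster such as {0.4, 0.4, 0.4} yields 0.0. The fragment below is a hedged sketch mirroring the helper above with those values.

// Hedged fragment reusing the interface's calculator constants with made-up values.
double[] skewed = {0.2, 0.4, 0.6};
double mean = MEAN_CALCULATOR.evaluate(skewed);           // 0.4
double stdDev = STDDEV_CALCULATOR.evaluate(skewed, mean); // ~0.163 (population std dev, biasCorrected = false)
double imbalance = stdDev / mean;                         // ~0.408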

    /**
     * Helper method to get VM metric based on cluster configuration.
     */
    static long getVmMetric(ServiceOffering serviceOffering, Long clusterId) throws ConfigurationException {
        String metric = getClusterDrsMetric(clusterId);
        Pair<Long, Map<Long, Ternary<Long, Long, Long>>> pair;
        switch (metric) {
            case "cpu":
                pair = new Pair<>((long) serviceOffering.getCpu() * serviceOffering.getSpeed(), hostCpuMap);
                break;
                return (long) serviceOffering.getCpu() * serviceOffering.getSpeed();
            case "memory":
                pair = new Pair<>(serviceOffering.getRamSize() * 1024L * 1024L, hostMemoryMap);
                break;
                return serviceOffering.getRamSize() * 1024L * 1024L;
            default:
                throw new ConfigurationException(
                        String.format("Invalid metric: %s for cluster: %d", metric, clusterId));
        }
        return pair;
    }

    /**
     * Helper method to calculate metrics from pre and post imbalance values.
     */
    default Ternary<Double, Double, Double> calculateMetricsFromImbalances(Double preImbalance, Double postImbalance) {
        // This needs more research to determine the cost and benefit of a migration
        // TODO: Cost should be a factor of the VM size and the host capacity
        // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host
        final double improvement = preImbalance - postImbalance;
        final double cost = 0.0;
        final double benefit = 1.0;
        return new Ternary<>(improvement, cost, benefit);
    }

    private Double getMetricValuePostMigration(Long clusterId, Ternary<Long, Long, Long> metrics, long vmMetric,

@@ -151,9 +196,26 @@ public interface ClusterDrsAlgorithm extends Adapter {
    }

    private static Double getImbalance(List<Double> metricList) {
        Double clusterMeanMetric = getClusterMeanMetric(metricList);
        Double clusterStandardDeviation = getClusterStandardDeviation(metricList, clusterMeanMetric);
        return clusterStandardDeviation / clusterMeanMetric;
        if (CollectionUtils.isEmpty(metricList)) {
            return 0.0;
        }
        // Convert List<Double> to double[] once, avoiding repeated conversions
        double[] values = new double[metricList.size()];
        int index = 0;
        for (Double value : metricList) {
            if (value != null) {
                values[index++] = value;
            }
        }

        // Trim array if some values were null
        if (index < values.length) {
            double[] trimmed = new double[index];
            System.arraycopy(values, 0, trimmed, 0, index);
            values = trimmed;
        }

        return calculateImbalance(values);
    }

    static String getClusterDrsMetric(long clusterId) {

@@ -181,36 +243,6 @@ public interface ClusterDrsAlgorithm extends Adapter {
        return null;
    }

    /**
     * Mean is the average of a collection or set of metrics. In context of a DRS
     * cluster, the cluster metrics defined as the average metrics value for some
     * metric (such as CPU, memory etc.) for every resource such as host.
     * Cluster Mean Metric, mavg = (∑mi) / N, where mi is a measurable metric for a
     * resource ‘i’ in a cluster with total N number of resources.
     */
    static Double getClusterMeanMetric(List<Double> metricList) {
        return new Mean().evaluate(metricList.stream().mapToDouble(i -> i).toArray());
    }

    /**
     * Standard deviation is defined as the square root of the absolute squared sum
     * of difference of a metric from its mean for every resource divided by the
     * total number of resources. In context of the DRS, the cluster standard
     * deviation is the standard deviation based on a metric of resources in a
     * cluster such as for the allocation or utilisation CPU/memory metric of hosts
     * in a cluster.
     * Cluster Standard Deviation, σc = sqrt((∑∣mi−mavg∣^2) / N), where mavg is the
     * mean metric value and mi is a measurable metric for some resource ‘i’ in the
     * cluster with total N number of resources.
     */
    static Double getClusterStandardDeviation(List<Double> metricList, Double mean) {
        if (mean != null) {
            return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray(), mean);
        } else {
            return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray());
        }
    }

    static boolean getDrsMetricUseRatio(long clusterId) {
        return ClusterDrsMetricUseRatio.valueIn(clusterId);
    }

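For reference, the quantities used by the javadoc above and by calculateImbalance can be restated compactly (N hosts, metric m_i on host i):

    m_{avg} = \frac{1}{N}\sum_{i=1}^{N} m_i, \qquad
    \sigma_c = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\left(m_i - m_{avg}\right)^2}, \qquad
    \text{imbalance} = \frac{\sigma_c}{m_{avg}}
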
@@ -16,10 +16,15 @@
// under the License.
package org.apache.cloudstack.config;

import com.cloud.exception.InvalidParameterValueException;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ApiServiceConfiguration implements Configurable {
    protected static Logger LOGGER = LogManager.getLogger(ApiServiceConfiguration.class);
    public static final ConfigKey<String> ManagementServerAddresses = new ConfigKey<>(String.class, "host", "Advanced", "localhost", "The ip address of management server. This can also accept comma separated addresses.", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null);
    public static final ConfigKey<String> ApiServletPath = new ConfigKey<String>("Advanced", String.class, "endpoint.url", "http://localhost:8080/client/api",
            "API end point. Can be used by CS components/services deployed remotely, for sending CS API requests", true);

@@ -29,6 +34,20 @@ public class ApiServiceConfiguration implements Configurable {
            "true", "Are the source checks on API calls enabled (true) or not (false)? See api.allowed.source.cidr.list", true, ConfigKey.Scope.Global);
    public static final ConfigKey<String> ApiAllowedSourceCidrList = new ConfigKey<>(String.class, "api.allowed.source.cidr.list", "Advanced",
            "0.0.0.0/0,::/0", "Comma separated list of IPv4/IPv6 CIDRs from which API calls can be performed. Can be set on Global and Account levels.", true, ConfigKey.Scope.Account, null, null, null, null, null, ConfigKey.Kind.CSV, null);


    public static void validateEndpointUrl() {
        String csUrl = getApiServletPathValue();
        if (StringUtils.isBlank(csUrl) || StringUtils.containsAny(csUrl, "localhost", "127.0.0.1", "[::1]")) {
            LOGGER.error("Global setting [{}] cannot contain localhost or be blank. Current value: {}", ApiServletPath.key(), csUrl);
            throw new InvalidParameterValueException("Unable to complete this operation. Contact your cloud admin.");
        }
    }

    public static String getApiServletPathValue() {
        return ApiServletPath.value();
    }

    @Override
    public String getConfigComponentName() {
        return ApiServiceConfiguration.class.getSimpleName();

@@ -0,0 +1,95 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.config;
|
||||
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class ApiServiceConfigurationTest {
|
||||
|
||||
private static final String LOCALHOST = "http://localhost";
|
||||
|
||||
private static final String ENDPOINT_URL = "https://acs.clouds.com/client/api";
|
||||
|
||||
private static final String WHITE_SPACE = " ";
|
||||
|
||||
private static final String BLANK_STRING = "";
|
||||
|
||||
private static final String NULL_STRING = null;
|
||||
|
||||
private static final String LOCALHOST_IP = "127.0.0.1";
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void validateEndpointUrlTestIfEndpointUrlContainLocalhostShouldThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(LOCALHOST);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void validateEndpointUrlTestIfEndpointUrlContainLocalhostShouldNotThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(ENDPOINT_URL);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void validateEndpointUrlTestIfEndpointUrlIsNullShouldThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(NULL_STRING);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void validateEndpointUrlTestIfEndpointUrlIsBlankShouldThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(BLANK_STRING);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void validateEndpointUrlTestIfEndpointUrlIsWhiteSpaceShouldThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(WHITE_SPACE);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void validateEndpointUrlTestIfEndpointUrlContainLocalhostIpShouldThrowInvalidParameterValueException() {
|
||||
try (MockedStatic<ApiServiceConfiguration> apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) {
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(LOCALHOST_IP);
|
||||
apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod();
|
||||
ApiServiceConfiguration.validateEndpointUrl();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -127,8 +127,12 @@ import com.cloud.utils.component.SystemIntegrityChecker;
|
|||
import com.cloud.utils.crypt.DBEncryptionUtil;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.db.ScriptRunner;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.TransactionCallback;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import com.cloud.utils.db.TransactionStatus;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
|
||||
|
|
@@ -255,7 +259,6 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
|
|||
LOGGER.error("Unable to execute upgrade script", e);
|
||||
throw new CloudRuntimeException("Unable to execute upgrade script", e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
|
|
@@ -459,17 +462,51 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
|
|||
throw new CloudRuntimeException("Unable to acquire lock to check for database integrity.");
|
||||
}
|
||||
|
||||
doUpgrades(lock);
|
||||
} finally {
|
||||
lock.releaseRef();
|
||||
}
|
||||
}
|
||||
|
||||
boolean isStandalone() throws CloudRuntimeException {
|
||||
return Transaction.execute(new TransactionCallback<>() {
|
||||
@Override
|
||||
public Boolean doInTransaction(TransactionStatus status) {
|
||||
String sql = "SELECT COUNT(*) FROM `cloud`.`mshost` WHERE `state` = 'UP'";
|
||||
try (Connection conn = TransactionLegacy.getStandaloneConnection();
|
||||
PreparedStatement pstmt = conn.prepareStatement(sql);
|
||||
ResultSet rs = pstmt.executeQuery()) {
|
||||
if (rs.next()) {
|
||||
int count = rs.getInt(1);
|
||||
return count == 0;
|
||||
}
|
||||
} catch (SQLException e) {
|
||||
String errorMessage = "Unable to check if the management server is running in standalone mode.";
|
||||
LOGGER.error(errorMessage, e);
|
||||
return false;
|
||||
} catch (NullPointerException npe) {
|
||||
String errorMessage = "Unable to check if the management server is running in standalone mode. Not able to get a Database connection.";
|
||||
LOGGER.error(errorMessage, npe);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
protected void doUpgrades(GlobalLock lock) {
|
||||
try {
|
||||
initializeDatabaseEncryptors();
|
||||
|
||||
final CloudStackVersion dbVersion = CloudStackVersion.parse(_dao.getCurrentVersion());
|
||||
final String currentVersionValue = this.getClass().getPackage().getImplementationVersion();
|
||||
final String currentVersionValue = getImplementationVersion();
|
||||
|
||||
if (StringUtils.isBlank(currentVersionValue)) {
|
||||
return;
|
||||
}
|
||||
|
||||
String csVersion = SystemVmTemplateRegistration.parseMetadataFile();
|
||||
String csVersion = parseSystemVmMetadata();
|
||||
final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion);
|
||||
final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue);
|
||||
SystemVmTemplateRegistration.CS_MAJOR_VERSION = sysVmVersion.getMajorRelease() + "." + sysVmVersion.getMinorRelease();
|
||||
|
|
@@ -486,16 +523,40 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
|
|||
return;
|
||||
}
|
||||
|
||||
if (isStandalone()) {
|
||||
upgrade(dbVersion, currentVersion);
|
||||
} else {
|
||||
String errorMessage = "Database upgrade is required but the management server is running in a clustered environment. " +
|
||||
"Please perform the database upgrade when the management server is not running in a clustered environment.";
|
||||
LOGGER.error(errorMessage);
|
||||
handleClusteredUpgradeRequired(); // allow tests to override behavior
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
} finally {
|
||||
lock.releaseRef();
|
||||
}
|
||||
}
|
||||
|
||||
private void initializeDatabaseEncryptors() {
|
||||
/**
|
||||
* Hook that is called when an upgrade is required but the management server is clustered.
|
||||
* Default behavior is to exit the JVM, tests can override to throw instead.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
protected void handleClusteredUpgradeRequired() {
|
||||
System.exit(5); // I would prefer ServerDaemon.abort(errorMessage) but that would create a dependency hell
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
protected String getImplementationVersion() {
|
||||
return this.getClass().getPackage().getImplementationVersion();
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
protected String parseSystemVmMetadata() {
|
||||
return SystemVmTemplateRegistration.parseMetadataFile();
|
||||
}
|
||||
|
||||
// Make this protected so tests can noop it out
|
||||
protected void initializeDatabaseEncryptors() {
|
||||
TransactionLegacy txn = TransactionLegacy.open("initializeDatabaseEncryptors");
|
||||
txn.start();
|
||||
String errorMessage = "Unable to get the database connections";
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,173 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.upgrade;
|
||||
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import com.cloud.upgrade.dao.VersionDao;
|
||||
import com.cloud.upgrade.dao.VersionDaoImpl;
|
||||
import com.cloud.upgrade.dao.VersionVO;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import org.junit.Test;
|
||||
|
||||
public class DatabaseUpgradeCheckerDoUpgradesTest {
|
||||
|
||||
static class StubVersionDao extends VersionDaoImpl implements VersionDao {
|
||||
private final String currentVersion;
|
||||
|
||||
StubVersionDao(String currentVersion) {
|
||||
this.currentVersion = currentVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public VersionVO findByVersion(String version, VersionVO.Step step) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getCurrentVersion() {
|
||||
return currentVersion;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static class TestableChecker extends DatabaseUpgradeChecker {
|
||||
boolean initializeCalled = false;
|
||||
boolean upgradeCalled = false;
|
||||
boolean clusterHandlerCalled = false;
|
||||
String implVersionOverride = null;
|
||||
String sysVmMetadataOverride = "4.8.0";
|
||||
boolean standaloneOverride = true;
|
||||
|
||||
TestableChecker(String daoVersion) {
|
||||
// set a stub DAO
|
||||
this._dao = new StubVersionDao(daoVersion);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void initializeDatabaseEncryptors() {
|
||||
initializeCalled = true;
|
||||
// noop instead of doing DB work
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getImplementationVersion() {
|
||||
return implVersionOverride;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String parseSystemVmMetadata() {
|
||||
return sysVmMetadataOverride;
|
||||
}
|
||||
|
||||
@Override
|
||||
boolean isStandalone() {
|
||||
return standaloneOverride;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void upgrade(org.apache.cloudstack.utils.CloudStackVersion dbVersion, org.apache.cloudstack.utils.CloudStackVersion currentVersion) {
|
||||
upgradeCalled = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void handleClusteredUpgradeRequired() {
|
||||
clusterHandlerCalled = true;
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDoUpgrades_noImplementationVersion_returnsEarly() {
|
||||
TestableChecker checker = new TestableChecker("4.8.0");
|
||||
checker.implVersionOverride = ""; // blank -> should return early
|
||||
|
||||
GlobalLock lock = GlobalLock.getInternLock("test-noimpl");
|
||||
try {
|
||||
// acquire lock so doUpgrades can safely call unlock in finally
|
||||
lock.lock(1);
|
||||
checker.doUpgrades(lock);
|
||||
} finally {
|
||||
// ensure lock released if still held
|
||||
lock.releaseRef();
|
||||
}
|
||||
|
||||
assertTrue("initializeDatabaseEncryptors should be called before returning", checker.initializeCalled);
|
||||
assertFalse("upgrade should not be called when implementation version is blank", checker.upgradeCalled);
|
||||
assertFalse("cluster handler should not be called", checker.clusterHandlerCalled);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDoUpgrades_dbUpToDate_noUpgrade() {
|
||||
// DB version = code version -> no upgrade
|
||||
TestableChecker checker = new TestableChecker("4.8.1");
|
||||
checker.implVersionOverride = "4.8.1";
|
||||
checker.sysVmMetadataOverride = "4.8.1";
|
||||
|
||||
GlobalLock lock = GlobalLock.getInternLock("test-uptodate");
|
||||
try {
|
||||
lock.lock(1);
|
||||
checker.doUpgrades(lock);
|
||||
} finally {
|
||||
lock.releaseRef();
|
||||
}
|
||||
|
||||
assertTrue(checker.initializeCalled);
|
||||
assertFalse(checker.upgradeCalled);
|
||||
assertFalse(checker.clusterHandlerCalled);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDoUpgrades_requiresUpgrade_standalone_invokesUpgrade() {
|
||||
TestableChecker checker = new TestableChecker("4.8.0");
|
||||
checker.implVersionOverride = "4.8.2"; // code is newer than DB
|
||||
checker.sysVmMetadataOverride = "4.8.2";
|
||||
checker.standaloneOverride = true;
|
||||
|
||||
GlobalLock lock = GlobalLock.getInternLock("test-upgrade-standalone");
|
||||
try {
|
||||
lock.lock(1);
|
||||
checker.doUpgrades(lock);
|
||||
} finally {
|
||||
lock.releaseRef();
|
||||
}
|
||||
|
||||
assertTrue(checker.initializeCalled);
|
||||
assertTrue("upgrade should be invoked in standalone mode", checker.upgradeCalled);
|
||||
assertFalse(checker.clusterHandlerCalled);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDoUpgrades_requiresUpgrade_clustered_invokesHandler() {
|
||||
TestableChecker checker = new TestableChecker("4.8.0");
|
||||
checker.implVersionOverride = "4.8.2"; // code is newer than DB
|
||||
checker.sysVmMetadataOverride = "4.8.2";
|
||||
checker.standaloneOverride = false;
|
||||
|
||||
GlobalLock lock = GlobalLock.getInternLock("test-upgrade-clustered");
|
||||
try {
|
||||
lock.lock(1);
|
||||
checker.doUpgrades(lock);
|
||||
} finally {
|
||||
lock.releaseRef();
|
||||
}
|
||||
|
||||
assertTrue(checker.initializeCalled);
|
||||
assertFalse("upgrade should not be invoked in clustered mode", checker.upgradeCalled);
|
||||
assertTrue("cluster handler should be invoked in clustered mode", checker.clusterHandlerCalled);
|
||||
}
|
||||
}
|
||||
|
|
@@ -16,14 +16,24 @@
|
|||
// under the License.
|
||||
package com.cloud.upgrade;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import java.sql.SQLException;
|
||||
import java.lang.reflect.Field;
|
||||
import java.sql.Connection;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
|
||||
import java.util.Arrays;
|
||||
import javax.sql.DataSource;
|
||||
|
||||
import org.apache.cloudstack.utils.CloudStackVersion;
|
||||
import org.junit.Test;
|
||||
import org.junit.Before;
|
||||
import org.junit.After;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import org.mockito.ArgumentMatchers;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import com.cloud.upgrade.DatabaseUpgradeChecker.NoopDbUpgrade;
|
||||
import com.cloud.upgrade.dao.DbUpgrade;
|
||||
|
|
@@ -43,8 +53,51 @@ import com.cloud.upgrade.dao.Upgrade471to480;
|
|||
import com.cloud.upgrade.dao.Upgrade480to481;
|
||||
import com.cloud.upgrade.dao.Upgrade490to4910;
|
||||
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class DatabaseUpgradeCheckerTest {
|
||||
|
||||
@Mock
|
||||
DataSource dataSource;
|
||||
|
||||
@Mock
|
||||
Connection connection;
|
||||
|
||||
@Mock
|
||||
PreparedStatement preparedStatement;
|
||||
|
||||
@Mock
|
||||
ResultSet resultSet;
|
||||
|
||||
private DataSource backupDataSource;
|
||||
|
||||
@Before
|
||||
public void setup() throws Exception {
|
||||
Field dsField = TransactionLegacy.class.getDeclaredField("s_ds");
|
||||
dsField.setAccessible(true);
|
||||
backupDataSource = (DataSource) dsField.get(null);
|
||||
dsField.set(null, dataSource);
|
||||
|
||||
Mockito.when(dataSource.getConnection()).thenReturn(connection);
|
||||
Mockito.when(connection.prepareStatement(ArgumentMatchers.anyString())).thenReturn(preparedStatement);
|
||||
Mockito.when(preparedStatement.executeQuery()).thenReturn(resultSet);
|
||||
}
|
||||
|
||||
@After
|
||||
public void cleanup() throws Exception {
|
||||
Field dsField = TransactionLegacy.class.getDeclaredField("s_ds");
|
||||
dsField.setAccessible(true);
|
||||
dsField.set(null, backupDataSource);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCalculateUpgradePath480to481() {
|
||||
|
||||
|
|
@@ -79,7 +132,7 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertTrue(upgrades.length >= 1);
|
||||
assertTrue(upgrades[0] instanceof Upgrade490to4910);
|
||||
|
||||
assertTrue(Arrays.equals(new String[] {"4.9.0", currentVersion.toString()}, upgrades[0].getUpgradableVersionRange()));
|
||||
assertArrayEquals(new String[]{"4.9.0", currentVersion.toString()}, upgrades[0].getUpgradableVersionRange());
|
||||
assertEquals(currentVersion.toString(), upgrades[0].getUpgradedVersion());
|
||||
|
||||
}
|
||||
|
|
@@ -104,7 +157,7 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertTrue(upgrades[3] instanceof Upgrade41120to41130);
|
||||
assertTrue(upgrades[4] instanceof Upgrade41120to41200);
|
||||
|
||||
assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange()));
|
||||
assertArrayEquals(new String[]{"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange());
|
||||
assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion());
|
||||
|
||||
}
|
||||
|
|
@@ -151,12 +204,12 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertTrue(upgrades[5] instanceof Upgrade471to480);
|
||||
assertTrue(upgrades[6] instanceof Upgrade480to481);
|
||||
|
||||
assertTrue(Arrays.equals(new String[] {"4.8.1", currentVersion.toString()}, upgrades[upgrades.length - 1].getUpgradableVersionRange()));
|
||||
assertArrayEquals(new String[]{"4.8.1", currentVersion.toString()}, upgrades[upgrades.length - 1].getUpgradableVersionRange());
|
||||
assertEquals(currentVersion.toString(), upgrades[upgrades.length - 1].getUpgradedVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCalculateUpgradePathUnkownDbVersion() {
|
||||
public void testCalculateUpgradePathUnknownDbVersion() {
|
||||
|
||||
final CloudStackVersion dbVersion = CloudStackVersion.parse("4.99.0.0");
|
||||
assertNotNull(dbVersion);
|
||||
|
|
@@ -173,7 +226,7 @@ public class DatabaseUpgradeCheckerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testCalculateUpgradePathFromKownDbVersion() {
|
||||
public void testCalculateUpgradePathFromKnownDbVersion() {
|
||||
|
||||
final CloudStackVersion dbVersion = CloudStackVersion.parse("4.17.0.0");
|
||||
assertNotNull(dbVersion);
|
||||
|
|
@@ -306,4 +359,25 @@ public class DatabaseUpgradeCheckerTest {
|
|||
assertEquals(upgrades.length + 1, upgradesFromSecurityReleaseToNext.length);
|
||||
assertTrue(upgradesFromSecurityReleaseToNext[upgradesFromSecurityReleaseToNext.length - 1] instanceof NoopDbUpgrade);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void isStandalone() throws SQLException {
|
||||
// simulate zero 'UP' hosts -> standalone
|
||||
Mockito.when(resultSet.next()).thenReturn(true);
|
||||
Mockito.when(resultSet.getInt(1)).thenReturn(0);
|
||||
|
||||
final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker();
|
||||
assertTrue("DatabaseUpgradeChecker should be a standalone component", checker.isStandalone());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void isNotStandalone() throws SQLException {
|
||||
// simulate at least one 'UP' host -> not standalone
|
||||
Mockito.when(resultSet.next()).thenReturn(true);
|
||||
Mockito.when(resultSet.getInt(1)).thenReturn(1);
|
||||
|
||||
final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker();
|
||||
assertFalse("DatabaseUpgradeChecker should not be a standalone component", checker.isStandalone());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -68,23 +68,26 @@ public class Balanced extends AdapterBase implements ClusterDrsAlgorithm {
        return "balanced";
    }


    @Override
    public Ternary<Double, Double, Double> getMetrics(Cluster cluster, VirtualMachine vm,
            ServiceOffering serviceOffering, Host destHost,
            Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
            Boolean requiresStorageMotion) throws ConfigurationException {
        Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null);
        Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap);
            Boolean requiresStorageMotion, Double preImbalance,
            double[] baseMetricsArray, Map<Long, Integer> hostIdToIndexMap) throws ConfigurationException {
        // Use provided pre-imbalance if available, otherwise calculate it
        if (preImbalance == null) {
            preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null);
        }

        logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}",
        // Use optimized post-imbalance calculation that adjusts only affected hosts
        Double postImbalance = getImbalancePostMigration(vm, destHost,
                cluster.getId(), ClusterDrsAlgorithm.getVmMetric(serviceOffering, cluster.getId()),
                baseMetricsArray, hostIdToIndexMap, hostCpuMap, hostMemoryMap);

        logger.trace("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost ID: {} destHost: {}",
                cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost);

        // This needs more research to determine the cost and benefit of a migration
        // TODO: Cost should be a factor of the VM size and the host capacity
        // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host
        final double improvement = preImbalance - postImbalance;
        final double cost = 0.0;
        final double benefit = 1.0;
        return new Ternary<>(improvement, cost, benefit);
        return calculateMetricsFromImbalances(preImbalance, postImbalance);
    }
}

@@ -43,6 +43,9 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
|
@@ -119,6 +122,48 @@ public class BalancedTest {
|
|||
closeable.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method to prepare metrics data for getMetrics calls with optimized signature.
|
||||
* Calculates pre-imbalance and builds baseMetricsArray and hostIdToIndexMap.
|
||||
*
|
||||
* @return a Ternary containing preImbalance, baseMetricsArray, and hostIdToIndexMap
|
||||
*/
|
||||
private Ternary<Double, double[], Map<Long, Integer>> prepareMetricsData() throws ConfigurationException {
|
||||
// Calculate pre-imbalance
|
||||
Double preImbalance = getClusterImbalance(clusterId, new ArrayList<>(hostCpuFreeMap.values()),
|
||||
new ArrayList<>(hostMemoryFreeMap.values()), null);
|
||||
|
||||
// Build baseMetricsArray and hostIdToIndexMap
|
||||
String metricType = getClusterDrsMetric(clusterId);
|
||||
Map<Long, Ternary<Long, Long, Long>> baseMetricsMap = "cpu".equals(metricType) ? hostCpuFreeMap : hostMemoryFreeMap;
|
||||
double[] baseMetricsArray = new double[baseMetricsMap.size()];
|
||||
Map<Long, Integer> hostIdToIndexMap = new HashMap<>();
|
||||
|
||||
int index = 0;
|
||||
for (Map.Entry<Long, Ternary<Long, Long, Long>> entry : baseMetricsMap.entrySet()) {
|
||||
Long hostId = entry.getKey();
|
||||
Ternary<Long, Long, Long> metrics = entry.getValue();
|
||||
long used = metrics.first();
|
||||
long actualTotal = metrics.third() - metrics.second();
|
||||
long free = actualTotal - metrics.first();
|
||||
Double metricValue = getMetricValue(clusterId, used, free, actualTotal, null);
|
||||
if (metricValue != null) {
|
||||
baseMetricsArray[index] = metricValue;
|
||||
hostIdToIndexMap.put(hostId, index);
|
||||
index++;
|
||||
}
|
||||
}
|
||||
|
||||
// Trim array if some values were null
|
||||
if (index < baseMetricsArray.length) {
|
||||
double[] trimmed = new double[index];
|
||||
System.arraycopy(baseMetricsArray, 0, trimmed, 0, index);
|
||||
baseMetricsArray = trimmed;
|
||||
}
|
||||
|
||||
return new Ternary<>(preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
}
|
||||
|
||||
/**
|
||||
* <b>needsDrs tests</b>
|
||||
* <p>Scenarios to test for needsDrs
|
||||
|
|
@@ -183,8 +228,14 @@ public class BalancedTest {
|
|||
@Test
|
||||
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
|
||||
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
|
||||
|
||||
Ternary<Double, double[], Map<Long, Integer>> metricsData = prepareMetricsData();
|
||||
Double preImbalance = metricsData.first();
|
||||
double[] baseMetricsArray = metricsData.second();
|
||||
Map<Long, Integer> hostIdToIndexMap = metricsData.third();
|
||||
|
||||
Ternary<Double, Double, Double> result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost,
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false);
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
assertEquals(0.0, result.first(), 0.01);
|
||||
assertEquals(0.0, result.second(), 0.0);
|
||||
assertEquals(1.0, result.third(), 0.0);
|
||||
|
|
@@ -197,8 +248,14 @@ public class BalancedTest {
|
|||
@Test
|
||||
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
|
||||
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
|
||||
|
||||
Ternary<Double, double[], Map<Long, Integer>> metricsData = prepareMetricsData();
|
||||
Double preImbalance = metricsData.first();
|
||||
double[] baseMetricsArray = metricsData.second();
|
||||
Map<Long, Integer> hostIdToIndexMap = metricsData.third();
|
||||
|
||||
Ternary<Double, Double, Double> result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost,
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false);
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
assertEquals(0.4, result.first(), 0.01);
|
||||
assertEquals(0, result.second(), 0.0);
|
||||
assertEquals(1, result.third(), 0.0);
|
||||
|
|
|
|||
|
|
@@ -75,20 +75,22 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm {
    public Ternary<Double, Double, Double> getMetrics(Cluster cluster, VirtualMachine vm,
            ServiceOffering serviceOffering, Host destHost,
            Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap,
            Boolean requiresStorageMotion) throws ConfigurationException {
        Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()),
            Boolean requiresStorageMotion, Double preImbalance,
            double[] baseMetricsArray, Map<Long, Integer> hostIdToIndexMap) throws ConfigurationException {
        // Use provided pre-imbalance if available, otherwise calculate it
        if (preImbalance == null) {
            preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()),
                    new ArrayList<>(hostMemoryMap.values()), null);
        Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap);
        }

        logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}",
        // Use optimized post-imbalance calculation that adjusts only affected hosts
        Double postImbalance = getImbalancePostMigration(vm, destHost,
                cluster.getId(), ClusterDrsAlgorithm.getVmMetric(serviceOffering, cluster.getId()),
                baseMetricsArray, hostIdToIndexMap, hostCpuMap, hostMemoryMap);

        logger.trace("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost ID: {} destHost: {}",
                cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost);

        // This needs more research to determine the cost and benefit of a migration
        // TODO: Cost should be a factor of the VM size and the host capacity
        // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host
        final double improvement = postImbalance - preImbalance;
        final double cost = 0;
        final double benefit = 1;
        return new Ternary<>(improvement, cost, benefit);
        return calculateMetricsFromImbalances(postImbalance, preImbalance);
    }
}

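Balanced and Condensed now share calculateMetricsFromImbalances but pass its arguments in opposite order, so the same candidate migration is scored with opposite signs. With illustrative values preImbalance = 0.30 and postImbalance = 0.18, Balanced reports improvement = 0.30 - 0.18 = +0.12 (it rewards moves that even out load), whereas Condensed reports 0.18 - 0.30 = -0.12 (it only rewards moves that concentrate load onto fewer hosts). A hedged fragment with made-up numbers, assuming both classes can be instantiated directly:

// Sign convention only; the imbalance values are invented for illustration.
Ternary<Double, Double, Double> balancedScore = new Balanced().calculateMetricsFromImbalances(0.30, 0.18);   // improvement = +0.12
Ternary<Double, Double, Double> condensedScore = new Condensed().calculateMetricsFromImbalances(0.18, 0.30); // improvement = -0.12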
@@ -43,6 +43,9 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
|
@@ -121,6 +124,48 @@ public class CondensedTest {
|
|||
closeable.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method to prepare metrics data for getMetrics calls with optimized signature.
|
||||
* Calculates pre-imbalance and builds baseMetricsArray and hostIdToIndexMap.
|
||||
*
|
||||
* @return a Ternary containing preImbalance, baseMetricsArray, and hostIdToIndexMap
|
||||
*/
|
||||
private Ternary<Double, double[], Map<Long, Integer>> prepareMetricsData() throws ConfigurationException {
|
||||
// Calculate pre-imbalance
|
||||
Double preImbalance = getClusterImbalance(clusterId, new ArrayList<>(hostCpuFreeMap.values()),
|
||||
new ArrayList<>(hostMemoryFreeMap.values()), null);
|
||||
|
||||
// Build baseMetricsArray and hostIdToIndexMap
|
||||
String metricType = getClusterDrsMetric(clusterId);
|
||||
Map<Long, Ternary<Long, Long, Long>> baseMetricsMap = "cpu".equals(metricType) ? hostCpuFreeMap : hostMemoryFreeMap;
|
||||
double[] baseMetricsArray = new double[baseMetricsMap.size()];
|
||||
Map<Long, Integer> hostIdToIndexMap = new HashMap<>();
|
||||
|
||||
int index = 0;
|
||||
for (Map.Entry<Long, Ternary<Long, Long, Long>> entry : baseMetricsMap.entrySet()) {
|
||||
Long hostId = entry.getKey();
|
||||
Ternary<Long, Long, Long> metrics = entry.getValue();
|
||||
long used = metrics.first();
|
||||
long actualTotal = metrics.third() - metrics.second();
|
||||
long free = actualTotal - metrics.first();
|
||||
Double metricValue = getMetricValue(clusterId, used, free, actualTotal, null);
|
||||
if (metricValue != null) {
|
||||
baseMetricsArray[index] = metricValue;
|
||||
hostIdToIndexMap.put(hostId, index);
|
||||
index++;
|
||||
}
|
||||
}
|
||||
|
||||
// Trim array if some values were null
|
||||
if (index < baseMetricsArray.length) {
|
||||
double[] trimmed = new double[index];
|
||||
System.arraycopy(baseMetricsArray, 0, trimmed, 0, index);
|
||||
baseMetricsArray = trimmed;
|
||||
}
|
||||
|
||||
return new Ternary<>(preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>needsDrs tests
|
||||
* <p>Scenarios to test for needsDrs
|
||||
|
|
@@ -185,8 +230,14 @@ public class CondensedTest {
|
|||
@Test
|
||||
public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
|
||||
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu");
|
||||
|
||||
Ternary<Double, double[], Map<Long, Integer>> metricsData = prepareMetricsData();
|
||||
Double preImbalance = metricsData.first();
|
||||
double[] baseMetricsArray = metricsData.second();
|
||||
Map<Long, Integer> hostIdToIndexMap = metricsData.third();
|
||||
|
||||
Ternary<Double, Double, Double> result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost,
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false);
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
assertEquals(0.0, result.first(), 0.0);
|
||||
assertEquals(0, result.second(), 0.0);
|
||||
assertEquals(1, result.third(), 0.0);
|
||||
|
|
@@ -199,8 +250,14 @@ public class CondensedTest {
|
|||
@Test
|
||||
public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException {
|
||||
overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory");
|
||||
|
||||
Ternary<Double, double[], Map<Long, Integer>> metricsData = prepareMetricsData();
|
||||
Double preImbalance = metricsData.first();
|
||||
double[] baseMetricsArray = metricsData.second();
|
||||
Map<Long, Integer> hostIdToIndexMap = metricsData.third();
|
||||
|
||||
Ternary<Double, Double, Double> result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost,
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false);
|
||||
hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);
|
||||
assertEquals(-0.4, result.first(), 0.01);
|
||||
assertEquals(0, result.second(), 0.0);
|
||||
assertEquals(1, result.third(), 0.0);
|
||||
|
|
|
|||
|
|
@@ -47,6 +47,8 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;

import com.cloud.configuration.Resource;
import com.cloud.user.ResourceLimitService;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.Role;
import org.apache.cloudstack.acl.RolePermissionEntity;

@@ -398,6 +400,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
public ProjectManager projectManager;
@Inject
RoleService roleService;
@Inject
ResourceLimitService resourceLimitService;

private void logMessage(final Level logLevel, final String message, final Exception e) {
if (logLevel == Level.WARN) {
@@ -905,15 +909,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return response;
}

private void validateEndpointUrl() {
String csUrl = ApiServiceConfiguration.ApiServletPath.value();
if (csUrl == null || csUrl.contains("localhost")) {
String error = String.format("Global setting %s has to be set to the Management Server's API end point",
ApiServiceConfiguration.ApiServletPath.key());
throw new InvalidParameterValueException(error);
}
}

private DataCenter validateAndGetZoneForKubernetesCreateParameters(Long zoneId, Long networkId) {
DataCenter zone = dataCenterDao.findById(zoneId);
if (zone == null) {

@@ -1008,7 +1003,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}

private void validateManagedKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException {
validateEndpointUrl();
ApiServiceConfiguration.validateEndpointUrl();
final String name = cmd.getName();
final Long zoneId = cmd.getZoneId();
final Long kubernetesVersionId = cmd.getKubernetesVersionId();

@@ -1308,7 +1303,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION ));
}

validateEndpointUrl();
ApiServiceConfiguration.validateEndpointUrl();

if (minSize == null || maxSize == null) {
throw new InvalidParameterValueException("Autoscaling requires minsize and maxsize to be passed");
@@ -1350,8 +1345,58 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
validateServiceOfferingsForNodeTypesScale(serviceOfferingNodeTypeMap, defaultServiceOfferingId, kubernetesCluster, clusterVersion);

validateKubernetesClusterScaleSize(kubernetesCluster, clusterSize, maxClusterSize, zone);

ensureResourceLimitsForScale(kubernetesCluster, serviceOfferingNodeTypeMap,
clusterSize != null ? clusterSize : null,
kubernetesCluster.getAccountId());
}

protected void ensureResourceLimitsForScale(final KubernetesClusterVO cluster,
final Map<String, Long> requestedServiceOfferingIds,
final Long targetNodeCounts,
final Long accountId) {
long totalAdditionalVms = 0L;
long totalAdditionalCpuUnits = 0L;
long totalAdditionalRamMb = 0L;

List<KubernetesClusterVmMapVO> clusterVmMapVOS = kubernetesClusterVmMapDao.listByClusterIdAndVmType(cluster.getId(), WORKER);
long currentCount = clusterVmMapVOS != null ? clusterVmMapVOS.size() : 0L;
long desiredCount = targetNodeCounts != null ? targetNodeCounts : currentCount;
long additional = Math.max(0L, desiredCount - currentCount);
if (additional == 0L) {
return;
}

Long offeringId = (requestedServiceOfferingIds != null && requestedServiceOfferingIds.containsKey(WORKER.name())) ?
requestedServiceOfferingIds.get(WORKER.name()) :
getExistingServiceOfferingIdForNodeType(WORKER.name(), cluster);

if (offeringId == null) {
offeringId = cluster.getServiceOfferingId();
}

ServiceOffering so = serviceOfferingDao.findById(offeringId);
if (so == null) {
throw new InvalidParameterValueException(String.format("Invalid service offering for node type %s", WORKER.name()));
}

totalAdditionalVms += additional;
long effectiveCpu = (long) so.getCpu() * so.getSpeed();
totalAdditionalCpuUnits += effectiveCpu * additional;
totalAdditionalRamMb += so.getRamSize() * additional;

try {
resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.user_vm, totalAdditionalVms);
resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.cpu, totalAdditionalCpuUnits);
resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.memory, totalAdditionalRamMb);
} catch (Exception e) {
throw new CloudRuntimeException("Resource limits prevent scaling the cluster: " + e.getMessage(), e);
}
}
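// Illustrative sketch (not part of this change): the arithmetic above, worked through with made-up
// numbers. Scaling from 3 to 5 worker nodes on a 2-core, 2000 MHz, 4096 MB offering is expected to
// be checked against the account limits as 2 extra instances, 8000 CPU units and 8192 MB of RAM.
// The class below is hypothetical and only restates that calculation.
public final class ScaleDeltaSketch {
    private ScaleDeltaSketch() {
    }

    // Returns {additional VMs, additional CPU units, additional RAM in MB}.
    public static long[] additionalUsage(long currentWorkers, long desiredWorkers, int cpu, int speedMhz, int ramMb) {
        long additional = Math.max(0L, desiredWorkers - currentWorkers);
        long cpuUnits = (long) cpu * speedMhz * additional;   // mirrors effectiveCpu * additional above
        long ramTotal = (long) ramMb * additional;
        return new long[] {additional, cpuUnits, ramTotal};
    }

    public static void main(String[] args) {
        long[] delta = additionalUsage(3, 5, 2, 2000, 4096);
        System.out.println("vms=" + delta[0] + " cpu=" + delta[1] + " ramMb=" + delta[2]); // vms=2 cpu=8000 ramMb=8192
    }
}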

protected void validateServiceOfferingsForNodeTypesScale(Map<String, Long> map, Long defaultServiceOfferingId, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) {
for (String key : CLUSTER_NODES_TYPES_LIST) {
Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId);

@@ -1413,7 +1458,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) {
// Validate parameters
validateEndpointUrl();
ApiServiceConfiguration.validateEndpointUrl();

final Long kubernetesClusterId = cmd.getId();
final Long upgradeVersionId = cmd.getKubernetesVersionId();

@@ -685,7 +685,7 @@ public class KubernetesClusterActionWorker {
try {
String command = String.format("sudo %s/%s -u '%s' -k '%s' -s '%s'",
scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]);
scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.getApiServletPathValue(), keys[0], keys[1]);
Account account = accountDao.findById(kubernetesCluster.getAccountId());
if (account != null && account.getType() == Account.Type.PROJECT) {
String projectId = projectService.findByProjectAccountId(account.getId()).getUuid();
@@ -520,7 +520,6 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
String apiKey = user.getApiKey();
String secretKey = user.getSecretKey();
String csUrl = ApiServiceConfiguration.ApiServletPath.value();

if (apiKey == null) {
throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + " is empty. Please generate it");

@@ -530,9 +529,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. Please generate it");
}

if (csUrl == null || csUrl.contains("localhost")) {
throw new InvalidParameterValueException(String.format("Global setting %s has to be set to the Management Server's API end point", ApiServiceConfiguration.ApiServletPath.key()));
}
ApiServiceConfiguration.validateEndpointUrl();
}
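// Illustrative sketch (not part of this change): judging from the inline checks removed above and in
// the Kubernetes manager, ApiServiceConfiguration.validateEndpointUrl() is presumably a small guard
// of this shape. The body below is an assumption reconstructed from the deleted code, not the actual
// implementation shipped in the patch.
public static void validateEndpointUrl() {
    String csUrl = ApiServiceConfiguration.ApiServletPath.value();
    if (csUrl == null || csUrl.contains("localhost")) {
        throw new InvalidParameterValueException(String.format(
                "Global setting %s has to be set to the Management Server's API end point",
                ApiServiceConfiguration.ApiServletPath.key()));
    }
}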
@Override
@ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMPROFILE_CREATE, eventDescription = "creating autoscale vm profile", create = true)

@@ -337,7 +337,6 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
String apiKey = null;
String secretKey = null;
String csUrl = ApiServiceConfiguration.ApiServletPath.value();
Network.Provider provider = getLoadBalancerServiceProvider(lb);
if (Network.Provider.Netscaler.equals(provider)) {
Long autoscaleUserId = autoScaleVmProfile.getAutoScaleUserId();

@@ -358,13 +357,12 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. Please generate it");
}

if (csUrl == null || csUrl.contains("localhost")) {
throw new InvalidParameterValueException(String.format("Global setting %s has to be set to the Management Server's API end point", ApiServiceConfiguration.ApiServletPath.key()));
}
ApiServiceConfiguration.validateEndpointUrl();
}

LbAutoScaleVmProfile lbAutoScaleVmProfile =
new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid);
new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, ApiServiceConfiguration.getApiServletPathValue(), zoneId, domainId, serviceOfferingId, templateId,
vmName, lbNetworkUuid);
return new LbAutoScaleVmGroup(vmGroup, autoScalePolicies, lbAutoScaleVmProfile, currentState);
}
@@ -716,6 +716,7 @@ import com.cloud.dc.dao.PodVlanMapDao;
import com.cloud.dc.dao.VlanDao;
import com.cloud.dc.dao.VlanDetailsDao;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.deploy.DeploymentPlanningManager;

@@ -962,7 +963,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
@Inject
private HostPodDao _hostPodDao;
@Inject
private VgpuProfileDao vgpuProfileDao;
VgpuProfileDao vgpuProfileDao;
@Inject
private VMInstanceDao _vmInstanceDao;
@Inject
@@ -1484,17 +1485,27 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return false;
}

@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final VirtualMachine vm, final Long startIndex, final Long pageSize,
final String keyword, List<VirtualMachine> vmList) {
validateVmForHostMigration(vm);
/**
 * Get technically compatible hosts for VM migration (storage, hypervisor, UEFI filtering).
 * This determines which hosts are technically capable of hosting the VM based on
 * storage requirements, hypervisor capabilities, and UEFI requirements.
 *
 * @param vm The virtual machine to migrate
 * @param startIndex Starting index for pagination
 * @param pageSize Page size for pagination
 * @param keyword Keyword filter for host search
 * @return Ternary containing: (all hosts with count, filtered compatible hosts, storage motion requirements map)
 */
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> getTechnicallyCompatibleHosts(
final VirtualMachine vm,
final Long startIndex,
final Long pageSize,
final String keyword) {
// GPU check
if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) {
logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported");
// Return empty list.
return new Ternary<>(new Pair<>(new ArrayList<>(), 0),
new ArrayList<>(), new HashMap<>());
logger.info("Live Migration of GPU enabled VM : {} is not supported", vm);
return new Ternary<>(new Pair<>(new ArrayList<>(), 0), new ArrayList<>(), new HashMap<>());
}

final long srcHostId = vm.getHostId();
@@ -1508,6 +1519,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
ex.addProxyObject(vm.getUuid(), "vmId");
throw ex;
}

String srcHostVersion = srcHost.getHypervisorVersion();
if (HypervisorType.KVM.equals(srcHost.getHypervisorType()) && srcHostVersion == null) {
srcHostVersion = "";

@@ -1545,7 +1557,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
List<HostVO> allHosts;
List<HostVO> filteredHosts = null;
final Map<Host, Boolean> requiresStorageMotion = new HashMap<>();
DataCenterDeployment plan;

if (canMigrateWithStorage) {
Long podId = !VirtualMachine.Type.User.equals(vm.getType()) ? srcHost.getPodId() : null;
allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), podId, null, null, keyword,

@@ -1594,7 +1606,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
if (CollectionUtils.isEmpty(filteredHosts)) {
return new Ternary<>(new Pair<>(allHosts, allHostsPair.second()), new ArrayList<>(), new HashMap<>());
}
plan = new DataCenterDeployment(srcHost.getDataCenterId(), podId, null, null, null, null);
} else {
final Long cluster = srcHost.getClusterId();
if (logger.isDebugEnabled()) {
@@ -1603,22 +1614,38 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null,
null, srcHost.getId());
allHosts = allHostsPair.first();
plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
filteredHosts = allHosts;
}

final Pair<List<? extends Host>, Integer> otherHosts = new Pair<>(allHosts, allHostsPair.second());
final Pair<List<? extends Host>, Integer> allHostsPairResult = new Pair<>(allHosts, allHostsPair.second());
Pair<Boolean, List<HostVO>> uefiFilteredResult = filterUefiHostsForMigration(allHosts, filteredHosts, vm);
if (!uefiFilteredResult.first()) {
return new Ternary<>(otherHosts, new ArrayList<>(), new HashMap<>());
return new Ternary<>(allHostsPairResult, new ArrayList<>(), new HashMap<>());
}
filteredHosts = uefiFilteredResult.second();

List<Host> suitableHosts = new ArrayList<>();
return new Ternary<>(allHostsPairResult, filteredHosts, requiresStorageMotion);
}

/**
 * Apply affinity group constraints and other exclusion rules for VM migration.
 * This builds an ExcludeList based on affinity groups, DPDK requirements, and dedicated resources.
 *
 * @param vm The virtual machine to migrate
 * @param vmProfile The VM profile
 * @param plan The deployment plan
 * @param vmList List of VMs with current/simulated placements for affinity processing
 * @return ExcludeList containing hosts to avoid
 */
@Override
public ExcludeList applyAffinityConstraints(VirtualMachine vm, VirtualMachineProfile vmProfile, DeploymentPlan plan, List<VirtualMachine> vmList) {
final ExcludeList excludes = new ExcludeList();
excludes.addHost(srcHostId);
excludes.addHost(vm.getHostId());

if (dpdkHelper.isVMDpdkEnabled(vm.getId())) {
excludeNonDPDKEnabledHosts(plan, excludes);
if (plan instanceof DataCenterDeployment) {
excludeNonDPDKEnabledHosts((DataCenterDeployment) plan, excludes);
}
}

// call affinitygroup chain

@@ -1631,13 +1658,37 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}

if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) {
final DataCenterVO dc = _dcDao.findById(srcHost.getDataCenterId());
final DataCenterVO dc = _dcDao.findById(plan.getDataCenterId());
_dpMgr.checkForNonDedicatedResources(vmProfile, dc, excludes);
}

return excludes;
}
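// Illustrative sketch (not part of this change): a hypothetical helper showing how a caller could
// apply the ExcludeList produced above to a candidate host list. ExcludeList.shouldAvoid(Host) is
// assumed to be the planner-side check used elsewhere in CloudStack; the helper itself does not
// exist in the patch.
protected List<Host> dropExcludedHosts(List<? extends Host> candidates, ExcludeList excludes) {
    List<Host> allowed = new ArrayList<>();
    for (Host candidate : candidates) {
        if (!excludes.shouldAvoid(candidate)) {
            allowed.add(candidate);
        }
    }
    return allowed;
}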

/**
 * Get hosts with available capacity using host allocators, and apply architecture filtering.
 *
 * @param vm The virtual machine (for architecture filtering)
 * @param vmProfile The VM profile
 * @param plan The deployment plan
 * @param compatibleHosts List of technically compatible hosts
 * @param excludes ExcludeList with hosts to avoid
 * @param srcHost Source host (for architecture filtering)
 * @return List of suitable hosts with capacity
 */
protected List<Host> getCapableSuitableHosts(
final VirtualMachine vm,
final VirtualMachineProfile vmProfile,
final DataCenterDeployment plan,
final List<? extends Host> compatibleHosts,
final ExcludeList excludes,
final Host srcHost) {
List<Host> suitableHosts = new ArrayList<>();

for (final HostAllocator allocator : hostAllocators) {
if (CollectionUtils.isNotEmpty(filteredHosts)) {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, filteredHosts, HostAllocator.RETURN_UPTO_ALL, false);
if (CollectionUtils.isNotEmpty(compatibleHosts)) {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, compatibleHosts, HostAllocator.RETURN_UPTO_ALL, false);
} else {
suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
}

@@ -1663,6 +1714,43 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
}

return suitableHosts;
}

@Override
public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(final VirtualMachine vm, final Long startIndex, final Long pageSize,
final String keyword, List<VirtualMachine> vmList) {
validateVmForHostMigration(vm);

// Get technically compatible hosts (storage, hypervisor, UEFI)
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> compatibilityResult =
getTechnicallyCompatibleHosts(vm, startIndex, pageSize, keyword);

Pair<List<? extends Host>, Integer> allHostsPair = compatibilityResult.first();
List<? extends Host> filteredHosts = compatibilityResult.second();
Map<Host, Boolean> requiresStorageMotion = compatibilityResult.third();

// If no compatible hosts, return early
if (CollectionUtils.isEmpty(filteredHosts)) {
final Pair<List<? extends Host>, Integer> otherHosts = new Pair<>(allHostsPair.first(), allHostsPair.second());
return new Ternary<>(otherHosts, new ArrayList<>(), requiresStorageMotion);
}

// Create deployment plan and VM profile
final Host srcHost = _hostDao.findById(vm.getHostId());
final DataCenterDeployment plan = new DataCenterDeployment(
srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(
vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null);

// Apply affinity constraints
final ExcludeList excludes = applyAffinityConstraints(vm, vmProfile, plan, vmList);

// Get hosts with capacity
List<Host> suitableHosts = getCapableSuitableHosts(vm, vmProfile, plan, filteredHosts, excludes, srcHost);

final Pair<List<? extends Host>, Integer> otherHosts = new Pair<>(allHostsPair.first(), allHostsPair.second());
return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion);
}
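// Illustrative sketch (not part of this change): how a hypothetical caller might unpack the Ternary
// returned above. The variable names are examples; only the Ternary layout (all hosts with count,
// suitable hosts, storage-motion flags) comes from the method itself.
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> result =
        listHostsForMigrationOfVM(vm, 0L, 500L, null, null);
List<? extends Host> allHosts = result.first().first();
Integer totalHostCount = result.first().second();
for (Host target : result.second()) {
    boolean needsStorageMotion = Boolean.TRUE.equals(result.third().get(target));
    logger.debug("Migration candidate {} (storage motion required: {})", target, needsStorageMotion);
}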

@@ -1966,7 +2054,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
return suitablePools;
}

private Pair<List<HostVO>, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type,
Pair<List<HostVO>, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type,
final Object state, final Object zone, final Object pod, final Object cluster, final Object id, final Object keyword,
final Object resourceState, final Object haHosts, final Object hypervisorType, final Object hypervisorVersion, final Object... excludes) {
final Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize);

@@ -4133,7 +4221,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
cmdList.add(StartInternalLBVMCmd.class);
cmdList.add(ListInternalLBVMsCmd.class);
cmdList.add(ListNetworkIsolationMethodsCmd.class);
cmdList.add(ListNetworkIsolationMethodsCmd.class);
cmdList.add(CreateNetworkACLListCmd.class);
cmdList.add(DeleteNetworkACLListCmd.class);
cmdList.add(ListNetworkACLListsCmd.class);
@ -18,22 +18,24 @@
|
|||
*/
|
||||
package com.cloud.storage;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.inject.Inject;
|
||||
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import com.cloud.exception.InsufficientCapacityException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.exception.ResourceUnavailableException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
import org.apache.commons.collections4.CollectionUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
|
|
@ -49,17 +51,10 @@ import com.cloud.server.ManagementServer;
|
|||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.storage.dao.StoragePoolWorkDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.user.User;
|
||||
import com.cloud.user.dao.UserDao;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.ConsoleProxyVO;
|
||||
import com.cloud.vm.DomainRouterVO;
|
||||
import com.cloud.vm.SecondaryStorageVmVO;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import com.cloud.vm.dao.ConsoleProxyDao;
|
||||
|
|
@ -118,86 +113,151 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
|
|||
|
||||
@Override
|
||||
public boolean maintain(DataStore store, Map<String,String> details) {
|
||||
Long userId = CallContext.current().getCallingUserId();
|
||||
User user = _userDao.findById(userId);
|
||||
Account account = CallContext.current().getCallingAccount();
|
||||
StoragePoolVO pool = primaryDataStoreDao.findById(store.getId());
|
||||
try {
|
||||
List<StoragePoolVO> spes = null;
|
||||
// Handling Zone and Cluster wide storage scopes.
|
||||
// if the storage is ZONE wide then we pass podid and cluster id as null as they will be empty for ZWPS
|
||||
if (pool.getScope() == ScopeType.ZONE) {
|
||||
spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE);
|
||||
} else {
|
||||
spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER);
|
||||
}
|
||||
for (StoragePoolVO sp : spes) {
|
||||
if (sp.getParent() != pool.getParent() && sp.getId() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode
|
||||
if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
|
||||
throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", sp));
|
||||
}
|
||||
}
|
||||
}
|
||||
StoragePool storagePool = (StoragePool)store;
|
||||
getStoragePoolForSpecification(pool);
|
||||
|
||||
//Handeling the Zone wide and cluster wide primay storage
|
||||
List<HostVO> hosts = new ArrayList<HostVO>();
|
||||
// if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand
|
||||
//TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue.
|
||||
if (pool.getScope().equals(ScopeType.ZONE)) {
|
||||
if (HypervisorType.Any.equals(pool.getHypervisor())) {
|
||||
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
|
||||
}
|
||||
else {
|
||||
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
|
||||
}
|
||||
} else {
|
||||
hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
|
||||
}
|
||||
List<HostVO> hosts = getHostsForStoragePool(pool);
|
||||
|
||||
if (hosts == null || hosts.size() == 0) {
|
||||
pool.setStatus(StoragePoolStatus.Maintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
return true;
|
||||
} else {
|
||||
// set the pool state to prepare for maintenance
|
||||
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
}
|
||||
// remove heartbeat
|
||||
for (HostVO host : hosts) {
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool);
|
||||
if (MapUtils.isNotEmpty(details) && storageManager.canDisconnectHostFromStoragePool(host, storagePool)) {
|
||||
cmd.setDetails(details);
|
||||
}
|
||||
final Answer answer = agentMgr.easySend(host.getId(), cmd);
|
||||
if (answer == null || !answer.getResult()) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? "answer null" : answer.getDetails()));
|
||||
}
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool false succeeded");
|
||||
}
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
|
||||
logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool);
|
||||
storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (setNextStateForMaintenance(hosts, pool) == StoragePoolStatus.PrepareForMaintenance) {
|
||||
removeHeartbeatForHostsFromPool(hosts, pool);
|
||||
// check to see if other ps exist
|
||||
// if they do, then we can migrate over the system vms to them
|
||||
// if they don't, then just stop all vms on this one
|
||||
List<StoragePoolVO> upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up);
|
||||
boolean restart = true;
|
||||
if (upPools == null || upPools.size() == 0) {
|
||||
restart = false;
|
||||
}
|
||||
boolean restart = !CollectionUtils.isEmpty(upPools);
|
||||
|
||||
// 2. Get a list of all the ROOT volumes within this storage pool
|
||||
List<VolumeVO> allVolumes = volumeDao.findByPoolId(pool.getId());
|
||||
|
||||
// 3. Enqueue to the work queue
|
||||
enqueueMigrationsForVolumes(allVolumes, pool);
|
||||
// 4. Process the queue
|
||||
processMigrationWorkloads(pool, restart);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception in enabling primary storage maintenance:", e);
|
||||
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
// TODO decide on what recovery is possible
|
||||
throw new CloudRuntimeException(e.getMessage());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store) {
|
||||
return cancelMaintain(store, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store, Map<String,String> details) {
|
||||
// Change the storage state back to up
|
||||
StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId());
|
||||
StoragePool pool = (StoragePool)store;
|
||||
|
||||
List<HostVO> hosts = getHostsForStoragePool(poolVO);
|
||||
|
||||
if (CollectionUtils.isEmpty(hosts)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Pair<Map<String, String>, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null);
|
||||
addHeartbeatToHostsInPool(hosts, pool, nfsMountOpts);
|
||||
|
||||
// 2. Get a list of pending work for this queue
|
||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
|
||||
|
||||
// 3. work through the queue
|
||||
cancelMigrationWorkloads(pendingWork);
|
||||
return false;
|
||||
}
|
||||
|
||||
private StoragePoolStatus setNextStateForMaintenance(List<HostVO> hosts, StoragePoolVO pool) {
|
||||
if (CollectionUtils.isEmpty(hosts)) {
|
||||
pool.setStatus(StoragePoolStatus.Maintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
return StoragePoolStatus.Maintenance;
|
||||
} else {
|
||||
// set the pool state to prepare for maintenance
|
||||
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
return StoragePoolStatus.PrepareForMaintenance;
|
||||
}
|
||||
}
|
||||
|
||||
private void processMigrationWorkloads(StoragePoolVO pool, boolean restart) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException {
|
||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId());
|
||||
|
||||
for (StoragePoolWorkVO work : pendingWork) {
|
||||
// shut down the running vms
|
||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||
|
||||
if (vmInstance == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (vmInstance.getType()) {
|
||||
case ConsoleProxy:
|
||||
case SecondaryStorageVm:
|
||||
case DomainRouter:
|
||||
handleVmMigration(restart, work, vmInstance);
|
||||
break;
|
||||
case User:
|
||||
handleStopVmForMigration(work, vmInstance);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void cancelMigrationWorkloads(List<StoragePoolWorkVO> pendingWork) {
|
||||
for (StoragePoolWorkVO work : pendingWork) {
|
||||
try {
|
||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||
|
||||
if (vmInstance == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (vmInstance.getType()) {
|
||||
case ConsoleProxy:
|
||||
case SecondaryStorageVm:
|
||||
case DomainRouter:
|
||||
handleVmStart(work, vmInstance);
|
||||
break;
|
||||
case User:
|
||||
handleUserVmStart(work, vmInstance);
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.debug("Failed start vm", e);
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void handleStopVmForMigration(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException {
|
||||
vmMgr.advanceStop(vmInstance.getUuid(), false);
|
||||
// update work status
|
||||
work.setStoppedForMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
private void handleVmMigration(boolean restart, StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException {
|
||||
handleStopVmForMigration(work, vmInstance);
|
||||
|
||||
if (restart) {
|
||||
handleVmStart(work, vmInstance);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException {
|
||||
vmMgr.advanceStart(vmInstance.getUuid(), null, null);
|
||||
// update work queue
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
private void enqueueMigrationsForVolumes(List<VolumeVO> allVolumes, StoragePoolVO pool) {
|
||||
for (VolumeVO volume : allVolumes) {
|
||||
VMInstanceVO vmInstance = vmDao.findById(volume.getInstanceId());
|
||||
|
||||
|
|
@ -223,135 +283,113 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Process the queue
|
||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId());
|
||||
|
||||
for (StoragePoolWorkVO work : pendingWork) {
|
||||
// shut down the running vms
|
||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||
|
||||
if (vmInstance == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// if the instance is of type consoleproxy, call the console
|
||||
// proxy
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) {
|
||||
// call the consoleproxymanager
|
||||
ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId());
|
||||
vmMgr.advanceStop(consoleProxy.getUuid(), false);
|
||||
// update work status
|
||||
work.setStoppedForMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
|
||||
if (restart) {
|
||||
|
||||
vmMgr.advanceStart(consoleProxy.getUuid(), null, null);
|
||||
// update work status
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
private void removeHeartbeatForHostsFromPool(List<HostVO> hosts, StoragePool storagePool) {
|
||||
// remove heartbeat
|
||||
for (HostVO host : hosts) {
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool);
|
||||
final Answer answer = agentMgr.easySend(host.getId(), cmd);
|
||||
if (answer == null || !answer.getResult()) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool false failed due to {}", ((answer == null) ? "answer null" : answer.getDetails()));
|
||||
}
|
||||
}
|
||||
|
||||
// if the instance is of type uservm, call the user vm manager
|
||||
if (vmInstance.getType() == VirtualMachine.Type.User) {
|
||||
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
||||
vmMgr.advanceStop(userVm.getUuid(), false);
|
||||
// update work status
|
||||
work.setStoppedForMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
// if the instance is of type secondary storage vm, call the
|
||||
// secondary storage vm manager
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) {
|
||||
SecondaryStorageVmVO secStrgVm = _secStrgDao.findById(vmInstance.getId());
|
||||
vmMgr.advanceStop(secStrgVm.getUuid(), false);
|
||||
// update work status
|
||||
work.setStoppedForMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
|
||||
if (restart) {
|
||||
vmMgr.advanceStart(secStrgVm.getUuid(), null, null);
|
||||
// update work status
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
}
|
||||
|
||||
// if the instance is of type domain router vm, call the network
|
||||
// manager
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) {
|
||||
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
||||
vmMgr.advanceStop(domR.getUuid(), false);
|
||||
// update work status
|
||||
work.setStoppedForMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
|
||||
if (restart) {
|
||||
vmMgr.advanceStart(domR.getUuid(), null, null);
|
||||
// update work status
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
} else {
|
||||
reportSucceededModifyStorePool(storagePool, (ModifyStoragePoolAnswer) answer, host, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception in enabling primary storage maintenance:", e);
|
||||
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
|
||||
primaryDataStoreDao.update(pool.getId(), pool);
|
||||
throw new CloudRuntimeException(e.getMessage());
|
||||
|
||||
private void reportSucceededModifyStorePool(StoragePool storagePool, ModifyStoragePoolAnswer answer, HostVO host, boolean add) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool succeeded for {}", add ? "adding" : "removing");
|
||||
}
|
||||
if (storagePool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
|
||||
logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", storagePool);
|
||||
storageManager.syncDatastoreClusterStoragePool(storagePool.getId(), answer.getDatastoreClusterChildren(), host.getId());
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store) {
|
||||
return cancelMaintain(store, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store, Map<String,String> details) {
|
||||
// Change the storage state back to up
|
||||
Long userId = CallContext.current().getCallingUserId();
|
||||
User user = _userDao.findById(userId);
|
||||
Account account = CallContext.current().getCallingAccount();
|
||||
StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId());
|
||||
StoragePool pool = (StoragePool)store;
|
||||
|
||||
//Handeling the Zone wide and cluster wide primay storage
|
||||
List<HostVO> hosts = new ArrayList<HostVO>();
|
||||
// if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand
|
||||
if (poolVO.getScope().equals(ScopeType.ZONE)) {
|
||||
/**
|
||||
* Handling the Zone wide and cluster wide primary storage
|
||||
* if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZoneWideStoragePools created to send ModifyStoragePoolCommand
|
||||
* TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue.
|
||||
* @param pool pool to check for connected hosts
|
||||
* @return a list of connected hosts
|
||||
*/
|
||||
private List<HostVO> getHostsForStoragePool(StoragePoolVO pool) {
|
||||
List<HostVO> hosts;
|
||||
if (pool.getScope().equals(ScopeType.ZONE)) {
|
||||
if (HypervisorType.Any.equals(pool.getHypervisor())) {
|
||||
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
|
||||
}
|
||||
else {
|
||||
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(poolVO.getHypervisor(), pool.getDataCenterId());
|
||||
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
|
||||
}
|
||||
} else {
|
||||
hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
|
||||
}
|
||||
|
||||
if (hosts == null || hosts.size() == 0) {
|
||||
return true;
|
||||
return hosts;
|
||||
}
|
||||
|
||||
Pair<Map<String, String>, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null);
|
||||
// add heartbeat
|
||||
/**
|
||||
* Handling Zone and Cluster wide storage scopes. Depending on the scope of the pool, check for other storage pools in the same scope
|
||||
* If the storage is ZONE wide then we pass podId and cluster id as null as they will be empty for Zone wide storage
|
||||
*
|
||||
* @param pool pool to check for other pools in the same scope
|
||||
*/
|
||||
private void getStoragePoolForSpecification(StoragePoolVO pool) {
|
||||
List<StoragePoolVO> storagePools;
|
||||
if (pool.getScope() == ScopeType.ZONE) {
|
||||
storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE);
|
||||
} else {
|
||||
storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER);
|
||||
}
|
||||
checkHierarchyForPreparingForMaintenance(pool, storagePools);
|
||||
}
|
||||
|
||||
/**
|
||||
* If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode
|
||||
* @param pool target to put in maintenance
|
||||
* @param storagePools list of possible peers/parents/children
|
||||
*/
|
||||
private static void checkHierarchyForPreparingForMaintenance(StoragePoolVO pool, List<StoragePoolVO> storagePools) {
|
||||
for (StoragePoolVO storagePool : storagePools) {
|
||||
if (!(storagePool.getParent().equals(pool.getParent()) || !pool.getParent().equals(storagePool.getId())) &&
|
||||
(storagePool.getStatus() == StoragePoolStatus.PrepareForMaintenance)) {
|
||||
throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", storagePool));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* // check if the vm has a root volume. If not, remove the item from the queue, the vm should be
|
||||
* // started only when it has at least one root volume attached to it
|
||||
* // don't allow to start vm that doesn't have a root volume
|
||||
* @param work work item to handle for this VM
|
||||
* @param vmInstance VM to start
|
||||
* @throws InsufficientCapacityException no migration target found
|
||||
* @throws ResourceUnavailableException a resource required for migration is not in the expected state
|
||||
* @throws OperationTimedoutException migration operation took too long
|
||||
*/
|
||||
private void handleUserVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException {
|
||||
if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) {
|
||||
_storagePoolWorkDao.remove(work.getId());
|
||||
} else {
|
||||
handleVmStart(work, vmInstance);
|
||||
}
|
||||
}
|
||||
|
||||
private void addHeartbeatToHostsInPool(List<HostVO> hosts, StoragePool pool, Pair<Map<String, String>, Boolean> nfsMountOpts) {
|
||||
for (HostVO host : hosts) {
|
||||
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool);
|
||||
if (MapUtils.isNotEmpty(details)) {
|
||||
msPoolCmd.setDetails(details);
|
||||
}
|
||||
ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first());
|
||||
final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
|
||||
if (answer == null || !answer.getResult()) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails()));
|
||||
logger.debug("ModifyStoragePool add failed due to {}", ((answer == null) ? "answer null" : answer.getDetails()));
|
||||
}
|
||||
if (answer != null && nfsMountOpts.second()) {
|
||||
logger.error(String.format("Unable to attach storage pool to the host %s due to %s", host, answer.getDetails()));
|
||||
logger.error("Unable to attach storage pool to the host {} due to {}", host, answer.getDetails());
|
||||
StringBuilder exceptionSB = new StringBuilder("Unable to attach storage pool to the host ").append(host.getName());
|
||||
String reason = storageManager.getStoragePoolMountFailureReason(answer.getDetails());
|
||||
if (reason!= null) {
|
||||
|
|
@ -360,84 +398,9 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
|
|||
throw new CloudRuntimeException(exceptionSB.toString());
|
||||
}
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("ModifyStoragePool add succeeded");
|
||||
}
|
||||
storageManager.updateStoragePoolHostVOAndBytes(pool, host.getId(), (ModifyStoragePoolAnswer) answer);
|
||||
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
|
||||
logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool);
|
||||
storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
|
||||
reportSucceededModifyStorePool(pool, (ModifyStoragePoolAnswer) answer, host, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Get a list of pending work for this queue
|
||||
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
|
||||
|
||||
// 3. work through the queue
|
||||
for (StoragePoolWorkVO work : pendingWork) {
|
||||
try {
|
||||
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
|
||||
|
||||
if (vmInstance == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// if the instance is of type consoleproxy, call the console
|
||||
// proxy
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) {
|
||||
|
||||
ConsoleProxyVO consoleProxy = _consoleProxyDao
|
||||
.findById(vmInstance.getId());
|
||||
vmMgr.advanceStart(consoleProxy.getUuid(), null, null);
|
||||
// update work queue
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
// if the instance is of type ssvm, call the ssvm manager
|
||||
if (vmInstance.getType().equals(
|
||||
VirtualMachine.Type.SecondaryStorageVm)) {
|
||||
SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
|
||||
.getId());
|
||||
vmMgr.advanceStart(ssVm.getUuid(), null, null);
|
||||
|
||||
// update work queue
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
// if the instance is of type domain router vm, call the network
|
||||
// manager
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) {
|
||||
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
|
||||
vmMgr.advanceStart(domR.getUuid(), null, null);
|
||||
// update work queue
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
|
||||
// if the instance is of type user vm, call the user vm manager
|
||||
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
|
||||
// check if the vm has a root volume. If not, remove the item from the queue, the vm should be
|
||||
// started only when it has at least one root volume attached to it
|
||||
// don't allow to start vm that doesn't have a root volume
|
||||
if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) {
|
||||
_storagePoolWorkDao.remove(work.getId());
|
||||
} else {
|
||||
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
|
||||
|
||||
vmMgr.advanceStart(userVm.getUuid(), null, null);
|
||||
work.setStartedAfterMaintenance(true);
|
||||
_storagePoolWorkDao.update(work.getId(), work);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.debug("Failed start vm", e);
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1405,6 +1405,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
- New role should not be of type Admin with domain other than ROOT domain
*/
protected void validateRoleChange(Account account, Role role, Account caller) {
if (account.getRoleId() != null && account.getRoleId().equals(role.getId())) {
return;
}
Role currentRole = roleService.findRole(account.getRoleId());
Role callerRole = roleService.findRole(caller.getRoleId());
String errorMsg = String.format("Unable to update account role to %s, ", role.getName());

@@ -1420,6 +1423,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
throw new PermissionDeniedException(String.format("%s as either current or new role has higher " +
"privileges than the caller", errorMsg));
}
if (account.isDefault()) {
throw new PermissionDeniedException(String.format("%s as the account is a default account", errorMsg));
}
if (role.getRoleType().equals(RoleType.Admin) && account.getDomainId() != Domain.ROOT_DOMAIN) {
throw new PermissionDeniedException(String.format("%s as the user does not belong to the ROOT domain",
errorMsg));
|||
|
|
@ -24,6 +24,8 @@ import com.cloud.api.query.dao.HostJoinDao;
|
|||
import com.cloud.api.query.vo.HostJoinVO;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.deploy.DataCenterDeployment;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.domain.Domain;
|
||||
import com.cloud.event.ActionEventUtils;
|
||||
import com.cloud.event.EventTypes;
|
||||
|
|
@ -51,6 +53,8 @@ import com.cloud.utils.db.TransactionCallback;
|
|||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import com.cloud.vm.VirtualMachineProfileImpl;
|
||||
import com.cloud.vm.VmDetailConstants;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import org.apache.cloudstack.api.ApiCommandResourceType;
|
||||
|
|
@ -62,6 +66,8 @@ import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
|
|||
import org.apache.cloudstack.api.response.ClusterDrsPlanMigrationResponse;
|
||||
import org.apache.cloudstack.api.response.ClusterDrsPlanResponse;
|
||||
import org.apache.cloudstack.api.response.ListResponse;
|
||||
import org.apache.cloudstack.affinity.AffinityGroupVMMapVO;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
import org.apache.cloudstack.cluster.dao.ClusterDrsPlanDao;
|
||||
import org.apache.cloudstack.cluster.dao.ClusterDrsPlanMigrationDao;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
|
|
@ -71,6 +77,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager;
|
|||
import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO;
|
||||
import org.apache.cloudstack.jobs.JobInfo;
|
||||
import org.apache.cloudstack.managed.context.ManagedContextTimerTask;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.lang3.time.DateUtils;
|
||||
|
||||
|
|
@ -81,13 +88,18 @@ import java.util.Calendar;
|
|||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.Timer;
|
||||
import java.util.TimerTask;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static com.cloud.org.Grouping.AllocationState.Disabled;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric;
|
||||
import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue;
|
||||
|
||||
public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsService, PluggableService {
|
||||
|
||||
|
|
@@ -125,6 +137,9 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
@Inject
ManagementServer managementServer;

@Inject
AffinityGroupVMMapDao affinityGroupVMMapDao;

List<ClusterDrsAlgorithm> drsAlgorithms = new ArrayList<>();

Map<String, ClusterDrsAlgorithm> drsAlgorithmMap = new HashMap<>();

@@ -318,19 +333,14 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
* @throws ConfigurationException
* If there is an error in the DRS configuration.
*/
List<Ternary<VirtualMachine, Host, Host>> getDrsPlan(Cluster cluster,
int maxIterations) throws ConfigurationException {
List<Ternary<VirtualMachine, Host, Host>> migrationPlan = new ArrayList<>();
List<Ternary<VirtualMachine, Host, Host>> getDrsPlan(Cluster cluster, int maxIterations) throws ConfigurationException {

if (cluster.getAllocationState() == Disabled || maxIterations <= 0) {
return Collections.emptyList();
}
ClusterDrsAlgorithm algorithm = getDrsAlgorithm(ClusterDrsAlgorithm.valueIn(cluster.getId()));
List<HostVO> hostList = hostDao.findByClusterId(cluster.getId());
List<VirtualMachine> vmList = new ArrayList<>(vmInstanceDao.listByClusterId(cluster.getId()));

int iteration = 0;

Map<Long, Host> hostMap = hostList.stream().collect(Collectors.toMap(HostVO::getId, host -> host));

Map<Long, List<VirtualMachine>> hostVmMap = getHostVmMap(hostList, vmList);
@@ -357,10 +367,39 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId()));
}

Pair<Map<Long, List<? extends Host>>, Map<Long, Map<Host, Boolean>>> hostCache = getCompatibleHostAndVmStorageMotionCache(vmList);
Map<Long, List<? extends Host>> vmToCompatibleHostsCache = hostCache.first();
Map<Long, Map<Host, Boolean>> vmToStorageMotionCache = hostCache.second();

Set<Long> vmsWithAffinityGroups = getVmsWithAffinityGroups(vmList, vmToCompatibleHostsCache);

return getMigrationPlans(maxIterations, cluster, hostMap, vmList, vmsWithAffinityGroups, vmToCompatibleHostsCache,
vmToStorageMotionCache, vmIdServiceOfferingMap, originalHostIdVmIdMap, hostVmMap, hostCpuMap, hostMemoryMap);
}

private List<Ternary<VirtualMachine, Host, Host>> getMigrationPlans(
long maxIterations, Cluster cluster, Map<Long, Host> hostMap, List<VirtualMachine> vmList,
Set<Long> vmsWithAffinityGroups, Map<Long, List<? extends Host>> vmToCompatibleHostsCache,
Map<Long, Map<Host, Boolean>> vmToStorageMotionCache, Map<Long, ServiceOffering> vmIdServiceOfferingMap,
Map<Long, List<Long>> originalHostIdVmIdMap, Map<Long, List<VirtualMachine>> hostVmMap,
Map<Long, Ternary<Long, Long, Long>> hostCpuMap, Map<Long, Ternary<Long, Long, Long>> hostMemoryMap
) throws ConfigurationException {
ClusterDrsAlgorithm algorithm = getDrsAlgorithm(ClusterDrsAlgorithm.valueIn(cluster.getId()));
int iteration = 0;
List<Ternary<VirtualMachine, Host, Host>> migrationPlan = new ArrayList<>();
while (iteration < maxIterations && algorithm.needsDrs(cluster, new ArrayList<>(hostCpuMap.values()),
new ArrayList<>(hostMemoryMap.values()))) {

logger.debug("Starting DRS iteration {} for cluster {}", iteration + 1, cluster);
// Re-evaluate affinity constraints with current (simulated) VM placements
Map<Long, ExcludeList> vmToExcludesMap = getVmToExcludesMap(vmList, hostMap, vmsWithAffinityGroups,
vmToCompatibleHostsCache, vmIdServiceOfferingMap);

logger.debug("Completed affinity evaluation for DRS iteration {} for cluster {}", iteration + 1, cluster);

Pair<VirtualMachine, Host> bestMigration = getBestMigration(cluster, algorithm, vmList,
vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap);
vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap,
vmToCompatibleHostsCache, vmToStorageMotionCache, vmToExcludesMap);
VirtualMachine vm = bestMigration.first();
Host destHost = bestMigration.second();
if (destHost == null || vm == null || originalHostIdVmIdMap.get(destHost.getId()).contains(vm.getId())) {

@@ -372,8 +411,6 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId());
migrationPlan.add(new Ternary<>(vm, hostMap.get(vm.getHostId()), hostMap.get(destHost.getId())));

hostVmMap.get(vm.getHostId()).remove(vm);
hostVmMap.get(destHost.getId()).add(vm);
hostVmMap.get(vm.getHostId()).remove(vm);
hostVmMap.get(destHost.getId()).add(vm);

@@ -391,6 +428,106 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
return migrationPlan;
}
|
||||
|
||||
private Map<Long, ExcludeList> getVmToExcludesMap(List<VirtualMachine> vmList, Map<Long, Host> hostMap,
Set<Long> vmsWithAffinityGroups, Map<Long, List<? extends Host>> vmToCompatibleHostsCache,
Map<Long, ServiceOffering> vmIdServiceOfferingMap) {
Map<Long, ExcludeList> vmToExcludesMap = new HashMap<>();
for (VirtualMachine vm : vmList) {
if (vmToCompatibleHostsCache.containsKey(vm.getId())) {
Host srcHost = hostMap.get(vm.getHostId());
if (srcHost != null) {
// Only call expensive applyAffinityConstraints for VMs with affinity groups
// For VMs without affinity groups, create minimal ExcludeList (just source host)
ExcludeList excludes;
if (vmsWithAffinityGroups.contains(vm.getId())) {
DataCenterDeployment plan = new DataCenterDeployment(
srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(),
null, null, null);
VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null,
vmIdServiceOfferingMap.get(vm.getId()), null, null);
excludes = managementServer.applyAffinityConstraints(
vm, vmProfile, plan, vmList);
} else {
// VM has no affinity groups - create minimal ExcludeList (just source host)
excludes = new ExcludeList();
excludes.addHost(vm.getHostId());
}
vmToExcludesMap.put(vm.getId(), excludes);
}
}
}
return vmToExcludesMap;
}
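// Illustrative sketch (restricted to the ExcludeList#addHost and ExcludeList#shouldAvoid calls
// already used in this class) of how the per-VM ExcludeList built above is meant to be consumed:
// each candidate destination host is checked against it, so a VM is never "migrated" onto its own
// source host or onto a host its affinity groups forbid.
//
//     ExcludeList excludes = vmToExcludesMap.get(vm.getId());
//     for (Host candidate : vmToCompatibleHostsCache.get(vm.getId())) {
//         if (excludes != null && excludes.shouldAvoid(candidate)) {
//             continue; // ruled out by affinity (or by being the source host)
//         }
//         // candidate remains eligible for cost/benefit evaluation
//     }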
/**
* Pre-compute suitable hosts (once per eligible VM - never changes)
* Use listHostsForMigrationOfVM to get hosts validated by getCapableSuitableHosts
* This ensures DRS uses the same validation as "find host for migration" command
*
* @param vmList List of VMs to pre-compute suitable hosts for
* @return Pair of VM to compatible hosts map and VM to storage motion requirement map
*/
private Pair<Map<Long, List<? extends Host>>, Map<Long, Map<Host, Boolean>>> getCompatibleHostAndVmStorageMotionCache(
List<VirtualMachine> vmList
) {
Map<Long, List<? extends Host>> vmToCompatibleHostsCache = new HashMap<>();
Map<Long, Map<Host, Boolean>> vmToStorageMotionCache = new HashMap<>();
for (VirtualMachine vm : vmList) {
// Skip ineligible VMs
if (vm.getType().isUsedBySystem() ||
vm.getState() != VirtualMachine.State.Running ||
(MapUtils.isNotEmpty(vm.getDetails()) &&
"true".equalsIgnoreCase(vm.getDetails().get(VmDetailConstants.SKIP_DRS)))) {
continue;
}
try {
// Use listHostsForMigrationOfVM to get suitable hosts (validated by getCapableSuitableHosts)
// This ensures the same validation as the "find host for migration" command
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigration =
managementServer.listHostsForMigrationOfVM(vm, 0L, 500L, null, vmList);
List<? extends Host> suitableHosts = hostsForMigration.second(); // Get suitable hosts (validated by HostAllocator)
Map<Host, Boolean> requiresStorageMotion = hostsForMigration.third();
if (suitableHosts != null && !suitableHosts.isEmpty()) {
vmToCompatibleHostsCache.put(vm.getId(), suitableHosts);
vmToStorageMotionCache.put(vm.getId(), requiresStorageMotion);
}
} catch (Exception e) {
logger.debug("Could not get suitable hosts for VM {}: {}", vm, e.getMessage());
}
}
return new Pair<>(vmToCompatibleHostsCache, vmToStorageMotionCache);
}
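// Illustrative sketch of the intended call pattern (it mirrors the getDrsPlan code above): both
// caches are built once per DRS plan, before the iteration loop, so listHostsForMigrationOfVM is
// invoked at most once per VM rather than once per VM per iteration.
//
//     Pair<Map<Long, List<? extends Host>>, Map<Long, Map<Host, Boolean>>> cache =
//             getCompatibleHostAndVmStorageMotionCache(vmList);
//     Map<Long, List<? extends Host>> compatibleHosts = cache.first();
//     Map<Long, Map<Host, Boolean>> storageMotion = cache.second();
//     // both maps are then passed, unchanged, into every getBestMigration call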
/**
* Pre-fetch affinity group mappings for all eligible VMs (once, before iterations)
* This allows us to skip expensive affinity processing for VMs without affinity groups
*
* @param vmList List of VMs to check for affinity groups
* @param vmToCompatibleHostsCache Cached map of VM IDs to their compatible hosts
* @return Set of VM IDs that have affinity groups
*/
private Set<Long> getVmsWithAffinityGroups(
List<VirtualMachine> vmList, Map<Long, List<? extends Host>> vmToCompatibleHostsCache
) {
Set<Long> vmsWithAffinityGroups = new HashSet<>();
for (VirtualMachine vm : vmList) {
if (vmToCompatibleHostsCache.containsKey(vm.getId())) {
// Check if VM has any affinity groups - if list is empty, VM has no affinity groups
List<AffinityGroupVMMapVO> affinityGroupMappings = affinityGroupVMMapDao.listByInstanceId(vm.getId());
if (CollectionUtils.isNotEmpty(affinityGroupMappings)) {
vmsWithAffinityGroups.add(vm.getId());
}
}
}
return vmsWithAffinityGroups;
}
private ClusterDrsAlgorithm getDrsAlgorithm(String algoName) {
if (drsAlgorithmMap.containsKey(algoName)) {
return drsAlgorithmMap.get(algoName);
@@ -429,6 +566,12 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
* @param hostMemoryCapacityMap
* a map of host IDs to their corresponding memory
* capacity
* @param vmToCompatibleHostsCache
* cached map of VM IDs to their compatible hosts
* @param vmToStorageMotionCache
* cached map of VM IDs to storage motion requirements
* @param vmToExcludesMap
* map of VM IDs to their ExcludeList (affinity constraints)
*
* @return a pair of the virtual machine and host that represent the best
* migration, or null if no migration is
@@ -438,33 +581,46 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
List<VirtualMachine> vmList,
Map<Long, ServiceOffering> vmIdServiceOfferingMap,
Map<Long, Ternary<Long, Long, Long>> hostCpuCapacityMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap) throws ConfigurationException {
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap,
Map<Long, List<? extends Host>> vmToCompatibleHostsCache,
Map<Long, Map<Host, Boolean>> vmToStorageMotionCache,
Map<Long, ExcludeList> vmToExcludesMap) throws ConfigurationException {
// Pre-calculate cluster imbalance once per iteration (same for all VM-host combinations)
Double preImbalance = getClusterImbalance(cluster.getId(),
new ArrayList<>(hostCpuCapacityMap.values()),
new ArrayList<>(hostMemoryCapacityMap.values()),
null);
// Pre-calculate base metrics array once per iteration for optimized imbalance calculation
String metricType = getClusterDrsMetric(cluster.getId());
Map<Long, Ternary<Long, Long, Long>> baseMetricsMap = "cpu".equals(metricType) ? hostCpuCapacityMap : hostMemoryCapacityMap;
Pair<double[], Map<Long, Integer>> baseMetricsAndIndexMap = getBaseMetricsArrayAndHostIdIndexMap(cluster, baseMetricsMap);
double[] baseMetricsArray = baseMetricsAndIndexMap.first();
Map<Long, Integer> hostIdToIndexMap = baseMetricsAndIndexMap.second();
double improvement = 0;
Pair<VirtualMachine, Host> bestMigration = new Pair<>(null, null);
for (VirtualMachine vm : vmList) {
if (vm.getType().isUsedBySystem() || vm.getState() != VirtualMachine.State.Running ||
(MapUtils.isNotEmpty(vm.getDetails()) &&
vm.getDetails().get(VmDetailConstants.SKIP_DRS).equalsIgnoreCase("true"))
) {
List<? extends Host> compatibleHosts = vmToCompatibleHostsCache.get(vm.getId());
Map<Host, Boolean> requiresStorageMotion = vmToStorageMotionCache.get(vm.getId());
ExcludeList excludes = vmToExcludesMap.get(vm.getId());
ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId());
if (skipDrs(vm, compatibleHosts, serviceOffering)) {
continue;
}
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigrationOfVM = managementServer
.listHostsForMigrationOfVM(
vm, 0L, 500L, null, vmList);
List<? extends Host> compatibleDestinationHosts = hostsForMigrationOfVM.first().first();
List<? extends Host> suitableDestinationHosts = hostsForMigrationOfVM.second();
Map<Host, Boolean> requiresStorageMotion = hostsForMigrationOfVM.third();
long vmCpu = (long) serviceOffering.getCpu() * serviceOffering.getSpeed();
long vmMemory = serviceOffering.getRamSize() * 1024L * 1024L;
for (Host destHost : compatibleDestinationHosts) {
if (!suitableDestinationHosts.contains(destHost) || cluster.getId() != destHost.getClusterId()) {
for (Host destHost : compatibleHosts) {
Ternary<Double, Double, Double> metrics = getMetricsForMigration(cluster, algorithm, vm, vmCpu,
vmMemory, serviceOffering, destHost, hostCpuCapacityMap, hostMemoryCapacityMap,
requiresStorageMotion, preImbalance, baseMetricsArray, hostIdToIndexMap, excludes);
if (metrics == null) {
continue;
}
Ternary<Double, Double, Double> metrics = algorithm.getMetrics(cluster, vm,
vmIdServiceOfferingMap.get(vm.getId()), destHost, hostCpuCapacityMap, hostMemoryCapacityMap,
requiresStorageMotion.get(destHost));
Double currentImprovement = metrics.first();
Double cost = metrics.second();
Double benefit = metrics.third();
@@ -477,6 +633,86 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ
return bestMigration;
}
private boolean skipDrs(VirtualMachine vm, List<? extends Host> compatibleHosts, ServiceOffering serviceOffering) {
if (vm.getType().isUsedBySystem() || vm.getState() != VirtualMachine.State.Running) {
return true;
}
if (MapUtils.isNotEmpty(vm.getDetails()) &&
"true".equalsIgnoreCase(vm.getDetails().get(VmDetailConstants.SKIP_DRS))) {
return true;
}
if (CollectionUtils.isEmpty(compatibleHosts)) {
return true;
}
if (serviceOffering == null) {
return true;
}
return false;
}
private Pair<double[], Map<Long, Integer>> getBaseMetricsArrayAndHostIdIndexMap(
Cluster cluster, Map<Long, Ternary<Long, Long, Long>> baseMetricsMap
) {
double[] baseMetricsArray = new double[baseMetricsMap.size()];
Map<Long, Integer> hostIdToIndexMap = new HashMap<>();
int index = 0;
for (Map.Entry<Long, Ternary<Long, Long, Long>> entry : baseMetricsMap.entrySet()) {
Long hostId = entry.getKey();
Ternary<Long, Long, Long> metrics = entry.getValue();
long used = metrics.first();
long actualTotal = metrics.third() - metrics.second();
long free = actualTotal - metrics.first();
Double metricValue = getMetricValue(cluster.getId(), used, free, actualTotal, null);
if (metricValue != null) {
baseMetricsArray[index] = metricValue;
hostIdToIndexMap.put(hostId, index);
index++;
}
}
// Trim array if some values were null
if (index < baseMetricsArray.length) {
double[] trimmed = new double[index];
System.arraycopy(baseMetricsArray, 0, trimmed, 0, index);
baseMetricsArray = trimmed;
}
return new Pair<>(baseMetricsArray, hostIdToIndexMap);
}
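// Illustrative sketch of why the base metrics array is precomputed. How ClusterDrsAlgorithm#getMetrics
// actually consumes baseMetricsArray and hostIdToIndexMap is not shown in this hunk, so treat the
// following as an assumption: with every host's metric laid out in an array once per iteration, a
// candidate migration only needs the source and destination entries adjusted before the imbalance
// is recomputed, instead of rebuilding all host metrics for every VM/host pair.
//
//     double[] postMetrics = baseMetricsArray.clone();
//     Integer src = hostIdToIndexMap.get(vm.getHostId());
//     Integer dst = hostIdToIndexMap.get(destHost.getId());
//     double vmMetric = vmCpu; // hypothetical per-VM contribution (vmMemory when the cluster metric is memory)
//     if (src != null) {
//         postMetrics[src] -= vmMetric;
//     }
//     if (dst != null) {
//         postMetrics[dst] += vmMetric;
//     }
//     // post-migration imbalance is then derived from postMetrics and compared with preImbalance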
private Ternary<Double, Double, Double> getMetricsForMigration(
Cluster cluster, ClusterDrsAlgorithm algorithm, VirtualMachine vm, long vmCpu, long vmMemory,
ServiceOffering serviceOffering, Host destHost, Map<Long, Ternary<Long, Long, Long>> hostCpuCapacityMap,
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap, Map<Host, Boolean> requiresStorageMotion,
Double preImbalance, double[] baseMetricsArray, Map<Long, Integer> hostIdToIndexMap, ExcludeList excludes
) throws ConfigurationException {
if (cluster.getId() != destHost.getClusterId()) {
return null;
}
// Check affinity constraints
if (excludes != null && excludes.shouldAvoid(destHost)) {
return null;
}
// Quick capacity pre-filter: skip hosts that don't have enough capacity
Ternary<Long, Long, Long> destHostCpu = hostCpuCapacityMap.get(destHost.getId());
Ternary<Long, Long, Long> destHostMemory = hostMemoryCapacityMap.get(destHost.getId());
if (destHostCpu == null || destHostMemory == null) {
return null;
}
long destHostAvailableCpu = (destHostCpu.third() - destHostCpu.second()) - destHostCpu.first();
long destHostAvailableMemory = (destHostMemory.third() - destHostMemory.second()) - destHostMemory.first();
if (destHostAvailableCpu < vmCpu || destHostAvailableMemory < vmMemory) {
return null; // Skip hosts without sufficient capacity
}
return algorithm.getMetrics(cluster, vm, serviceOffering, destHost, hostCpuCapacityMap, hostMemoryCapacityMap,
requiresStorageMotion.getOrDefault(destHost, false), preImbalance, baseMetricsArray, hostIdToIndexMap);
}
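// Illustrative sketch of how the returned Ternary is expected to be consumed in getBestMigration.
// The actual comparison falls in the part of the hunk elided above, so this is an assumption based
// on the variables that are visible (improvement, bestMigration, currentImprovement, cost, benefit):
// the first element is the imbalance improvement, the second the migration cost, the third its
// benefit, and a candidate only replaces the current best when it improves on it and its benefit
// outweighs its cost.
//
//     if (currentImprovement > improvement && benefit > cost) {
//         improvement = currentImprovement;
//         bestMigration = new Pair<>(vm, destHost);
//     }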
/**
* Saves a DRS plan for a given cluster and returns the saved plan along with the list of migrations to be executed.
@@ -23,6 +23,21 @@ import static org.mockito.Mockito.lenient;
|
|||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.mockito.stubbing.Answer;
|
||||
import org.springframework.test.util.ReflectionTestUtils;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
|
@@ -52,24 +67,11 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
|||
import org.apache.cloudstack.framework.config.impl.ConfigurationVO;
|
||||
import org.apache.cloudstack.framework.extensions.manager.ExtensionsManager;
|
||||
import org.apache.cloudstack.userdata.UserDataManager;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.mockito.stubbing.Answer;
|
||||
import org.springframework.test.util.ReflectionTestUtils;
|
||||
|
||||
import com.cloud.cpu.CPU;
|
||||
import com.cloud.dc.Vlan.VlanType;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
import com.cloud.api.ApiDBUtils;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.host.DetailVO;
|
||||
import com.cloud.host.Host;
|
||||
|
|
@@ -104,6 +106,7 @@ import com.cloud.vm.VMInstanceDetailVO;
|
|||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDetailsDao;
|
||||
import com.cloud.agent.manager.allocator.HostAllocator;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class ManagementServerImplTest {
|
||||
|
|
@@ -130,10 +133,10 @@ public class ManagementServerImplTest {
|
|||
IpAddressManagerImpl ipAddressManagerImpl;
|
||||
|
||||
@Mock
|
||||
AccountManager _accountMgr;
|
||||
AccountManager accountManager;
|
||||
|
||||
@Mock
|
||||
UserDataDao _userDataDao;
|
||||
UserDataDao userDataDao;
|
||||
|
||||
@Mock
|
||||
VMTemplateDao templateDao;
|
||||
|
|
@@ -142,7 +145,7 @@ public class ManagementServerImplTest {
|
|||
AnnotationDao annotationDao;
|
||||
|
||||
@Mock
|
||||
UserVmDao _userVmDao;
|
||||
UserVmDao userVmDao;
|
||||
|
||||
@Mock
|
||||
UserDataManager userDataManager;
|
||||
|
|
@@ -163,10 +166,10 @@ public class ManagementServerImplTest {
|
|||
DomainDao domainDao;
|
||||
|
||||
@Mock
|
||||
GuestOSCategoryDao _guestOSCategoryDao;
|
||||
GuestOSCategoryDao guestOSCategoryDao;
|
||||
|
||||
@Mock
|
||||
GuestOSDao _guestOSDao;
|
||||
GuestOSDao guestOSDao;
|
||||
|
||||
@Mock
|
||||
ExtensionsManager extensionManager;
|
||||
|
|
@@ -175,16 +178,38 @@ public class ManagementServerImplTest {
|
|||
@InjectMocks
|
||||
ManagementServerImpl spy = new ManagementServerImpl();
|
||||
|
||||
@Mock
|
||||
HostAllocator hostAllocator;
|
||||
|
||||
private AutoCloseable closeable;
|
||||
private MockedStatic<ApiDBUtils> apiDBUtilsMock;
|
||||
|
||||
@Before
|
||||
public void setup() throws IllegalAccessException, NoSuchFieldException {
|
||||
closeable = MockitoAnnotations.openMocks(this);
|
||||
CallContext.register(Mockito.mock(User.class), Mockito.mock(Account.class));
|
||||
spy._accountMgr = accountManager;
|
||||
spy.userDataDao = userDataDao;
|
||||
spy.templateDao = templateDao;
|
||||
spy._userVmDao = userVmDao;
|
||||
spy.annotationDao = annotationDao;
|
||||
spy._detailsDao = hostDetailsDao;
|
||||
spy.userDataManager = userDataManager;
|
||||
|
||||
spy.setHostAllocators(List.of(hostAllocator));
|
||||
|
||||
// Mock ApiDBUtils static method
|
||||
apiDBUtilsMock = Mockito.mockStatic(ApiDBUtils.class);
|
||||
// Return empty list to avoid architecture filtering in most tests
|
||||
apiDBUtilsMock.when(() -> ApiDBUtils.listZoneClustersArchs(Mockito.anyLong()))
|
||||
.thenReturn(new ArrayList<>());
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
if (apiDBUtilsMock != null) {
|
||||
apiDBUtilsMock.close();
|
||||
}
|
||||
CallContext.unregister();
|
||||
closeable.close();
|
||||
}
|
||||
|
|
@@ -345,7 +370,7 @@ public class ManagementServerImplTest {
|
|||
when(account.getAccountId()).thenReturn(1L);
|
||||
when(account.getDomainId()).thenReturn(2L);
|
||||
when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
String testUserData = "testUserdata";
|
||||
RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class);
|
||||
|
|
@@ -353,8 +378,8 @@ public class ManagementServerImplTest {
|
|||
when(cmd.getName()).thenReturn("testName");
|
||||
when(cmd.getHttpMethod()).thenReturn(BaseCmd.HTTPMethod.GET);
|
||||
|
||||
when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null);
|
||||
when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), testUserData)).thenReturn(null);
|
||||
when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null);
|
||||
when(userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), testUserData)).thenReturn(null);
|
||||
when(userDataManager.validateUserData(testUserData, BaseCmd.HTTPMethod.GET)).thenReturn(testUserData);
|
||||
|
||||
UserData userData = spy.registerUserData(cmd);
|
||||
|
|
@@ -373,15 +398,15 @@ public class ManagementServerImplTest {
|
|||
when(account.getAccountId()).thenReturn(1L);
|
||||
when(account.getDomainId()).thenReturn(2L);
|
||||
when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class);
|
||||
when(cmd.getUserData()).thenReturn("testUserdata");
|
||||
when(cmd.getName()).thenReturn("testName");
|
||||
|
||||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null);
|
||||
when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), "testUserdata")).thenReturn(userData);
|
||||
when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null);
|
||||
when(userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), "testUserdata")).thenReturn(userData);
|
||||
|
||||
spy.registerUserData(cmd);
|
||||
}
|
||||
|
|
@@ -395,13 +420,13 @@ public class ManagementServerImplTest {
|
|||
when(account.getAccountId()).thenReturn(1L);
|
||||
when(account.getDomainId()).thenReturn(2L);
|
||||
Mockito.when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class);
|
||||
when(cmd.getName()).thenReturn("testName");
|
||||
|
||||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(userData);
|
||||
when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(userData);
|
||||
|
||||
spy.registerUserData(cmd);
|
||||
}
|
||||
|
|
@@ -413,7 +438,7 @@ public class ManagementServerImplTest {
|
|||
CallContext callContextMock = Mockito.mock(CallContext.class);
|
||||
when(CallContext.current()).thenReturn(callContextMock);
|
||||
when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class);
|
||||
when(cmd.getAccountName()).thenReturn("testAccountName");
|
||||
|
|
@@ -423,10 +448,10 @@ public class ManagementServerImplTest {
|
|||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
|
||||
Mockito.when(userData.getId()).thenReturn(1L);
|
||||
when(_userDataDao.findById(1L)).thenReturn(userData);
|
||||
when(userDataDao.findById(1L)).thenReturn(userData);
|
||||
when(templateDao.findTemplatesLinkedToUserdata(1L)).thenReturn(new ArrayList<VMTemplateVO>());
|
||||
when(_userVmDao.findByUserDataId(1L)).thenReturn(new ArrayList<UserVmVO>());
|
||||
when(_userDataDao.remove(1L)).thenReturn(true);
|
||||
when(userVmDao.findByUserDataId(1L)).thenReturn(new ArrayList<UserVmVO>());
|
||||
when(userDataDao.remove(1L)).thenReturn(true);
|
||||
|
||||
boolean result = spy.deleteUserData(cmd);
|
||||
Assert.assertEquals(true, result);
|
||||
|
|
@@ -439,7 +464,7 @@ public class ManagementServerImplTest {
|
|||
CallContext callContextMock = Mockito.mock(CallContext.class);
|
||||
when(CallContext.current()).thenReturn(callContextMock);
|
||||
when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class);
|
||||
when(cmd.getAccountName()).thenReturn("testAccountName");
|
||||
|
|
@@ -449,7 +474,7 @@ public class ManagementServerImplTest {
|
|||
|
||||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
Mockito.when(userData.getId()).thenReturn(1L);
|
||||
when(_userDataDao.findById(1L)).thenReturn(userData);
|
||||
when(userDataDao.findById(1L)).thenReturn(userData);
|
||||
|
||||
VMTemplateVO vmTemplateVO = Mockito.mock(VMTemplateVO.class);
|
||||
List<VMTemplateVO> linkedTemplates = new ArrayList<>();
|
||||
|
|
@@ -466,7 +491,7 @@ public class ManagementServerImplTest {
|
|||
CallContext callContextMock = Mockito.mock(CallContext.class);
|
||||
when(CallContext.current()).thenReturn(callContextMock);
|
||||
when(callContextMock.getCallingAccount()).thenReturn(account);
|
||||
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
|
||||
|
||||
DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class);
|
||||
when(cmd.getAccountName()).thenReturn("testAccountName");
|
||||
|
|
@@ -476,14 +501,14 @@ public class ManagementServerImplTest {
|
|||
|
||||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
Mockito.when(userData.getId()).thenReturn(1L);
|
||||
when(_userDataDao.findById(1L)).thenReturn(userData);
|
||||
when(userDataDao.findById(1L)).thenReturn(userData);
|
||||
|
||||
when(templateDao.findTemplatesLinkedToUserdata(1L)).thenReturn(new ArrayList<VMTemplateVO>());
|
||||
|
||||
UserVmVO userVmVO = Mockito.mock(UserVmVO.class);
|
||||
List<UserVmVO> vms = new ArrayList<>();
|
||||
vms.add(userVmVO);
|
||||
when(_userVmDao.findByUserDataId(1L)).thenReturn(vms);
|
||||
when(userVmDao.findByUserDataId(1L)).thenReturn(vms);
|
||||
|
||||
spy.deleteUserData(cmd);
|
||||
}
|
||||
|
|
@@ -504,7 +529,7 @@ public class ManagementServerImplTest {
|
|||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
|
||||
SearchBuilder<UserDataVO> sb = Mockito.mock(SearchBuilder.class);
|
||||
when(_userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(sb.entity()).thenReturn(userData);
|
||||
|
||||
SearchCriteria<UserDataVO> sc = Mockito.mock(SearchCriteria.class);
|
||||
|
|
@@ -513,7 +538,7 @@ public class ManagementServerImplTest {
|
|||
List<UserDataVO> userDataList = new ArrayList<UserDataVO>();
|
||||
userDataList.add(userData);
|
||||
Pair<List<UserDataVO>, Integer> result = new Pair(userDataList, 1);
|
||||
when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
|
||||
Pair<List<? extends UserData>, Integer> userdataResultList = spy.listUserDatas(cmd, false);
|
||||
|
||||
|
|
@@ -537,7 +562,7 @@ public class ManagementServerImplTest {
|
|||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
|
||||
SearchBuilder<UserDataVO> sb = Mockito.mock(SearchBuilder.class);
|
||||
when(_userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(sb.entity()).thenReturn(userData);
|
||||
|
||||
SearchCriteria<UserDataVO> sc = Mockito.mock(SearchCriteria.class);
|
||||
|
|
@@ -546,7 +571,7 @@ public class ManagementServerImplTest {
|
|||
List<UserDataVO> userDataList = new ArrayList<UserDataVO>();
|
||||
userDataList.add(userData);
|
||||
Pair<List<UserDataVO>, Integer> result = new Pair(userDataList, 1);
|
||||
when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
|
||||
Pair<List<? extends UserData>, Integer> userdataResultList = spy.listUserDatas(cmd, false);
|
||||
|
||||
|
|
@@ -570,7 +595,7 @@ public class ManagementServerImplTest {
|
|||
UserDataVO userData = Mockito.mock(UserDataVO.class);
|
||||
|
||||
SearchBuilder<UserDataVO> sb = Mockito.mock(SearchBuilder.class);
|
||||
when(_userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(userDataDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(sb.entity()).thenReturn(userData);
|
||||
|
||||
SearchCriteria<UserDataVO> sc = Mockito.mock(SearchCriteria.class);
|
||||
|
|
@@ -579,7 +604,7 @@ public class ManagementServerImplTest {
|
|||
List<UserDataVO> userDataList = new ArrayList<UserDataVO>();
|
||||
userDataList.add(userData);
|
||||
Pair<List<UserDataVO>, Integer> result = new Pair(userDataList, 1);
|
||||
when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result);
|
||||
|
||||
Pair<List<? extends UserData>, Integer> userdataResultList = spy.listUserDatas(cmd, false);
|
||||
|
||||
|
|
@@ -764,12 +789,12 @@ public class ManagementServerImplTest {
|
|||
boolean featured = true;
|
||||
Mockito.when(addCmd.getName()).thenReturn(name);
|
||||
Mockito.when(addCmd.isFeatured()).thenReturn(featured);
|
||||
Mockito.doAnswer((Answer<GuestOSCategoryVO>) invocation -> (GuestOSCategoryVO)invocation.getArguments()[0]).when(_guestOSCategoryDao).persist(Mockito.any(GuestOSCategoryVO.class));
|
||||
Mockito.doAnswer((Answer<GuestOSCategoryVO>) invocation -> (GuestOSCategoryVO)invocation.getArguments()[0]).when(guestOSCategoryDao).persist(Mockito.any(GuestOSCategoryVO.class));
|
||||
GuestOsCategory result = spy.addGuestOsCategory(addCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(name, result.getName());
|
||||
Assert.assertEquals(featured, result.isFeatured());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).persist(any(GuestOSCategoryVO.class));
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).persist(any(GuestOSCategoryVO.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -784,14 +809,14 @@ public class ManagementServerImplTest {
|
|||
Mockito.when(updateCmd.getName()).thenReturn(name);
|
||||
Mockito.when(updateCmd.isFeatured()).thenReturn(featured);
|
||||
Mockito.when(updateCmd.getSortKey()).thenReturn(sortKey);
|
||||
Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(_guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true);
|
||||
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true);
|
||||
GuestOsCategory result = spy.updateGuestOsCategory(updateCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(name, result.getName());
|
||||
Assert.assertEquals(featured, result.isFeatured());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
|
|
@@ -799,7 +824,7 @@ public class ManagementServerImplTest {
|
|||
UpdateGuestOsCategoryCmd updateCmd = Mockito.mock(UpdateGuestOsCategoryCmd.class);
|
||||
long id = 1L;
|
||||
when(updateCmd.getId()).thenReturn(id);
|
||||
when(_guestOSCategoryDao.findById(id)).thenReturn(null);
|
||||
when(guestOSCategoryDao.findById(id)).thenReturn(null);
|
||||
spy.updateGuestOsCategory(updateCmd);
|
||||
}
|
||||
|
||||
|
|
@@ -812,13 +837,13 @@ public class ManagementServerImplTest {
|
|||
when(updateCmd.getName()).thenReturn(null);
|
||||
when(updateCmd.isFeatured()).thenReturn(null);
|
||||
when(updateCmd.getSortKey()).thenReturn(null);
|
||||
when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
GuestOsCategory result = spy.updateGuestOsCategory(updateCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertNotNull(result.getName());
|
||||
Assert.assertFalse(result.isFeatured());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.never()).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.never()).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -831,14 +856,14 @@ public class ManagementServerImplTest {
|
|||
Mockito.when(updateCmd.getName()).thenReturn(name);
|
||||
Mockito.when(updateCmd.isFeatured()).thenReturn(null);
|
||||
Mockito.when(updateCmd.getSortKey()).thenReturn(null);
|
||||
Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(_guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true);
|
||||
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true);
|
||||
GuestOsCategory result = spy.updateGuestOsCategory(updateCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(name, result.getName());
|
||||
Assert.assertFalse(result.isFeatured());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -847,14 +872,14 @@ public class ManagementServerImplTest {
|
|||
GuestOSCategoryVO guestOSCategory = Mockito.mock(GuestOSCategoryVO.class);
|
||||
long id = 1L;
|
||||
Mockito.when(deleteCmd.getId()).thenReturn(id);
|
||||
Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(_guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList());
|
||||
Mockito.when(_guestOSCategoryDao.remove(id)).thenReturn(true);
|
||||
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList());
|
||||
Mockito.when(guestOSCategoryDao.remove(id)).thenReturn(true);
|
||||
boolean result = spy.deleteGuestOsCategory(deleteCmd);
|
||||
Assert.assertTrue(result);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(_guestOSDao, Mockito.times(1)).listIdsByCategoryId(id);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).remove(id);
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id);
|
||||
Mockito.verify(guestOSDao, Mockito.times(1)).listIdsByCategoryId(id);
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).remove(id);
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
|
|
@@ -862,7 +887,7 @@ public class ManagementServerImplTest {
|
|||
DeleteGuestOsCategoryCmd deleteCmd = Mockito.mock(DeleteGuestOsCategoryCmd.class);
|
||||
long id = 1L;
|
||||
Mockito.when(deleteCmd.getId()).thenReturn(id);
|
||||
Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(null);
|
||||
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(null);
|
||||
spy.deleteGuestOsCategory(deleteCmd);
|
||||
}
|
||||
|
||||
|
|
@@ -872,8 +897,8 @@ public class ManagementServerImplTest {
|
|||
GuestOSCategoryVO guestOSCategory = Mockito.mock(GuestOSCategoryVO.class);
|
||||
long id = 1L;
|
||||
Mockito.when(deleteCmd.getId()).thenReturn(id);
|
||||
Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(_guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList(1L));
|
||||
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory);
|
||||
Mockito.when(guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList(1L));
|
||||
spy.deleteGuestOsCategory(deleteCmd);
|
||||
}
|
||||
|
||||
|
|
@@ -881,7 +906,7 @@ public class ManagementServerImplTest {
|
|||
GuestOSVO vo = mock(GuestOSVO.class);
|
||||
SearchBuilder<GuestOSVO> sb = mock(SearchBuilder.class);
|
||||
when(sb.entity()).thenReturn(vo);
|
||||
when(_guestOSDao.createSearchBuilder()).thenReturn(sb);
|
||||
when(guestOSDao.createSearchBuilder()).thenReturn(sb);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -908,19 +933,19 @@ public class ManagementServerImplTest {
|
|||
SearchBuilder<GuestOSCategoryVO> searchBuilder = Mockito.mock(SearchBuilder.class);
|
||||
Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory);
|
||||
SearchCriteria<GuestOSCategoryVO> searchCriteria = Mockito.mock(SearchCriteria.class);
|
||||
Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(searchBuilder.create()).thenReturn(searchCriteria);
|
||||
Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L));
|
||||
Pair<List<GuestOSCategoryVO>, Integer> mockResult = new Pair<>(Arrays.asList(guestOSCategory), 1);
|
||||
mockGuestOsJoin();
|
||||
Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
Pair<List<? extends GuestOsCategory>, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(1, result.second().intValue());
|
||||
Assert.assertEquals(1, result.first().size());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -946,19 +971,19 @@ public class ManagementServerImplTest {
|
|||
SearchBuilder<GuestOSCategoryVO> searchBuilder = Mockito.mock(SearchBuilder.class);
|
||||
Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory);
|
||||
SearchCriteria<GuestOSCategoryVO> searchCriteria = Mockito.mock(SearchCriteria.class);
|
||||
Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(searchBuilder.create()).thenReturn(searchCriteria);
|
||||
Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L));
|
||||
Pair<List<GuestOSCategoryVO>, Integer> mockResult = new Pair<>(Arrays.asList(), 0);
|
||||
Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
mockGuestOsJoin();
|
||||
Pair<List<? extends GuestOsCategory>, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(0, result.second().intValue());
|
||||
Assert.assertEquals(0, result.first().size());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -984,19 +1009,19 @@ public class ManagementServerImplTest {
|
|||
SearchBuilder<GuestOSCategoryVO> searchBuilder = Mockito.mock(SearchBuilder.class);
|
||||
Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory);
|
||||
SearchCriteria<GuestOSCategoryVO> searchCriteria = Mockito.mock(SearchCriteria.class);
|
||||
Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(searchBuilder.create()).thenReturn(searchCriteria);
|
||||
Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L));
|
||||
Pair<List<GuestOSCategoryVO>, Integer> mockResult = new Pair<>(Arrays.asList(), 0);
|
||||
when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
mockGuestOsJoin();
|
||||
Pair<List<? extends GuestOsCategory>, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(0, result.second().intValue());
|
||||
Assert.assertEquals(0, result.first().size());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@@ -1011,18 +1036,18 @@ public class ManagementServerImplTest {
|
|||
SearchBuilder<GuestOSCategoryVO> searchBuilder = Mockito.mock(SearchBuilder.class);
|
||||
Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory);
|
||||
SearchCriteria<GuestOSCategoryVO> searchCriteria = Mockito.mock(SearchCriteria.class);
|
||||
Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder);
|
||||
Mockito.when(searchBuilder.create()).thenReturn(searchCriteria);
|
||||
Pair<List<GuestOSCategoryVO>, Integer> mockResult = new Pair<>(Arrays.asList(guestOSCategory), 1);
|
||||
Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult);
|
||||
mockGuestOsJoin();
|
||||
Pair<List<? extends GuestOsCategory>, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd);
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertEquals(1, result.second().intValue());
|
||||
Assert.assertEquals(1, result.first().size());
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder();
|
||||
Mockito.verify(searchCriteria, Mockito.times(1)).setParameters("id", id);
|
||||
Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any());
|
||||
|
||||
}
|
||||
|
||||
|
|
@@ -1034,49 +1059,4 @@ public class ManagementServerImplTest {
|
|||
Assert.assertNotNull(spy.getExternalVmConsole(virtualMachine, host));
|
||||
Mockito.verify(extensionManager).getInstanceConsole(virtualMachine, host);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getStatesForIpAddressSearchReturnsValidStates() {
|
||||
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
|
||||
Mockito.when(cmd.getState()).thenReturn("Allocated ,free");
|
||||
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
|
||||
Assert.assertEquals(2, result.size());
|
||||
Assert.assertTrue(result.contains(IpAddress.State.Allocated));
|
||||
Assert.assertTrue(result.contains(IpAddress.State.Free));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getStatesForIpAddressSearchReturnsEmptyListForNullState() {
|
||||
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
|
||||
Mockito.when(cmd.getState()).thenReturn(null);
|
||||
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
|
||||
Assert.assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getStatesForIpAddressSearchReturnsEmptyListForBlankState() {
|
||||
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
|
||||
Mockito.when(cmd.getState()).thenReturn(" ");
|
||||
List<IpAddress.State> result = spy.getStatesForIpAddressSearch(cmd);
|
||||
Assert.assertTrue(result.isEmpty());
|
||||
}
|
||||
|
||||
@Test(expected = InvalidParameterValueException.class)
|
||||
public void getStatesForIpAddressSearchThrowsExceptionForInvalidState() {
|
||||
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
|
||||
Mockito.when(cmd.getState()).thenReturn("InvalidState");
|
||||
spy.getStatesForIpAddressSearch(cmd);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void getStatesForIpAddressSearchHandlesMixedValidAndInvalidStates() {
|
||||
ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class);
|
||||
Mockito.when(cmd.getState()).thenReturn("Allocated,InvalidState");
|
||||
try {
|
||||
spy.getStatesForIpAddressSearch(cmd);
|
||||
Assert.fail("Expected InvalidParameterValueException to be thrown");
|
||||
} catch (InvalidParameterValueException e) {
|
||||
Assert.assertEquals("Invalid state: InvalidState", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1368,6 +1368,22 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase {
|
|||
accountManagerImpl.validateRoleChange(account, newRole, caller);
|
||||
}
|
||||
|
||||
@Test(expected = PermissionDeniedException.class)
|
||||
public void testValidateRoleAdminCannotChangeDefaultAdmin() {
|
||||
Account account = Mockito.mock(Account.class);
|
||||
Mockito.when(account.isDefault()).thenReturn(true);
|
||||
Mockito.when(account.getRoleId()).thenReturn(1L);
|
||||
Role newRole = Mockito.mock(Role.class);
|
||||
Mockito.when(newRole.getRoleType()).thenReturn(RoleType.User);
|
||||
Role callerRole = Mockito.mock(Role.class);
|
||||
Mockito.when(callerRole.getRoleType()).thenReturn(RoleType.Admin);
|
||||
Account caller = Mockito.mock(Account.class);
|
||||
Mockito.when(caller.getRoleId()).thenReturn(2L);
|
||||
Mockito.when(roleService.findRole(1L)).thenReturn(Mockito.mock(Role.class));
|
||||
Mockito.when(roleService.findRole(2L)).thenReturn(callerRole);
|
||||
accountManagerImpl.validateRoleChange(account, newRole, caller);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void checkIfAccountManagesProjectsTestNotThrowExceptionWhenTheAccountIsNotAProjectAdministrator() {
|
||||
long accountId = 1L;
|
||||
|
|
|
|||
|
|
@@ -42,7 +42,9 @@ import com.cloud.utils.db.GlobalLock;
|
|||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VmDetailConstants;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
import org.apache.cloudstack.api.command.admin.cluster.GenerateClusterDrsPlanCmd;
|
||||
import org.apache.cloudstack.api.response.ClusterDrsPlanMigrationResponse;
|
||||
import org.apache.cloudstack.api.response.ClusterDrsPlanResponse;
|
||||
|
|
@@ -116,6 +118,9 @@ public class ClusterDrsServiceImplTest {
|
|||
@Mock
|
||||
private VMInstanceDao vmInstanceDao;
|
||||
|
||||
@Mock
|
||||
private AffinityGroupVMMapDao affinityGroupVMMapDao;
|
||||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
private ClusterDrsServiceImpl clusterDrsService = new ClusterDrsServiceImpl();
|
||||
|
|
@@ -168,9 +173,14 @@ public class ClusterDrsServiceImplTest {
|
|||
VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(vm1.getId()).thenReturn(1L);
|
||||
Mockito.when(vm1.getHostId()).thenReturn(1L);
|
||||
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
|
||||
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
|
||||
|
||||
VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(vm2.getHostId()).thenReturn(2L);
|
||||
Mockito.when(vm2.getId()).thenReturn(2L);
|
||||
Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.User);
|
||||
Mockito.when(vm2.getState()).thenReturn(VirtualMachine.State.Running);
|
||||
|
||||
List<HostVO> hostList = new ArrayList<>();
|
||||
hostList.add(host1);
|
||||
|
|
@@ -201,10 +211,11 @@ public class ClusterDrsServiceImplTest {
|
|||
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
|
||||
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(
|
||||
true, false);
|
||||
Mockito.when(
|
||||
clusterDrsService.getBestMigration(Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class),
|
||||
Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap())).thenReturn(
|
||||
new Pair<>(vm1, host2));
|
||||
|
||||
Mockito.doReturn(new Pair<>(vm1, host2)).when(clusterDrsService).getBestMigration(
|
||||
Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class),
|
||||
Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(),
|
||||
Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap());
|
||||
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(
|
||||
serviceOffering);
|
||||
Mockito.when(hostJoinDao.searchByIds(host1.getId(), host2.getId())).thenReturn(List.of(hostJoin1, hostJoin2));
|
||||
|
|
@@ -219,6 +230,420 @@ public class ClusterDrsServiceImplTest {
|
|||
assertEquals(1, iterations.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDrsPlanWithDisabledCluster() throws ConfigurationException {
|
||||
ClusterVO cluster = Mockito.mock(ClusterVO.class);
|
||||
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled);
|
||||
|
||||
List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
|
||||
assertEquals(0, result.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDrsPlanWithZeroMaxIterations() throws ConfigurationException {
|
||||
ClusterVO cluster = Mockito.mock(ClusterVO.class);
|
||||
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);
|
||||
|
||||
List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 0);
|
||||
assertEquals(0, result.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDrsPlanWithNegativeMaxIterations() throws ConfigurationException {
|
||||
ClusterVO cluster = Mockito.mock(ClusterVO.class);
|
||||
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);
|
||||
|
||||
List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, -1);
|
||||
assertEquals(0, result.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDrsPlanWithSystemVMs() throws ConfigurationException {
|
||||
ClusterVO cluster = Mockito.mock(ClusterVO.class);
|
||||
Mockito.when(cluster.getId()).thenReturn(1L);
|
||||
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);
|
||||
|
||||
HostVO host1 = Mockito.mock(HostVO.class);
|
||||
Mockito.when(host1.getId()).thenReturn(1L);
|
||||
|
||||
VMInstanceVO systemVm = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(systemVm.getId()).thenReturn(1L);
|
||||
Mockito.when(systemVm.getHostId()).thenReturn(1L);
Mockito.when(systemVm.getType()).thenReturn(VirtualMachine.Type.SecondaryStorageVm);

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(systemVm);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
}

@Test
public void testGetDrsPlanWithNonRunningVMs() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

VMInstanceVO stoppedVm = Mockito.mock(VMInstanceVO.class);
Mockito.when(stoppedVm.getId()).thenReturn(1L);
Mockito.when(stoppedVm.getHostId()).thenReturn(1L);
Mockito.when(stoppedVm.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(stoppedVm.getState()).thenReturn(VirtualMachine.State.Stopped);

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(stoppedVm);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
}

@Test
public void testGetDrsPlanWithSkipDrsFlag() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

VMInstanceVO skippedVm = Mockito.mock(VMInstanceVO.class);
Mockito.when(skippedVm.getId()).thenReturn(1L);
Mockito.when(skippedVm.getHostId()).thenReturn(1L);
Mockito.when(skippedVm.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(skippedVm.getState()).thenReturn(VirtualMachine.State.Running);
Map<String, String> details = new HashMap<>();
details.put(VmDetailConstants.SKIP_DRS, "true");
Mockito.when(skippedVm.getDetails()).thenReturn(details);

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(skippedVm);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
}

@Test
public void testGetDrsPlanWithNoCompatibleHosts() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm1.getId()).thenReturn(1L);
Mockito.when(vm1.getHostId()).thenReturn(1L);
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap());

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
Mockito.verify(managementServer, Mockito.times(1)).listHostsForMigrationOfVM(Mockito.eq(vm1), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.anyList());
}

@Test
public void testGetDrsPlanWithExceptionInCompatibilityCheck() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm1.getId()).thenReturn(1L);
Mockito.when(vm1.getHostId()).thenReturn(1L);
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap());

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
// Exception should be caught and logged, not propagated
Mockito.verify(managementServer, Mockito.times(1)).listHostsForMigrationOfVM(Mockito.eq(vm1), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.anyList());
}
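
// --- Illustrative sketch (not part of this commit): the test above only asserts that the plan
// --- stays empty and that listHostsForMigrationOfVM is still invoked exactly once, which implies
// --- a failed compatibility check for one VM is caught and logged inside the per-VM loop instead
// --- of being propagated. A minimal shape of that guard, assuming java.util and
// --- java.util.function.Function are available; the lookup argument is a stand-in for the real
// --- compatibility check and the helper name is hypothetical.
private static Map<Long, List<? extends Host>> compatibleHostsPerVmSketch(
        List<VirtualMachine> vms, Function<VirtualMachine, List<? extends Host>> compatibilityCheck) {
    Map<Long, List<? extends Host>> cache = new HashMap<>();
    for (VirtualMachine vm : vms) {
        try {
            cache.put(vm.getId(), compatibilityCheck.apply(vm)); // may throw for a single VM
        } catch (Exception e) {
            // caught (and, in the real service, logged) so one bad VM does not abort the whole plan
        }
    }
    return cache;
}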

@Test
public void testGetDrsPlanWithNoBestMigration() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm1.getId()).thenReturn(1L);
Mockito.when(vm1.getHostId()).thenReturn(1L);
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap());

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering);
Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1));

HostVO compatibleHost = Mockito.mock(HostVO.class);

// Return null migration (no best migration found)
Mockito.doReturn(new Pair<>(null, null)).when(clusterDrsService).getBestMigration(
Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class),
Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(),
Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap());

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
}

@Test
public void testGetDrsPlanWithMultipleIterations() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host2.getId()).thenReturn(2L);

VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm1.getId()).thenReturn(1L);
Mockito.when(vm1.getHostId()).thenReturn(1L);
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap());

VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm2.getId()).thenReturn(2L);
Mockito.when(vm2.getHostId()).thenReturn(1L);
Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm2.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm2.getDetails()).thenReturn(Collections.emptyMap());

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);
hostList.add(host2);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);
vmList.add(vm2);

HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin1.getId()).thenReturn(1L);
Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getCpus()).thenReturn(4);
Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L);

HostJoinVO hostJoin2 = Mockito.mock(HostJoinVO.class);
Mockito.when(hostJoin2.getId()).thenReturn(2L);
Mockito.when(hostJoin2.getCpuUsedCapacity()).thenReturn(1000L);
Mockito.when(hostJoin2.getCpuReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin2.getCpus()).thenReturn(4);
Mockito.when(hostJoin2.getSpeed()).thenReturn(1000L);
Mockito.when(hostJoin2.getMemUsedCapacity()).thenReturn(1024L);
Mockito.when(hostJoin2.getMemReservedCapacity()).thenReturn(0L);
Mockito.when(hostJoin2.getTotalMemory()).thenReturn(8192L);

ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOffering.getCpu()).thenReturn(1);
Mockito.when(serviceOffering.getRamSize()).thenReturn(1024);
Mockito.when(serviceOffering.getSpeed()).thenReturn(1000);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(
true, true, false);
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering);
Mockito.when(hostJoinDao.searchByIds(1L, 2L)).thenReturn(List.of(hostJoin1, hostJoin2));

// Return migrations for first two iterations, then null
Mockito.doReturn(new Pair<>(vm1, host2), new Pair<>(vm2, host2), new Pair<>(null, null))
.when(clusterDrsService).getBestMigration(
Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class),
Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(),
Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap());

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(2, result.size());
Mockito.verify(balancedAlgorithm, Mockito.times(3)).needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList());
}

@Test
public void testGetDrsPlanWithMigrationToOriginalHost() throws ConfigurationException {
ClusterVO cluster = Mockito.mock(ClusterVO.class);
Mockito.when(cluster.getId()).thenReturn(1L);
Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);

HostVO host1 = Mockito.mock(HostVO.class);
Mockito.when(host1.getId()).thenReturn(1L);

HostVO host2 = Mockito.mock(HostVO.class);
Mockito.when(host2.getId()).thenReturn(2L);

VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class);
Mockito.when(vm1.getId()).thenReturn(1L);
Mockito.when(vm1.getHostId()).thenReturn(1L);
Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User);
Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running);
Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap());

List<HostVO> hostList = new ArrayList<>();
hostList.add(host1);
hostList.add(host2);

List<VMInstanceVO> vmList = new ArrayList<>();
vmList.add(vm1);

ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);

Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList);
Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList);
Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true);
Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering);

// Return migration to original host (host1) - should break the loop
Mockito.doReturn(new Pair<>(vm1, host1)).when(clusterDrsService).getBestMigration(
Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class),
Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(),
Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap());

List<Ternary<VirtualMachine, Host, Host>> result = clusterDrsService.getDrsPlan(cluster, 5);
assertEquals(0, result.size());
// Should break early when VM would migrate to original host
}
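
// --- Illustrative sketch (not part of this commit): the two tests above pin the iteration
// --- contract that getDrsPlan is expected to follow: needsDrs(...) gates every pass
// --- (true, true, false yields two planned migrations and three needsDrs calls), a
// --- Pair(null, null) from getBestMigration ends planning, and a candidate move back to the
// --- VM's current host stops the loop as well. The loop below is an assumption written only
// --- to make that contract concrete; it assumes java.util and java.util.function.Supplier.
private static List<Ternary<VirtualMachine, Host, Host>> drsIterationSketch(
        int maxIterations,
        Supplier<Boolean> needsDrs,
        Supplier<Pair<VirtualMachine, Host>> bestMigration,
        Map<Long, Host> currentHostByVmId) {
    List<Ternary<VirtualMachine, Host, Host>> plan = new ArrayList<>();
    for (int i = 0; i < maxIterations && needsDrs.get(); i++) {
        Pair<VirtualMachine, Host> candidate = bestMigration.get();
        VirtualMachine vm = candidate.first();
        Host destination = candidate.second();
        if (vm == null || destination == null) {
            break; // no improving migration left
        }
        Host source = currentHostByVmId.get(vm.getId());
        if (source != null && source.getId() == destination.getId()) {
            break; // "best" move is back to the current host, stop planning
        }
        plan.add(new Ternary<>(vm, source, destination));
        currentHostByVmId.put(vm.getId(), destination); // simulate the move for the next pass
    }
    return plan;
}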

@Test(expected = InvalidParameterValueException.class)
public void testGenerateDrsPlanClusterNotFound() {
Mockito.when(clusterDao.findById(1L)).thenReturn(null);

@@ -387,18 +812,41 @@ public class ClusterDrsServiceImplTest {
vmIdServiceOfferingMap.put(vm.getId(), serviceOffering);
}

Mockito.when(managementServer.listHostsForMigrationOfVM(vm1, 0L, 500L, null, vmList)).thenReturn(
new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false)));
Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn(
new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false)));
Mockito.when(balancedAlgorithm.getMetrics(cluster, vm1, serviceOffering, destHost, new HashMap<>(),
new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 0.5, 1.5));
// Create caches for the new method signature
Map<Long, List<? extends Host>> vmToCompatibleHostsCache = new HashMap<>();
vmToCompatibleHostsCache.put(vm1.getId(), List.of(destHost));
vmToCompatibleHostsCache.put(vm2.getId(), List.of(destHost));

Mockito.when(balancedAlgorithm.getMetrics(cluster, vm2, serviceOffering, destHost, new HashMap<>(),
new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 2.5, 1.5));
Map<Long, Map<Host, Boolean>> vmToStorageMotionCache = new HashMap<>();
vmToStorageMotionCache.put(vm1.getId(), Map.of(destHost, false));
vmToStorageMotionCache.put(vm2.getId(), Map.of(destHost, false));

Map<Long, com.cloud.deploy.DeploymentPlanner.ExcludeList> vmToExcludesMap = new HashMap<>();
vmToExcludesMap.put(vm1.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class));
vmToExcludesMap.put(vm2.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class));

// Create capacity maps with dummy data for getClusterImbalance (include both source and dest hosts)
Map<Long, Ternary<Long, Long, Long>> hostCpuCapacityMap = new HashMap<>();
hostCpuCapacityMap.put(host.getId(), new Ternary<>(2000L, 0L, 3000L)); // Source host
hostCpuCapacityMap.put(destHost.getId(), new Ternary<>(1000L, 0L, 2000L)); // Dest host
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap = new HashMap<>();
hostMemoryCapacityMap.put(host.getId(), new Ternary<>(2L * 1024L * 1024L * 1024L, 0L, 3L * 1024L * 1024L * 1024L)); // Source host
hostMemoryCapacityMap.put(destHost.getId(), new Ternary<>(1024L * 1024L * 1024L, 0L, 2L * 1024L * 1024L * 1024L)); // Dest host

// Mock getMetrics for the optimized 10-parameter version used by getBestMigration
// Return better improvement for vm1, worse for vm2
Mockito.doReturn(new Ternary<>(1.0, 0.5, 1.5)).when(balancedAlgorithm).getMetrics(
Mockito.eq(cluster), Mockito.eq(vm1), Mockito.any(ServiceOffering.class),
Mockito.eq(destHost), Mockito.eq(hostCpuCapacityMap), Mockito.eq(hostMemoryCapacityMap), Mockito.any(Boolean.class),
Mockito.any(Double.class), Mockito.any(double[].class), Mockito.any(Map.class));
Mockito.doReturn(new Ternary<>(0.5, 2.5, 1.5)).when(balancedAlgorithm).getMetrics(
Mockito.eq(cluster), Mockito.eq(vm2), Mockito.any(ServiceOffering.class),
Mockito.eq(destHost), Mockito.eq(hostCpuCapacityMap), Mockito.eq(hostMemoryCapacityMap), Mockito.any(Boolean.class),
Mockito.any(Double.class), Mockito.any(double[].class), Mockito.any(Map.class));

Pair<VirtualMachine, Host> bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm,
vmList, vmIdServiceOfferingMap, new HashMap<>(), new HashMap<>());
vmList, vmIdServiceOfferingMap, hostCpuCapacityMap, hostMemoryCapacityMap,
vmToCompatibleHostsCache, vmToStorageMotionCache, vmToExcludesMap);

assertEquals(destHost, bestMigration.second());
assertEquals(vm1, bestMigration.first());
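
// --- Illustrative sketch (not part of this commit): the getBestMigration call above now receives
// --- per-VM caches keyed by VM id (compatible hosts, storage-motion flags, exclude lists) instead
// --- of recomputing them for every candidate pairing. A computeIfAbsent-style consultation of such
// --- a cache could look like the helper below; the lookup function is a stand-in for
// --- listHostsForMigrationOfVM and the helper name is hypothetical.
private static List<? extends Host> compatibleHostsFromCache(
        VirtualMachine vm,
        Map<Long, List<? extends Host>> vmToCompatibleHostsCache,
        Function<VirtualMachine, List<? extends Host>> lookup) {
    return vmToCompatibleHostsCache.computeIfAbsent(vm.getId(), id -> lookup.apply(vm));
}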

@@ -443,12 +891,28 @@ public class ClusterDrsServiceImplTest {
vmIdServiceOfferingMap.put(vm.getId(), serviceOffering);
}

Mockito.when(managementServer.listHostsForMigrationOfVM(vm1, 0L, 500L, null, vmList)).thenReturn(
new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false)));
Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn(
new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false)));
// Create caches for the new method signature
Map<Long, List<? extends Host>> vmToCompatibleHostsCache = new HashMap<>();
vmToCompatibleHostsCache.put(vm1.getId(), List.of(destHost));
vmToCompatibleHostsCache.put(vm2.getId(), List.of(destHost));

Map<Long, Map<Host, Boolean>> vmToStorageMotionCache = new HashMap<>();
vmToStorageMotionCache.put(vm1.getId(), Map.of(destHost, false));
vmToStorageMotionCache.put(vm2.getId(), Map.of(destHost, false));

Map<Long, com.cloud.deploy.DeploymentPlanner.ExcludeList> vmToExcludesMap = new HashMap<>();
vmToExcludesMap.put(vm1.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class));
vmToExcludesMap.put(vm2.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class));

// Create capacity maps with dummy data for getClusterImbalance
Map<Long, Ternary<Long, Long, Long>> hostCpuCapacityMap = new HashMap<>();
hostCpuCapacityMap.put(destHost.getId(), new Ternary<>(1000L, 0L, 2000L));
Map<Long, Ternary<Long, Long, Long>> hostMemoryCapacityMap = new HashMap<>();
hostMemoryCapacityMap.put(destHost.getId(), new Ternary<>(1024L * 1024L * 1024L, 0L, 2L * 1024L * 1024L * 1024L));

Pair<VirtualMachine, Host> bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm,
vmList, vmIdServiceOfferingMap, new HashMap<>(), new HashMap<>());
vmList, vmIdServiceOfferingMap, hostCpuCapacityMap, hostMemoryCapacityMap,
vmToCompatibleHostsCache, vmToStorageMotionCache, vmToExcludesMap);

assertNull(bestMigration.second());
assertNull(bestMigration.first());

@@ -110,7 +110,7 @@ class TestNFSMountOptsKVM(cloudstackTestCase):
    def getNFSMountOptionForPool(self, option, poolId):
        nfsstat_cmd = "nfsstat -m | sed -n '/%s/{ n; p }'" % poolId
        nfsstat = self.sshClient.execute(nfsstat_cmd)
        if (nfsstat == None):
        if nfsstat == None or len(nfsstat) == 0:
            return None
        stat = nfsstat[0]
        vers = stat[stat.find(option):].split("=")[1].split(",")[0]

@@ -532,6 +532,7 @@
"label.checksum": "Checksum",
"label.choose.resource.icon": "Choose icon",
"label.choose.saml.identity": "Choose SAML identity provider",
"label.choose.isolation.method.public.ip.range": "Choose the proper isolation method for the public IP range in accordance with the zone. Valid options currently 'vlan' or 'vxlan', defaults to 'vlan'.",
"label.cidr": "CIDR",
"label.cidrsize": "CIDR size",
"label.cidr.destination.network": "Destination Network CIDR",

@@ -2048,6 +2049,7 @@
"label.release.dedicated.pod": "Release dedicated Pod",
"label.release.dedicated.zone": "Release dedicated Zone",
"label.releasing.ip": "Releasing IP",
"label.remote.access.vpn.specify.iprange": "Specify IP Range of remote VPN",
"label.remote.instances": "Remote Instances",
"label.remove": "Remove",
"label.remove.annotation": "Remove comment",

@@ -3349,6 +3351,7 @@
"message.enable.vpn.processing": "Enabling VPN...",
"message.enabled.vpn": "Your remote access VPN is currently enabled and can be accessed via the IP",
"message.enabled.vpn.ip.sec": "Your IPSec pre-shared key is",
"message.enabled.vpn.ip.range": "Your VPN IP Range is",
"message.enabling.security.group.provider": "Enabling security group provider",
"message.enter.valid.nic.ip": "Please enter a valid IP address for NIC",
"message.error.access.key": "Please enter access key.",

@@ -3693,6 +3696,7 @@
"message.releasing.dedicated.host": "Releasing dedicated host...",
"message.releasing.dedicated.pod": "Releasing dedicated Pod...",
"message.releasing.dedicated.zone": "Releasing dedicated Zone...",
"message.remote.access.vpn.iprange.description": "The range of IP addresses to allocate to VPN clients. The first IP in the range will be taken by the VPN server. (Optional)",
"message.remove.annotation": "Are you sure you want to delete the comment?",
"message.remove.egress.rule.failed": "Removing egress rule failed",
"message.remove.egress.rule.processing": "Deleting egress rule...",

@@ -40,7 +40,7 @@
v-model:value="form.networkdomain"
:placeholder="apiParams.networkdomain.description" />
</a-form-item>
<a-form-item ref="roleid" name="roleid">
<a-form-item ref="roleid" name="roleid" v-if="!resource.isdefault">
<template #label>
<tooltip-label :title="$t('label.role')" :tooltip="apiParams.roleid.description"/>
</template>

@@ -145,11 +145,13 @@ export default {
const params = {
newname: values.newname,
networkdomain: values.networkdomain,
roleid: values.roleid,
apikeyaccess: values.apikeyaccess,
account: this.account,
domainid: this.domainId
}
if (values.roleid) {
params.roleid = values.roleid
}
if (this.isValidValueForKey(values, 'networkdomain') && values.networkdomain.length > 0) {
params.networkdomain = values.networkdomain
}

@@ -220,6 +220,20 @@
<a-select-option v-for="pod in pods" :key="pod.id" :value="pod.id" :label="pod.name">{{ pod.name }}</a-select-option>
</a-select>
</a-form-item>
<a-form-item name="isolationmethod" ref="isolationmethod" class="form__item" v-if="!basicGuestNetwork">
<tooltip-label :title="$t('label.isolation.method')" :tooltip="$t('label.choose.isolation.method.public.ip.range')" class="tooltip-label-wrapper"/>
<a-select
v-model:value="form.isolationmethod"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}" >
<a-select-option value="">{{ }}</a-select-option>
<a-select-option value="vlan"> VLAN </a-select-option>
<a-select-option value="vxlan"> VXLAN </a-select-option>
</a-select>
</a-form-item>
<a-form-item name="vlan" ref="vlan" :label="$t('label.vlan')" class="form__item" v-if="!basicGuestNetwork">
<a-input v-model:value="form.vlan" />
</a-form-item>

@@ -472,7 +486,8 @@ export default {
initAddIpRangeForm () {
this.formRef = ref()
this.form = reactive({
iptype: ''
iptype: '',
isolationmethod: ''
})
this.rules = reactive({
podid: [{ required: true, message: this.$t('label.required') }],

@@ -644,6 +659,15 @@ export default {
if (!this.basicGuestNetwork) {
params.zoneId = this.resource.zoneid
params.vlan = values.vlan
const vlanInput = (values.vlan || '').toString().trim()
if (vlanInput) {
const vlanInputLower = vlanInput.toLowerCase()
const startsWithPrefix = vlanInputLower.startsWith('vlan') || vlanInputLower.startsWith('vxlan')
const isNumeric = /^[0-9]+$/.test(vlanInput)
if (!startsWithPrefix && isNumeric && values.isolationmethod) {
params.vlan = `${values.isolationmethod}://${vlanInput}`
}
}
params.forsystemvms = values.forsystemvms
params.account = values.forsystemvms ? null : values.account
params.domainid = values.forsystemvms ? null : values.domain

@@ -80,10 +80,16 @@
:footer="null"
@cancel="closeModals">
{{ $t('message.network.addvm.desc') }}
<a-form @finish="submitAddNetwork" v-ctrl-enter="submitAddNetwork">
<div class="modal-form">
<p class="modal-form__label">{{ $t('label.network') }}:</p>
<a-form
@finish="submitAddNetwork"
v-ctrl-enter="submitAddNetwork"
layout="vertical">
<a-form-item name="network" ref="network">
<template #label>
<tooltip-label :title="$t('label.network')" :tooltip="addNetworkData.apiParams.networkid.description"/>
</template>
<a-select
:placeholder="addNetworkData.apiParams.networkid.description"
:value="addNetworkData.network"
@change="e => addNetworkData.network = e"
v-focus="true"

@@ -104,14 +110,28 @@
</span>
</a-select-option>
</a-select>
<p class="modal-form__label">{{ $t('label.publicip') }}:</p>
<a-input v-model:value="addNetworkData.ip"></a-input>
<br>
</a-form-item>
<a-form-item name="ip" ref="ip">
<template #label>
<tooltip-label :title="$t('label.ipaddress')" :tooltip="addNetworkData.apiParams.ipaddress.description"/>
</template>
<a-input
:placeholder="addNetworkData.apiParams.ipaddress.description"
v-model:value="addNetworkData.ipaddress" />
</a-form-item>
<a-form-item name="macaddress" ref="macaddress">
<template #label>
<tooltip-label :title="$t('label.macaddress')" :tooltip="addNetworkData.apiParams.macaddress.description"/>
</template>
<a-input
:placeholder="addNetworkData.apiParams.macaddress.description"
v-model:value="addNetworkData.macaddress" />
</a-form-item>
<a-form-item name="makedefault" ref="makedefault">
<a-checkbox v-model:checked="addNetworkData.makedefault">
{{ $t('label.make.default') }}
</a-checkbox>
<br>
</div>
</a-form-item>

<div :span="24" class="action-button">
<a-button @click="closeModals">{{ $t('label.cancel') }}</a-button>

@@ -229,6 +249,7 @@
<script>
import { getAPI, postAPI } from '@/api'
import NicsTable from '@/views/network/NicsTable'
import TooltipLabel from '@/components/widgets/TooltipLabel'
import TooltipButton from '@/components/widgets/TooltipButton'
import ResourceIcon from '@/components/view/ResourceIcon'

@@ -236,6 +257,7 @@ export default {
name: 'NicsTab',
components: {
NicsTable,
TooltipLabel,
TooltipButton,
ResourceIcon
},

@@ -279,6 +301,7 @@ export default {
},
created () {
this.vm = this.resource
this.addNetworkData.apiParams = this.$getApiParams('addNicToVirtualMachine')
},
methods: {
listNetworks () {

@@ -338,7 +361,8 @@ export default {
this.showUpdateIpModal = false
this.showSecondaryIpModal = false
this.addNetworkData.network = ''
this.addNetworkData.ip = ''
this.addNetworkData.ipaddress = ''
this.addNetworkData.macaddress = ''
this.addNetworkData.makedefault = false
this.editIpAddressValue = ''
this.newSecondaryIp = ''

@@ -367,8 +391,11 @@ export default {
const params = {}
params.virtualmachineid = this.vm.id
params.networkid = this.addNetworkData.network
if (this.addNetworkData.ip) {
params.ipaddress = this.addNetworkData.ip
if (this.addNetworkData.ipaddress) {
params.ipaddress = this.addNetworkData.ipaddress
}
if (this.addNetworkData.macaddress) {
params.macaddress = this.addNetworkData.macaddress
}
this.showAddNetworkModal = false
this.loadingNic = true

@@ -603,22 +630,6 @@ export default {
}
}

.action-button {
display: flex;
flex-wrap: wrap;

button {
padding: 5px;
height: auto;
margin-bottom: 10px;
align-self: flex-start;

&:not(:last-child) {
margin-right: 10px;
}
}
}

.wide-modal {
min-width: 50vw;
}

@@ -16,11 +16,14 @@
// under the License.

<template>
<div class="vpn-details">
<div v-if="remoteAccessVpn">
<div>
<p>{{ $t('message.enabled.vpn') }} <strong>{{ remoteAccessVpn.publicip }}</strong></p>
<p>{{ $t('message.enabled.vpn.ip.sec') }} <strong>{{ remoteAccessVpn.presharedkey }}</strong></p>
<p>{{ $t('message.enabled.vpn.ip.range') }} <strong>{{ remoteAccessVpn.iprange }}</strong></p>
<a-divider/>
<a-button><router-link :to="{ path: '/vpnuser'}">{{ $t('label.manage.vpn.user') }}</router-link></a-button>
<a-button
style="margin-left: 10px"
type="primary"

@@ -29,7 +32,6 @@
:disabled="!('deleteRemoteAccessVpn' in $store.getters.apis)">
{{ $t('label.disable.vpn') }}
</a-button>
<a-button><router-link :to="{ path: '/vpnuser'}">{{ $t('label.manage.vpn.user') }}</router-link></a-button>
</div>

<a-modal

@@ -70,6 +72,24 @@
@cancel="enableVpn = false">
<div v-ctrl-enter="handleCreateVpn">
<p>{{ $t('message.enable.vpn') }}</p>
<a-form-item>
<a-checkbox v-model:checked="specifyIpRange">
{{ $t('label.remote.access.vpn.specify.iprange') }}
</a-checkbox>
</a-form-item>
<a-form-item
v-if="specifyIpRange"
name="iprange"
:colon="false"
ref="iprange">
<template #label>
<tooltip-label :title="$t('label.ip.range')" :tooltip="$t('message.remote.access.vpn.iprange.description')"/>
</template>
<a-input
v-model:value="vpnIpRange"
:placeholder="'10.1.2.1-10.1.2.8'"
/>
</a-form-item>

<a-divider />

@@ -136,11 +156,13 @@
</div>
</a-modal>
</div>
</div>

</template>

<script>
import { getAPI, postAPI } from '@/api'
import TooltipLabel from '@/components/widgets/TooltipLabel'

export default {
props: {

@@ -149,6 +171,9 @@ export default {
required: true
}
},
components: {
TooltipLabel
},
data () {
return {
remoteAccessVpn: null,

@@ -158,7 +183,9 @@ export default {
vpnGatewayEnabled: false,
createVpnGateway: false,
deleteVpnGateway: false,
isSubmitted: false
isSubmitted: false,
specifyIpRange: false,
vpnIpRange: ''
}
},
inject: ['parentFetchData', 'parentToggleLoading'],

@@ -211,11 +238,15 @@ export default {
this.isSubmitted = true
this.parentToggleLoading()
this.enableVpn = false
postAPI('createRemoteAccessVpn', {
const params = {
publicipid: this.resource.id,
domainid: this.resource.domainid,
account: this.resource.account
}).then(response => {
}
if (this.specifyIpRange && this.vpnIpRange?.trim()) {
params.iprange = this.vpnIpRange.trim()
}
postAPI('createRemoteAccessVpn', params).then(response => {
this.$pollJob({
jobId: response.createremoteaccessvpnresponse.jobid,
successMethod: result => {

@@ -387,4 +418,7 @@ export default {
}
}
}
.vpn-details {
padding: 8px 0;
}
</style>