diff --git a/api/src/main/java/com/cloud/server/ManagementService.java b/api/src/main/java/com/cloud/server/ManagementService.java index d2a987bb078..51737546ffa 100644 --- a/api/src/main/java/com/cloud/server/ManagementService.java +++ b/api/src/main/java/com/cloud/server/ManagementService.java @@ -77,6 +77,8 @@ import com.cloud.alert.Alert; import com.cloud.capacity.Capacity; import com.cloud.dc.Pod; import com.cloud.dc.Vlan; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.ResourceUnavailableException; @@ -97,6 +99,7 @@ import com.cloud.utils.Ternary; import com.cloud.vm.InstanceGroup; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; +import com.cloud.vm.VirtualMachineProfile; /** * Hopefully this is temporary. @@ -478,6 +481,19 @@ public interface ManagementService { Ternary, Integer>, List, Map> listHostsForMigrationOfVM(VirtualMachine vm, Long startIndex, Long pageSize, String keyword, List vmList); + /** + * Apply affinity group constraints and other exclusion rules for VM migration. + * This is a helper method that can be used independently for per-iteration affinity checks in DRS. + * + * @param vm The virtual machine to migrate + * @param vmProfile The VM profile + * @param plan The deployment plan + * @param vmList List of VMs with current/simulated placements for affinity processing + * @return ExcludeList containing hosts to avoid + */ + ExcludeList applyAffinityConstraints(VirtualMachine vm, VirtualMachineProfile vmProfile, + DeploymentPlan plan, List vmList); + /** * List storage pools for live migrating of a volume. The API returns list of all pools in the cluster to which the * volume can be migrated. Current pool is not included in the list. 
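Reviewer note: a minimal, hypothetical sketch of how a DRS iteration might consume the new applyAffinityConstraints helper. The candidateHosts loop and the shouldAvoid-based filtering are illustrative assumptions about the caller, not part of this patch.

    // Illustrative fragment: vm, vmProfile, plan, vmList and candidateHosts are assumed to exist in the caller.
    ExcludeList avoid = managementService.applyAffinityConstraints(vm, vmProfile, plan, vmList);
    for (Host candidate : candidateHosts) {
        // skip hosts that the affinity processors marked as unusable for this VM
        if (avoid.shouldAvoid(candidate)) {
            continue;
        }
        // otherwise evaluate 'candidate' as a migration target for 'vm'
    }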
In case of vSphere datastore cluster storage pools, diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java index 85cfddfb714..a7826e022a6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/CreateManagementNetworkIpRangeCmd.java @@ -34,6 +34,7 @@ import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.Account; +import com.cloud.utils.StringUtils; @APICommand(name = "createManagementNetworkIpRange", description = "Creates a Management network IP range.", @@ -118,7 +119,7 @@ public class CreateManagementNetworkIpRangeCmd extends BaseAsyncCmd { } public String getVlan() { - if (vlan == null || vlan.isEmpty()) { + if (StringUtils.isBlank(vlan)) { vlan = "untagged"; } return vlan; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java index 5cb384925e8..48a6ca620e0 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.admin.vlan; import com.cloud.configuration.ConfigurationService; import com.cloud.network.Network; import com.cloud.utils.net.NetUtils; +import com.cloud.utils.StringUtils; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -163,7 +164,7 @@ public class CreateVlanIpRangeCmd extends BaseCmd { } public String getVlan() { - if ((vlan == null || vlan.isEmpty()) && !ConfigurationService.IsIpRangeForProvider(getProvider())) { + if (StringUtils.isBlank(vlan) && !ConfigurationService.IsIpRangeForProvider(getProvider())) { vlan = "untagged"; } return vlan; diff --git a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java index 665f95842b0..368487c2b9b 100644 --- a/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java +++ b/api/src/main/java/org/apache/cloudstack/cluster/ClusterDrsAlgorithm.java @@ -22,10 +22,10 @@ package org.apache.cloudstack.cluster; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; import com.cloud.org.Cluster; -import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.Adapter; import com.cloud.vm.VirtualMachine; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.math3.stat.descriptive.moment.Mean; import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation; @@ -40,6 +40,9 @@ import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetricUs public interface ClusterDrsAlgorithm extends Adapter { + Mean MEAN_CALCULATOR = new Mean(); + StandardDeviation STDDEV_CALCULATOR = new StandardDeviation(false); + /** * Determines whether a DRS operation is needed for a given cluster and host-VM * mapping. 
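Side note on the two shared calculators introduced above: the imbalance used throughout this interface is the coefficient of variation, i.e. standard deviation divided by mean, of a per-host metric. A self-contained sketch with made-up numbers, assuming commons-math3 on the classpath:

    import org.apache.commons.math3.stat.descriptive.moment.Mean;
    import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

    class ImbalanceSketch {                                  // hypothetical helper, not part of the patch
        public static void main(String[] args) {
            double[] hostMetrics = {2.0, 4.0, 6.0};          // per-host usage, made-up values
            double mean = new Mean().evaluate(hostMetrics);  // 4.0
            // 'false' selects the population standard deviation, matching STDDEV_CALCULATOR above
            double stdDev = new StandardDeviation(false).evaluate(hostMetrics, mean); // ~1.633
            double imbalance = mean == 0.0 ? 0.0 : stdDev / mean;                     // ~0.408
            System.out.println("imbalance = " + imbalance);
        }
    }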
@@ -59,79 +62,121 @@ public interface ClusterDrsAlgorithm extends Adapter { boolean needsDrs(Cluster cluster, List> cpuList, List> memoryList) throws ConfigurationException; - /** - * Determines the metrics for a given virtual machine and destination host in a DRS cluster. - * - * @param clusterId - * the ID of the cluster to check - * @param vm - * the virtual machine to check - * @param serviceOffering - * the service offering for the virtual machine - * @param destHost - * the destination host for the virtual machine - * @param hostCpuMap - * a map of host IDs to the Ternary of used, reserved and total CPU on each host - * @param hostMemoryMap - * a map of host IDs to the Ternary of used, reserved and total memory on each host - * @param requiresStorageMotion - * whether storage motion is required for the virtual machine + * Calculates the metrics (improvement, cost, benefit) for migrating a VM to a destination host. Improvement is + * calculated based on the change in cluster imbalance before and after the migration. * + * @param cluster the cluster to check + * @param vm the virtual machine to check + * @param serviceOffering the service offering for the virtual machine + * @param destHost the destination host for the virtual machine + * @param hostCpuMap a map of host IDs to the Ternary of used, reserved and total CPU on each host + * @param hostMemoryMap a map of host IDs to the Ternary of used, reserved and total memory on each host + * @param requiresStorageMotion whether storage motion is required for the virtual machine + * @param preImbalance the pre-calculated cluster imbalance before migration (null to calculate it) + * @param baseMetricsArray pre-calculated array of all host metrics before migration + * @param hostIdToIndexMap mapping from host ID to index in the metrics array * @return a ternary containing improvement, cost, benefit */ Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, - Boolean requiresStorageMotion) throws ConfigurationException; + Boolean requiresStorageMotion, Double preImbalance, + double[] baseMetricsArray, Map hostIdToIndexMap) throws ConfigurationException; /** - * Calculates the imbalance of the cluster after a virtual machine migration. + * Calculates the cluster imbalance after migrating a VM to a destination host. 
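To make the revised getMetrics contract concrete, a hedged fragment showing the two ways it can now be driven; names such as algorithm, offering, baseMetricsArray and hostIdToIndexMap are assumed to exist in the caller.

    // Per DRS iteration: compute the shared inputs once and reuse them for every candidate migration.
    Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(),
            new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null);
    Ternary<Double, Double, Double> metrics = algorithm.getMetrics(cluster, vm, offering, destHost,
            hostCpuMap, hostMemoryMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap);

    // One-off callers may pass null and let the algorithm compute the pre-migration imbalance itself.
    Ternary<Double, Double, Double> oneOff = algorithm.getMetrics(cluster, vm, offering, destHost,
            hostCpuMap, hostMemoryMap, false, null, baseMetricsArray, hostIdToIndexMap);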
* - * @param serviceOffering - * the service offering for the virtual machine - * @param vm - * the virtual machine being migrated - * @param destHost - * the destination host for the virtual machine - * @param hostCpuMap - * a map of host IDs to the Ternary of used, reserved and total CPU on each host - * @param hostMemoryMap - * a map of host IDs to the Ternary of used, reserved and total memory on each host - * - * @return a pair containing the CPU and memory imbalance of the cluster after the migration + * @param vm the virtual machine being migrated + * @param destHost the destination host for the virtual machine + * @param clusterId the cluster ID + * @param vmMetric the VM's resource consumption metric + * @param baseMetricsArray pre-calculated array of all host metrics before migration + * @param hostIdToIndexMap mapping from host ID to index in the metrics array + * @return the cluster imbalance after migration */ - default Double getImbalancePostMigration(ServiceOffering serviceOffering, VirtualMachine vm, - Host destHost, Map> hostCpuMap, - Map> hostMemoryMap) throws ConfigurationException { - Pair>> pair = getHostMetricsMapAndType(destHost.getClusterId(), serviceOffering, hostCpuMap, hostMemoryMap); - long vmMetric = pair.first(); - Map> hostMetricsMap = pair.second(); + default Double getImbalancePostMigration(VirtualMachine vm, + Host destHost, Long clusterId, long vmMetric, double[] baseMetricsArray, + Map hostIdToIndexMap, Map> hostCpuMap, + Map> hostMemoryMap) { + // Create a copy of the base array and adjust only the two affected hosts + double[] adjustedMetrics = new double[baseMetricsArray.length]; + System.arraycopy(baseMetricsArray, 0, adjustedMetrics, 0, baseMetricsArray.length); - List list = new ArrayList<>(); - for (Long hostId : hostMetricsMap.keySet()) { - list.add(getMetricValuePostMigration(destHost.getClusterId(), hostMetricsMap.get(hostId), vmMetric, hostId, destHost.getId(), vm.getHostId())); + long destHostId = destHost.getId(); + long vmHostId = vm.getHostId(); + + // Adjust source host (remove VM resources) + Integer sourceIndex = hostIdToIndexMap.get(vmHostId); + if (sourceIndex != null && sourceIndex < adjustedMetrics.length) { + Map> sourceMetricsMap = getClusterDrsMetric(clusterId).equals("cpu") ? hostCpuMap : hostMemoryMap; + Ternary sourceMetrics = sourceMetricsMap.get(vmHostId); + if (sourceMetrics != null) { + adjustedMetrics[sourceIndex] = getMetricValuePostMigration(clusterId, sourceMetrics, vmMetric, vmHostId, destHostId, vmHostId); + } } - return getImbalance(list); + + // Adjust destination host (add VM resources) + Integer destIndex = hostIdToIndexMap.get(destHostId); + if (destIndex != null && destIndex < adjustedMetrics.length) { + Map> destMetricsMap = getClusterDrsMetric(clusterId).equals("cpu") ? hostCpuMap : hostMemoryMap; + Ternary destMetrics = destMetricsMap.get(destHostId); + if (destMetrics != null) { + adjustedMetrics[destIndex] = getMetricValuePostMigration(clusterId, destMetrics, vmMetric, destHostId, destHostId, vmHostId); + } + } + + return calculateImbalance(adjustedMetrics); } - private Pair>> getHostMetricsMapAndType(Long clusterId, - ServiceOffering serviceOffering, Map> hostCpuMap, - Map> hostMemoryMap) throws ConfigurationException { + /** + * Calculate imbalance from an array of metric values. + * Imbalance is defined as standard deviation divided by mean. + * + * Uses reusable stateless calculator objects to avoid object creation overhead. 
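The getImbalancePostMigration rewrite above avoids recomputing every host: only the source and destination slots of a copied baseline array change. A tiny self-contained illustration with made-up host IDs and metric values:

    import java.util.Map;

    class CopyAndAdjustSketch {                                  // hypothetical, values are illustrative
        public static void main(String[] args) {
            double[] base = {0.30, 0.70, 0.50};                  // pre-migration metric per host
            Map<Long, Integer> hostIdToIndex = Map.of(101L, 0, 102L, 1, 103L, 2);

            double[] adjusted = base.clone();                    // equivalent to the System.arraycopy above
            adjusted[hostIdToIndex.get(101L)] = 0.20;            // source host after the VM leaves
            adjusted[hostIdToIndex.get(102L)] = 0.80;            // destination host after the VM arrives
            // all other slots are untouched, so the per-candidate cost is two updates, not a full rebuild
            System.out.println(java.util.Arrays.toString(adjusted));
        }
    }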
+ * @param values array of metric values + * @return calculated imbalance + */ + private static double calculateImbalance(double[] values) { + if (values == null || values.length == 0) { + return 0.0; + } + + double mean = MEAN_CALCULATOR.evaluate(values); + if (mean == 0.0) { + return 0.0; // Avoid division by zero + } + double stdDev = STDDEV_CALCULATOR.evaluate(values, mean); + return stdDev / mean; + } + + /** + * Helper method to get VM metric based on cluster configuration. + */ + static long getVmMetric(ServiceOffering serviceOffering, Long clusterId) throws ConfigurationException { String metric = getClusterDrsMetric(clusterId); - Pair>> pair; switch (metric) { case "cpu": - pair = new Pair<>((long) serviceOffering.getCpu() * serviceOffering.getSpeed(), hostCpuMap); - break; + return (long) serviceOffering.getCpu() * serviceOffering.getSpeed(); case "memory": - pair = new Pair<>(serviceOffering.getRamSize() * 1024L * 1024L, hostMemoryMap); - break; + return serviceOffering.getRamSize() * 1024L * 1024L; default: throw new ConfigurationException( String.format("Invalid metric: %s for cluster: %d", metric, clusterId)); } - return pair; + } + + /** + * Helper method to calculate metrics from pre and post imbalance values. + */ + default Ternary calculateMetricsFromImbalances(Double preImbalance, Double postImbalance) { + // This needs more research to determine the cost and benefit of a migration + // TODO: Cost should be a factor of the VM size and the host capacity + // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host + final double improvement = preImbalance - postImbalance; + final double cost = 0.0; + final double benefit = 1.0; + return new Ternary<>(improvement, cost, benefit); } private Double getMetricValuePostMigration(Long clusterId, Ternary metrics, long vmMetric, @@ -151,9 +196,26 @@ public interface ClusterDrsAlgorithm extends Adapter { } private static Double getImbalance(List metricList) { - Double clusterMeanMetric = getClusterMeanMetric(metricList); - Double clusterStandardDeviation = getClusterStandardDeviation(metricList, clusterMeanMetric); - return clusterStandardDeviation / clusterMeanMetric; + if (CollectionUtils.isEmpty(metricList)) { + return 0.0; + } + // Convert List to double[] once, avoiding repeated conversions + double[] values = new double[metricList.size()]; + int index = 0; + for (Double value : metricList) { + if (value != null) { + values[index++] = value; + } + } + + // Trim array if some values were null + if (index < values.length) { + double[] trimmed = new double[index]; + System.arraycopy(values, 0, trimmed, 0, index); + values = trimmed; + } + + return calculateImbalance(values); } static String getClusterDrsMetric(long clusterId) { @@ -181,36 +243,6 @@ public interface ClusterDrsAlgorithm extends Adapter { return null; } - /** - * Mean is the average of a collection or set of metrics. In context of a DRS - * cluster, the cluster metrics defined as the average metrics value for some - * metric (such as CPU, memory etc.) for every resource such as host. - * Cluster Mean Metric, mavg = (∑mi) / N, where mi is a measurable metric for a - * resource ‘i’ in a cluster with total N number of resources. 
- */ - static Double getClusterMeanMetric(List metricList) { - return new Mean().evaluate(metricList.stream().mapToDouble(i -> i).toArray()); - } - - /** - * Standard deviation is defined as the square root of the absolute squared sum - * of difference of a metric from its mean for every resource divided by the - * total number of resources. In context of the DRS, the cluster standard - * deviation is the standard deviation based on a metric of resources in a - * cluster such as for the allocation or utilisation CPU/memory metric of hosts - * in a cluster. - * Cluster Standard Deviation, σc = sqrt((∑∣mi−mavg∣^2) / N), where mavg is the - * mean metric value and mi is a measurable metric for some resource ‘i’ in the - * cluster with total N number of resources. - */ - static Double getClusterStandardDeviation(List metricList, Double mean) { - if (mean != null) { - return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray(), mean); - } else { - return new StandardDeviation(false).evaluate(metricList.stream().mapToDouble(i -> i).toArray()); - } - } - static boolean getDrsMetricUseRatio(long clusterId) { return ClusterDrsMetricUseRatio.valueIn(clusterId); } diff --git a/api/src/main/java/org/apache/cloudstack/config/ApiServiceConfiguration.java b/api/src/main/java/org/apache/cloudstack/config/ApiServiceConfiguration.java index a4aa860487f..113b97f43c8 100644 --- a/api/src/main/java/org/apache/cloudstack/config/ApiServiceConfiguration.java +++ b/api/src/main/java/org/apache/cloudstack/config/ApiServiceConfiguration.java @@ -16,10 +16,15 @@ // under the License. package org.apache.cloudstack.config; +import com.cloud.exception.InvalidParameterValueException; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; public class ApiServiceConfiguration implements Configurable { + protected static Logger LOGGER = LogManager.getLogger(ApiServiceConfiguration.class); public static final ConfigKey ManagementServerAddresses = new ConfigKey<>(String.class, "host", "Advanced", "localhost", "The ip address of management server. This can also accept comma separated addresses.", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null); public static final ConfigKey ApiServletPath = new ConfigKey("Advanced", String.class, "endpoint.url", "http://localhost:8080/client/api", "API end point. Can be used by CS components/services deployed remotely, for sending CS API requests", true); @@ -29,6 +34,20 @@ public class ApiServiceConfiguration implements Configurable { "true", "Are the source checks on API calls enabled (true) or not (false)? See api.allowed.source.cidr.list", true, ConfigKey.Scope.Global); public static final ConfigKey ApiAllowedSourceCidrList = new ConfigKey<>(String.class, "api.allowed.source.cidr.list", "Advanced", "0.0.0.0/0,::/0", "Comma separated list of IPv4/IPv6 CIDRs from which API calls can be performed. Can be set on Global and Account levels.", true, ConfigKey.Scope.Account, null, null, null, null, null, ConfigKey.Kind.CSV, null); + + + public static void validateEndpointUrl() { + String csUrl = getApiServletPathValue(); + if (StringUtils.isBlank(csUrl) || StringUtils.containsAny(csUrl, "localhost", "127.0.0.1", "[::1]")) { + LOGGER.error("Global setting [{}] cannot contain localhost or be blank. 
Current value: {}", ApiServletPath.key(), csUrl); + throw new InvalidParameterValueException("Unable to complete this operation. Contact your cloud admin."); + } + } + + public static String getApiServletPathValue() { + return ApiServletPath.value(); + } + @Override public String getConfigComponentName() { return ApiServiceConfiguration.class.getSimpleName(); diff --git a/api/src/test/java/org/apache/cloudstack/config/ApiServiceConfigurationTest.java b/api/src/test/java/org/apache/cloudstack/config/ApiServiceConfigurationTest.java new file mode 100644 index 00000000000..4e96af3ead4 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/config/ApiServiceConfigurationTest.java @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.config; + +import com.cloud.exception.InvalidParameterValueException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class ApiServiceConfigurationTest { + + private static final String LOCALHOST = "http://localhost"; + + private static final String ENDPOINT_URL = "https://acs.clouds.com/client/api"; + + private static final String WHITE_SPACE = " "; + + private static final String BLANK_STRING = ""; + + private static final String NULL_STRING = null; + + private static final String LOCALHOST_IP = "127.0.0.1"; + + @Test(expected = InvalidParameterValueException.class) + public void validateEndpointUrlTestIfEndpointUrlContainLocalhostShouldThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(LOCALHOST); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } + + @Test + public void validateEndpointUrlTestIfEndpointUrlContainLocalhostShouldNotThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(ENDPOINT_URL); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } + + @Test(expected = InvalidParameterValueException.class) + public void validateEndpointUrlTestIfEndpointUrlIsNullShouldThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = 
Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(NULL_STRING); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } + + @Test(expected = InvalidParameterValueException.class) + public void validateEndpointUrlTestIfEndpointUrlIsBlankShouldThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(BLANK_STRING); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } + + @Test(expected = InvalidParameterValueException.class) + public void validateEndpointUrlTestIfEndpointUrlIsWhiteSpaceShouldThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(WHITE_SPACE); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } + + @Test(expected = InvalidParameterValueException.class) + public void validateEndpointUrlTestIfEndpointUrlContainLocalhostIpShouldThrowInvalidParameterValueException() { + try (MockedStatic apiServiceConfigurationMockedStatic = Mockito.mockStatic(ApiServiceConfiguration.class)) { + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::getApiServletPathValue).thenReturn(LOCALHOST_IP); + apiServiceConfigurationMockedStatic.when(ApiServiceConfiguration::validateEndpointUrl).thenCallRealMethod(); + ApiServiceConfiguration.validateEndpointUrl(); + } + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 004a5f9dd65..0e784d961b3 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -127,8 +127,12 @@ import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.ScriptRunner; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; + import com.google.common.annotations.VisibleForTesting; public class DatabaseUpgradeChecker implements SystemIntegrityChecker { @@ -255,7 +259,6 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { LOGGER.error("Unable to execute upgrade script", e); throw new CloudRuntimeException("Unable to execute upgrade script", e); } - } @VisibleForTesting @@ -459,43 +462,101 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { throw new CloudRuntimeException("Unable to acquire lock to check for database integrity."); } - try { - initializeDatabaseEncryptors(); - - final CloudStackVersion dbVersion = CloudStackVersion.parse(_dao.getCurrentVersion()); - final String 
currentVersionValue = this.getClass().getPackage().getImplementationVersion(); - - if (StringUtils.isBlank(currentVersionValue)) { - return; - } - - String csVersion = SystemVmTemplateRegistration.parseMetadataFile(); - final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion); - final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); - SystemVmTemplateRegistration.CS_MAJOR_VERSION = sysVmVersion.getMajorRelease() + "." + sysVmVersion.getMinorRelease(); - SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease()); - - LOGGER.info("DB version = {} Code Version = {}", dbVersion, currentVersion); - - if (dbVersion.compareTo(currentVersion) > 0) { - throw new CloudRuntimeException("Database version " + dbVersion + " is higher than management software version " + currentVersionValue); - } - - if (dbVersion.compareTo(currentVersion) == 0) { - LOGGER.info("DB version and code version matches so no upgrade needed."); - return; - } - - upgrade(dbVersion, currentVersion); - } finally { - lock.unlock(); - } + doUpgrades(lock); } finally { lock.releaseRef(); } } - private void initializeDatabaseEncryptors() { + boolean isStandalone() throws CloudRuntimeException { + return Transaction.execute(new TransactionCallback<>() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + String sql = "SELECT COUNT(*) FROM `cloud`.`mshost` WHERE `state` = 'UP'"; + try (Connection conn = TransactionLegacy.getStandaloneConnection(); + PreparedStatement pstmt = conn.prepareStatement(sql); + ResultSet rs = pstmt.executeQuery()) { + if (rs.next()) { + int count = rs.getInt(1); + return count == 0; + } + } catch (SQLException e) { + String errorMessage = "Unable to check if the management server is running in standalone mode."; + LOGGER.error(errorMessage, e); + return false; + } catch (NullPointerException npe) { + String errorMessage = "Unable to check if the management server is running in standalone mode. Not able to get a Database connection."; + LOGGER.error(errorMessage, npe); + return false; + } + return true; + } + }); + } + + @VisibleForTesting + protected void doUpgrades(GlobalLock lock) { + try { + initializeDatabaseEncryptors(); + + final CloudStackVersion dbVersion = CloudStackVersion.parse(_dao.getCurrentVersion()); + final String currentVersionValue = getImplementationVersion(); + + if (StringUtils.isBlank(currentVersionValue)) { + return; + } + + String csVersion = parseSystemVmMetadata(); + final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion); + final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); + SystemVmTemplateRegistration.CS_MAJOR_VERSION = sysVmVersion.getMajorRelease() + "." + sysVmVersion.getMinorRelease(); + SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease()); + + LOGGER.info("DB version = {} Code Version = {}", dbVersion, currentVersion); + + if (dbVersion.compareTo(currentVersion) > 0) { + throw new CloudRuntimeException("Database version " + dbVersion + " is higher than management software version " + currentVersionValue); + } + + if (dbVersion.compareTo(currentVersion) == 0) { + LOGGER.info("DB version and code version matches so no upgrade needed."); + return; + } + + if (isStandalone()) { + upgrade(dbVersion, currentVersion); + } else { + String errorMessage = "Database upgrade is required but the management server is running in a clustered environment. 
" + + "Please perform the database upgrade when the management server is not running in a clustered environment."; + LOGGER.error(errorMessage); + handleClusteredUpgradeRequired(); // allow tests to override behavior + } + } finally { + lock.unlock(); + } + } + + /** + * Hook that is called when an upgrade is required but the management server is clustered. + * Default behavior is to exit the JVM, tests can override to throw instead. + */ + @VisibleForTesting + protected void handleClusteredUpgradeRequired() { + System.exit(5); // I would prefer ServerDaemon.abort(errorMessage) but that would create a dependency hell + } + + @VisibleForTesting + protected String getImplementationVersion() { + return this.getClass().getPackage().getImplementationVersion(); + } + + @VisibleForTesting + protected String parseSystemVmMetadata() { + return SystemVmTemplateRegistration.parseMetadataFile(); + } + + // Make this protected so tests can noop it out + protected void initializeDatabaseEncryptors() { TransactionLegacy txn = TransactionLegacy.open("initializeDatabaseEncryptors"); txn.start(); String errorMessage = "Unable to get the database connections"; diff --git a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerDoUpgradesTest.java b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerDoUpgradesTest.java new file mode 100644 index 00000000000..6241bd7cede --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerDoUpgradesTest.java @@ -0,0 +1,173 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.upgrade; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.cloud.upgrade.dao.VersionDao; +import com.cloud.upgrade.dao.VersionDaoImpl; +import com.cloud.upgrade.dao.VersionVO; +import com.cloud.utils.db.GlobalLock; +import org.junit.Test; + +public class DatabaseUpgradeCheckerDoUpgradesTest { + + static class StubVersionDao extends VersionDaoImpl implements VersionDao { + private final String currentVersion; + + StubVersionDao(String currentVersion) { + this.currentVersion = currentVersion; + } + + @Override + public VersionVO findByVersion(String version, VersionVO.Step step) { + return null; + } + + @Override + public String getCurrentVersion() { + return currentVersion; + } + + } + + private static class TestableChecker extends DatabaseUpgradeChecker { + boolean initializeCalled = false; + boolean upgradeCalled = false; + boolean clusterHandlerCalled = false; + String implVersionOverride = null; + String sysVmMetadataOverride = "4.8.0"; + boolean standaloneOverride = true; + + TestableChecker(String daoVersion) { + // set a stub DAO + this._dao = new StubVersionDao(daoVersion); + } + + @Override + protected void initializeDatabaseEncryptors() { + initializeCalled = true; + // noop instead of doing DB work + } + + @Override + protected String getImplementationVersion() { + return implVersionOverride; + } + + @Override + protected String parseSystemVmMetadata() { + return sysVmMetadataOverride; + } + + @Override + boolean isStandalone() { + return standaloneOverride; + } + + @Override + protected void upgrade(org.apache.cloudstack.utils.CloudStackVersion dbVersion, org.apache.cloudstack.utils.CloudStackVersion currentVersion) { + upgradeCalled = true; + } + + @Override + protected void handleClusteredUpgradeRequired() { + clusterHandlerCalled = true; + } + } + + @Test + public void testDoUpgrades_noImplementationVersion_returnsEarly() { + TestableChecker checker = new TestableChecker("4.8.0"); + checker.implVersionOverride = ""; // blank -> should return early + + GlobalLock lock = GlobalLock.getInternLock("test-noimpl"); + try { + // acquire lock so doUpgrades can safely call unlock in finally + lock.lock(1); + checker.doUpgrades(lock); + } finally { + // ensure lock released if still held + lock.releaseRef(); + } + + assertTrue("initializeDatabaseEncryptors should be called before returning", checker.initializeCalled); + assertFalse("upgrade should not be called when implementation version is blank", checker.upgradeCalled); + assertFalse("cluster handler should not be called", checker.clusterHandlerCalled); + } + + @Test + public void testDoUpgrades_dbUpToDate_noUpgrade() { + // DB version = code version -> no upgrade + TestableChecker checker = new TestableChecker("4.8.1"); + checker.implVersionOverride = "4.8.1"; + checker.sysVmMetadataOverride = "4.8.1"; + + GlobalLock lock = GlobalLock.getInternLock("test-uptodate"); + try { + lock.lock(1); + checker.doUpgrades(lock); + } finally { + lock.releaseRef(); + } + + assertTrue(checker.initializeCalled); + assertFalse(checker.upgradeCalled); + assertFalse(checker.clusterHandlerCalled); + } + + @Test + public void testDoUpgrades_requiresUpgrade_standalone_invokesUpgrade() { + TestableChecker checker = new TestableChecker("4.8.0"); + checker.implVersionOverride = "4.8.2"; // code is newer than DB + checker.sysVmMetadataOverride = "4.8.2"; + checker.standaloneOverride = true; + + GlobalLock lock = GlobalLock.getInternLock("test-upgrade-standalone"); + try { + 
lock.lock(1); + checker.doUpgrades(lock); + } finally { + lock.releaseRef(); + } + + assertTrue(checker.initializeCalled); + assertTrue("upgrade should be invoked in standalone mode", checker.upgradeCalled); + assertFalse(checker.clusterHandlerCalled); + } + + @Test + public void testDoUpgrades_requiresUpgrade_clustered_invokesHandler() { + TestableChecker checker = new TestableChecker("4.8.0"); + checker.implVersionOverride = "4.8.2"; // code is newer than DB + checker.sysVmMetadataOverride = "4.8.2"; + checker.standaloneOverride = false; + + GlobalLock lock = GlobalLock.getInternLock("test-upgrade-clustered"); + try { + lock.lock(1); + checker.doUpgrades(lock); + } finally { + lock.releaseRef(); + } + + assertTrue(checker.initializeCalled); + assertFalse("upgrade should not be invoked in clustered mode", checker.upgradeCalled); + assertTrue("cluster handler should be invoked in clustered mode", checker.clusterHandlerCalled); + } +} diff --git a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java index 1a9372f73ea..27995eb179a 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/DatabaseUpgradeCheckerTest.java @@ -16,14 +16,24 @@ // under the License. package com.cloud.upgrade; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import java.sql.SQLException; +import java.lang.reflect.Field; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; -import java.util.Arrays; +import javax.sql.DataSource; import org.apache.cloudstack.utils.CloudStackVersion; import org.junit.Test; +import org.junit.Before; +import org.junit.After; +import org.junit.runner.RunWith; + +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.upgrade.DatabaseUpgradeChecker.NoopDbUpgrade; import com.cloud.upgrade.dao.DbUpgrade; @@ -43,8 +53,51 @@ import com.cloud.upgrade.dao.Upgrade471to480; import com.cloud.upgrade.dao.Upgrade480to481; import com.cloud.upgrade.dao.Upgrade490to4910; +import com.cloud.utils.db.TransactionLegacy; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertArrayEquals; + + +@RunWith(MockitoJUnitRunner.class) public class DatabaseUpgradeCheckerTest { + @Mock + DataSource dataSource; + + @Mock + Connection connection; + + @Mock + PreparedStatement preparedStatement; + + @Mock + ResultSet resultSet; + + private DataSource backupDataSource; + + @Before + public void setup() throws Exception { + Field dsField = TransactionLegacy.class.getDeclaredField("s_ds"); + dsField.setAccessible(true); + backupDataSource = (DataSource) dsField.get(null); + dsField.set(null, dataSource); + + Mockito.when(dataSource.getConnection()).thenReturn(connection); + Mockito.when(connection.prepareStatement(ArgumentMatchers.anyString())).thenReturn(preparedStatement); + Mockito.when(preparedStatement.executeQuery()).thenReturn(resultSet); + } + + @After + public void cleanup() throws Exception { + Field dsField = TransactionLegacy.class.getDeclaredField("s_ds"); + dsField.setAccessible(true); + dsField.set(null, backupDataSource); + } + @Test 
public void testCalculateUpgradePath480to481() { @@ -79,7 +132,7 @@ public class DatabaseUpgradeCheckerTest { assertTrue(upgrades.length >= 1); assertTrue(upgrades[0] instanceof Upgrade490to4910); - assertTrue(Arrays.equals(new String[] {"4.9.0", currentVersion.toString()}, upgrades[0].getUpgradableVersionRange())); + assertArrayEquals(new String[]{"4.9.0", currentVersion.toString()}, upgrades[0].getUpgradableVersionRange()); assertEquals(currentVersion.toString(), upgrades[0].getUpgradedVersion()); } @@ -104,7 +157,7 @@ public class DatabaseUpgradeCheckerTest { assertTrue(upgrades[3] instanceof Upgrade41120to41130); assertTrue(upgrades[4] instanceof Upgrade41120to41200); - assertTrue(Arrays.equals(new String[] {"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange())); + assertArrayEquals(new String[]{"4.11.0.0", "4.11.1.0"}, upgrades[1].getUpgradableVersionRange()); assertEquals(currentVersion.toString(), upgrades[4].getUpgradedVersion()); } @@ -151,12 +204,12 @@ public class DatabaseUpgradeCheckerTest { assertTrue(upgrades[5] instanceof Upgrade471to480); assertTrue(upgrades[6] instanceof Upgrade480to481); - assertTrue(Arrays.equals(new String[] {"4.8.1", currentVersion.toString()}, upgrades[upgrades.length - 1].getUpgradableVersionRange())); + assertArrayEquals(new String[]{"4.8.1", currentVersion.toString()}, upgrades[upgrades.length - 1].getUpgradableVersionRange()); assertEquals(currentVersion.toString(), upgrades[upgrades.length - 1].getUpgradedVersion()); } @Test - public void testCalculateUpgradePathUnkownDbVersion() { + public void testCalculateUpgradePathUnknownDbVersion() { final CloudStackVersion dbVersion = CloudStackVersion.parse("4.99.0.0"); assertNotNull(dbVersion); @@ -173,7 +226,7 @@ public class DatabaseUpgradeCheckerTest { } @Test - public void testCalculateUpgradePathFromKownDbVersion() { + public void testCalculateUpgradePathFromKnownDbVersion() { final CloudStackVersion dbVersion = CloudStackVersion.parse("4.17.0.0"); assertNotNull(dbVersion); @@ -306,4 +359,25 @@ public class DatabaseUpgradeCheckerTest { assertEquals(upgrades.length + 1, upgradesFromSecurityReleaseToNext.length); assertTrue(upgradesFromSecurityReleaseToNext[upgradesFromSecurityReleaseToNext.length - 1] instanceof NoopDbUpgrade); } + + @Test + public void isStandalone() throws SQLException { + // simulate zero 'UP' hosts -> standalone + Mockito.when(resultSet.next()).thenReturn(true); + Mockito.when(resultSet.getInt(1)).thenReturn(0); + + final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker(); + assertTrue("DatabaseUpgradeChecker should be a standalone component", checker.isStandalone()); + } + + @Test + public void isNotStandalone() throws SQLException { + // simulate at least one 'UP' host -> not standalone + Mockito.when(resultSet.next()).thenReturn(true); + Mockito.when(resultSet.getInt(1)).thenReturn(1); + + final DatabaseUpgradeChecker checker = new DatabaseUpgradeChecker(); + assertFalse("DatabaseUpgradeChecker should not be a standalone component", checker.isStandalone()); + } + } diff --git a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java index b6a5ed1aac1..902ab0900bd 100644 --- a/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java +++ b/plugins/drs/cluster/balanced/src/main/java/org/apache/cloudstack/cluster/Balanced.java @@ -68,23 +68,26 @@ public class Balanced extends AdapterBase implements 
ClusterDrsAlgorithm { return "balanced"; } + @Override public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, - Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); - Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); + Boolean requiresStorageMotion, Double preImbalance, + double[] baseMetricsArray, Map hostIdToIndexMap) throws ConfigurationException { + // Use provided pre-imbalance if available, otherwise calculate it + if (preImbalance == null) { + preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()), null); + } - logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + // Use optimized post-imbalance calculation that adjusts only affected hosts + Double postImbalance = getImbalancePostMigration(vm, destHost, + cluster.getId(), ClusterDrsAlgorithm.getVmMetric(serviceOffering, cluster.getId()), + baseMetricsArray, hostIdToIndexMap, hostCpuMap, hostMemoryMap); + + logger.trace("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost ID: {} destHost: {}", cluster, preImbalance, postImbalance, getName(), vm, vm.getHostId(), destHost); - // This needs more research to determine the cost and benefit of a migration - // TODO: Cost should be a factor of the VM size and the host capacity - // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host - final double improvement = preImbalance - postImbalance; - final double cost = 0.0; - final double benefit = 1.0; - return new Ternary<>(improvement, cost, benefit); + return calculateMetricsFromImbalances(preImbalance, postImbalance); } } diff --git a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java index d5160671958..e955a10f2a5 100644 --- a/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java +++ b/plugins/drs/cluster/balanced/src/test/java/org/apache/cloudstack/cluster/BalancedTest.java @@ -43,6 +43,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue; import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold; import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric; import static org.junit.Assert.assertEquals; @@ -119,6 +122,48 @@ public class BalancedTest { closeable.close(); } + /** + * Helper method to prepare metrics data for getMetrics calls with optimized signature. + * Calculates pre-imbalance and builds baseMetricsArray and hostIdToIndexMap. 
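For readers skimming the Balanced change above: with calculateMetricsFromImbalances(preImbalance, postImbalance) a positive improvement means the migration reduces imbalance, while Condensed (further down in this diff) swaps the arguments so that increased imbalance scores higher. A tiny example with made-up imbalance values, called from within an algorithm implementation:

    // inside a ClusterDrsAlgorithm implementation; 0.45 and 0.30 are illustrative imbalance values
    Ternary<Double, Double, Double> t = calculateMetricsFromImbalances(0.45, 0.30);
    // t.first()  == 0.15  (improvement)
    // t.second() == 0.0   (cost, placeholder per the TODOs)
    // t.third()  == 1.0   (benefit, placeholder per the TODOs)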
+ * + * @return a Ternary containing preImbalance, baseMetricsArray, and hostIdToIndexMap + */ + private Ternary> prepareMetricsData() throws ConfigurationException { + // Calculate pre-imbalance + Double preImbalance = getClusterImbalance(clusterId, new ArrayList<>(hostCpuFreeMap.values()), + new ArrayList<>(hostMemoryFreeMap.values()), null); + + // Build baseMetricsArray and hostIdToIndexMap + String metricType = getClusterDrsMetric(clusterId); + Map> baseMetricsMap = "cpu".equals(metricType) ? hostCpuFreeMap : hostMemoryFreeMap; + double[] baseMetricsArray = new double[baseMetricsMap.size()]; + Map hostIdToIndexMap = new HashMap<>(); + + int index = 0; + for (Map.Entry> entry : baseMetricsMap.entrySet()) { + Long hostId = entry.getKey(); + Ternary metrics = entry.getValue(); + long used = metrics.first(); + long actualTotal = metrics.third() - metrics.second(); + long free = actualTotal - metrics.first(); + Double metricValue = getMetricValue(clusterId, used, free, actualTotal, null); + if (metricValue != null) { + baseMetricsArray[index] = metricValue; + hostIdToIndexMap.put(hostId, index); + index++; + } + } + + // Trim array if some values were null + if (index < baseMetricsArray.length) { + double[] trimmed = new double[index]; + System.arraycopy(baseMetricsArray, 0, trimmed, 0, index); + baseMetricsArray = trimmed; + } + + return new Ternary<>(preImbalance, baseMetricsArray, hostIdToIndexMap); + } + /** * needsDrs tests *
Scenarios to test for needsDrs @@ -183,8 +228,14 @@ public class BalancedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); + + Ternary> metricsData = prepareMetricsData(); + Double preImbalance = metricsData.first(); + double[] baseMetricsArray = metricsData.second(); + Map hostIdToIndexMap = metricsData.third(); + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, - hostCpuFreeMap, hostMemoryFreeMap, false); + hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap); assertEquals(0.0, result.first(), 0.01); assertEquals(0.0, result.second(), 0.0); assertEquals(1.0, result.third(), 0.0); @@ -197,8 +248,14 @@ public class BalancedTest { @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); + + Ternary> metricsData = prepareMetricsData(); + Double preImbalance = metricsData.first(); + double[] baseMetricsArray = metricsData.second(); + Map hostIdToIndexMap = metricsData.third(); + Ternary result = balanced.getMetrics(cluster, vm3, serviceOffering, destHost, - hostCpuFreeMap, hostMemoryFreeMap, false); + hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap); assertEquals(0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); assertEquals(1, result.third(), 0.0); diff --git a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java index 70c5acd951f..d672ddfda61 100644 --- a/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java +++ b/plugins/drs/cluster/condensed/src/main/java/org/apache/cloudstack/cluster/Condensed.java @@ -75,20 +75,22 @@ public class Condensed extends AdapterBase implements ClusterDrsAlgorithm { public Ternary getMetrics(Cluster cluster, VirtualMachine vm, ServiceOffering serviceOffering, Host destHost, Map> hostCpuMap, Map> hostMemoryMap, - Boolean requiresStorageMotion) throws ConfigurationException { - Double preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), - new ArrayList<>(hostMemoryMap.values()), null); - Double postImbalance = getImbalancePostMigration(serviceOffering, vm, destHost, hostCpuMap, hostMemoryMap); + Boolean requiresStorageMotion, Double preImbalance, + double[] baseMetricsArray, Map hostIdToIndexMap) throws ConfigurationException { + // Use provided pre-imbalance if available, otherwise calculate it + if (preImbalance == null) { + preImbalance = ClusterDrsAlgorithm.getClusterImbalance(cluster.getId(), new ArrayList<>(hostCpuMap.values()), + new ArrayList<>(hostMemoryMap.values()), null); + } - logger.debug("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost: {} destHost: {}", + // Use optimized post-imbalance calculation that adjusts only affected hosts + Double postImbalance = getImbalancePostMigration(vm, destHost, + cluster.getId(), ClusterDrsAlgorithm.getVmMetric(serviceOffering, cluster.getId()), + baseMetricsArray, hostIdToIndexMap, hostCpuMap, hostMemoryMap); + + logger.trace("Cluster {} pre-imbalance: {} post-imbalance: {} Algorithm: {} VM: {} srcHost ID: {} destHost: {}", cluster, preImbalance, postImbalance, getName(), 
vm, vm.getHostId(), destHost); - // This needs more research to determine the cost and benefit of a migration - // TODO: Cost should be a factor of the VM size and the host capacity - // TODO: Benefit should be a factor of the VM size and the host capacity and the number of VMs on the host - final double improvement = postImbalance - preImbalance; - final double cost = 0; - final double benefit = 1; - return new Ternary<>(improvement, cost, benefit); + return calculateMetricsFromImbalances(postImbalance, preImbalance); } } diff --git a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java index 3d3896704da..b2a5e6bf84f 100644 --- a/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java +++ b/plugins/drs/cluster/condensed/src/test/java/org/apache/cloudstack/cluster/CondensedTest.java @@ -43,6 +43,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue; import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsImbalanceThreshold; import static org.apache.cloudstack.cluster.ClusterDrsService.ClusterDrsMetric; import static org.junit.Assert.assertEquals; @@ -121,6 +124,48 @@ public class CondensedTest { closeable.close(); } + /** + * Helper method to prepare metrics data for getMetrics calls with optimized signature. + * Calculates pre-imbalance and builds baseMetricsArray and hostIdToIndexMap. + * + * @return a Ternary containing preImbalance, baseMetricsArray, and hostIdToIndexMap + */ + private Ternary> prepareMetricsData() throws ConfigurationException { + // Calculate pre-imbalance + Double preImbalance = getClusterImbalance(clusterId, new ArrayList<>(hostCpuFreeMap.values()), + new ArrayList<>(hostMemoryFreeMap.values()), null); + + // Build baseMetricsArray and hostIdToIndexMap + String metricType = getClusterDrsMetric(clusterId); + Map> baseMetricsMap = "cpu".equals(metricType) ? hostCpuFreeMap : hostMemoryFreeMap; + double[] baseMetricsArray = new double[baseMetricsMap.size()]; + Map hostIdToIndexMap = new HashMap<>(); + + int index = 0; + for (Map.Entry> entry : baseMetricsMap.entrySet()) { + Long hostId = entry.getKey(); + Ternary metrics = entry.getValue(); + long used = metrics.first(); + long actualTotal = metrics.third() - metrics.second(); + long free = actualTotal - metrics.first(); + Double metricValue = getMetricValue(clusterId, used, free, actualTotal, null); + if (metricValue != null) { + baseMetricsArray[index] = metricValue; + hostIdToIndexMap.put(hostId, index); + index++; + } + } + + // Trim array if some values were null + if (index < baseMetricsArray.length) { + double[] trimmed = new double[index]; + System.arraycopy(baseMetricsArray, 0, trimmed, 0, index); + baseMetricsArray = trimmed; + } + + return new Ternary<>(preImbalance, baseMetricsArray, hostIdToIndexMap); + } + /** *
needsDrs tests *
Scenarios to test for needsDrs @@ -185,8 +230,14 @@ public class CondensedTest { @Test public void getMetricsWithCpu() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "cpu"); + + Ternary> metricsData = prepareMetricsData(); + Double preImbalance = metricsData.first(); + double[] baseMetricsArray = metricsData.second(); + Map hostIdToIndexMap = metricsData.third(); + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, - hostCpuFreeMap, hostMemoryFreeMap, false); + hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap); assertEquals(0.0, result.first(), 0.0); assertEquals(0, result.second(), 0.0); assertEquals(1, result.third(), 0.0); @@ -199,8 +250,14 @@ public class CondensedTest { @Test public void getMetricsWithMemory() throws NoSuchFieldException, IllegalAccessException, ConfigurationException { overrideDefaultConfigValue(ClusterDrsMetric, "_defaultValue", "memory"); + + Ternary> metricsData = prepareMetricsData(); + Double preImbalance = metricsData.first(); + double[] baseMetricsArray = metricsData.second(); + Map hostIdToIndexMap = metricsData.third(); + Ternary result = condensed.getMetrics(cluster, vm3, serviceOffering, destHost, - hostCpuFreeMap, hostMemoryFreeMap, false); + hostCpuFreeMap, hostMemoryFreeMap, false, preImbalance, baseMetricsArray, hostIdToIndexMap); assertEquals(-0.4, result.first(), 0.01); assertEquals(0, result.second(), 0.0); assertEquals(1, result.third(), 0.0); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 213657db073..504d8c528e6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -47,6 +47,8 @@ import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.configuration.Resource; +import com.cloud.user.ResourceLimitService; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.Role; import org.apache.cloudstack.acl.RolePermissionEntity; @@ -398,6 +400,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne public ProjectManager projectManager; @Inject RoleService roleService; + @Inject + ResourceLimitService resourceLimitService; private void logMessage(final Level logLevel, final String message, final Exception e) { if (logLevel == Level.WARN) { @@ -905,15 +909,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne return response; } - private void validateEndpointUrl() { - String csUrl = ApiServiceConfiguration.ApiServletPath.value(); - if (csUrl == null || csUrl.contains("localhost")) { - String error = String.format("Global setting %s has to be set to the Management Server's API end point", - ApiServiceConfiguration.ApiServletPath.key()); - throw new InvalidParameterValueException(error); - } - } - private DataCenter validateAndGetZoneForKubernetesCreateParameters(Long zoneId, Long networkId) { DataCenter zone = dataCenterDao.findById(zoneId); if (zone == null) { @@ -1008,7 +1003,7 @@ public class KubernetesClusterManagerImpl extends 
ManagerBase implements Kuberne } private void validateManagedKubernetesClusterCreateParameters(final CreateKubernetesClusterCmd cmd) throws CloudRuntimeException { - validateEndpointUrl(); + ApiServiceConfiguration.validateEndpointUrl(); final String name = cmd.getName(); final Long zoneId = cmd.getZoneId(); final Long kubernetesVersionId = cmd.getKubernetesVersionId(); @@ -1308,7 +1303,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION )); } - validateEndpointUrl(); + ApiServiceConfiguration.validateEndpointUrl(); if (minSize == null || maxSize == null) { throw new InvalidParameterValueException("Autoscaling requires minsize and maxsize to be passed"); @@ -1350,8 +1345,58 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne validateServiceOfferingsForNodeTypesScale(serviceOfferingNodeTypeMap, defaultServiceOfferingId, kubernetesCluster, clusterVersion); validateKubernetesClusterScaleSize(kubernetesCluster, clusterSize, maxClusterSize, zone); + + ensureResourceLimitsForScale(kubernetesCluster, serviceOfferingNodeTypeMap, + clusterSize != null ? clusterSize : null, + kubernetesCluster.getAccountId()); } + protected void ensureResourceLimitsForScale(final KubernetesClusterVO cluster, + final Map requestedServiceOfferingIds, + final Long targetNodeCounts, + final Long accountId) { + + long totalAdditionalVms = 0L; + long totalAdditionalCpuUnits = 0L; + long totalAdditionalRamMb = 0L; + + + List clusterVmMapVOS = kubernetesClusterVmMapDao.listByClusterIdAndVmType(cluster.getId(), WORKER); + long currentCount = clusterVmMapVOS != null ? clusterVmMapVOS.size() : 0L; + long desiredCount = targetNodeCounts != null ? targetNodeCounts : currentCount; + long additional = Math.max(0L, desiredCount - currentCount); + if (additional == 0L) { + return; + } + + Long offeringId = (requestedServiceOfferingIds != null && requestedServiceOfferingIds.containsKey(WORKER.name())) ? 
+ requestedServiceOfferingIds.get(WORKER.name()) : + getExistingServiceOfferingIdForNodeType(WORKER.name(), cluster); + + if (offeringId == null) { + offeringId = cluster.getServiceOfferingId(); + } + + ServiceOffering so = serviceOfferingDao.findById(offeringId); + if (so == null) { + throw new InvalidParameterValueException(String.format("Invalid service offering for node type %s", WORKER.name())); + } + + totalAdditionalVms += additional; + long effectiveCpu = (long) so.getCpu() * so.getSpeed(); + totalAdditionalCpuUnits += effectiveCpu * additional; + totalAdditionalRamMb += so.getRamSize() * additional; + + try { + resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.user_vm, totalAdditionalVms); + resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.cpu, totalAdditionalCpuUnits); + resourceLimitService.checkResourceLimit(accountDao.findById(accountId), Resource.ResourceType.memory, totalAdditionalRamMb); + } catch (Exception e) { + throw new CloudRuntimeException("Resource limits prevent scaling the cluster: " + e.getMessage(), e); + } + } + + protected void validateServiceOfferingsForNodeTypesScale(Map map, Long defaultServiceOfferingId, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) { for (String key : CLUSTER_NODES_TYPES_LIST) { Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId); @@ -1413,7 +1458,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) { // Validate parameters - validateEndpointUrl(); + ApiServiceConfiguration.validateEndpointUrl(); final Long kubernetesClusterId = cmd.getId(); final Long upgradeVersionId = cmd.getKubernetesVersionId(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index baf717612f8..cd334954946 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -685,7 +685,7 @@ public class KubernetesClusterActionWorker { try { String command = String.format("sudo %s/%s -u '%s' -k '%s' -s '%s'", - scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.ApiServletPath.value(), keys[0], keys[1]); + scriptPath, deploySecretsScriptFilename, ApiServiceConfiguration.getApiServletPathValue(), keys[0], keys[1]); Account account = accountDao.findById(kubernetesCluster.getAccountId()); if (account != null && account.getType() == Account.Type.PROJECT) { String projectId = projectService.findByProjectAccountId(account.getId()).getUuid(); diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index bb9769044ca..b0d86005485 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -520,7 +520,6 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage String apiKey = user.getApiKey(); String secretKey = user.getSecretKey(); - String csUrl 
= ApiServiceConfiguration.ApiServletPath.value(); if (apiKey == null) { throw new InvalidParameterValueException("apiKey for user: " + user.getUsername() + " is empty. Please generate it"); @@ -530,9 +529,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. Please generate it"); } - if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException(String.format("Global setting %s has to be set to the Management Server's API end point", ApiServiceConfiguration.ApiServletPath.key())); - } + ApiServiceConfiguration.validateEndpointUrl(); } @Override @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMPROFILE_CREATE, eventDescription = "creating autoscale vm profile", create = true) diff --git a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 5ceebf06dd8..df60553bb8e 100644 --- a/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/main/java/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -337,7 +337,6 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements String apiKey = null; String secretKey = null; - String csUrl = ApiServiceConfiguration.ApiServletPath.value(); Network.Provider provider = getLoadBalancerServiceProvider(lb); if (Network.Provider.Netscaler.equals(provider)) { Long autoscaleUserId = autoScaleVmProfile.getAutoScaleUserId(); @@ -358,13 +357,12 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterValueException("secretKey for user: " + user.getUsername() + " is empty. 
Please generate it"); } - if (csUrl == null || csUrl.contains("localhost")) { - throw new InvalidParameterValueException(String.format("Global setting %s has to be set to the Management Server's API end point", ApiServiceConfiguration.ApiServletPath.key())); - } + ApiServiceConfiguration.validateEndpointUrl(); } LbAutoScaleVmProfile lbAutoScaleVmProfile = - new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, csUrl, zoneId, domainId, serviceOfferingId, templateId, vmName, lbNetworkUuid); + new LbAutoScaleVmProfile(autoScaleVmProfile, apiKey, secretKey, ApiServiceConfiguration.getApiServletPathValue(), zoneId, domainId, serviceOfferingId, templateId, + vmName, lbNetworkUuid); return new LbAutoScaleVmGroup(vmGroup, autoScalePolicies, lbAutoScaleVmProfile, currentState); } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index e6032662e92..1a3f18011be 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -716,6 +716,7 @@ import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.DeploymentPlanningManager; @@ -962,7 +963,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private HostPodDao _hostPodDao; @Inject - private VgpuProfileDao vgpuProfileDao; + VgpuProfileDao vgpuProfileDao; @Inject private VMInstanceDao _vmInstanceDao; @Inject @@ -1484,17 +1485,27 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return false; } - @Override - public Ternary, Integer>, List, Map> listHostsForMigrationOfVM(final VirtualMachine vm, final Long startIndex, final Long pageSize, - final String keyword, List vmList) { - - validateVmForHostMigration(vm); + /** + * Get technically compatible hosts for VM migration (storage, hypervisor, UEFI filtering). + * This determines which hosts are technically capable of hosting the VM based on + * storage requirements, hypervisor capabilities, and UEFI requirements. + * + * @param vm The virtual machine to migrate + * @param startIndex Starting index for pagination + * @param pageSize Page size for pagination + * @param keyword Keyword filter for host search + * @return Ternary containing: (all hosts with count, filtered compatible hosts, storage motion requirements map) + */ + Ternary, Integer>, List, Map> getTechnicallyCompatibleHosts( + final VirtualMachine vm, + final Long startIndex, + final Long pageSize, + final String keyword) { + // GPU check if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { - logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported"); - // Return empty list. 
- return new Ternary<>(new Pair<>(new ArrayList<>(), 0), - new ArrayList<>(), new HashMap<>()); + logger.info("Live Migration of GPU enabled VM : {} is not supported", vm); + return new Ternary<>(new Pair<>(new ArrayList<>(), 0), new ArrayList<>(), new HashMap<>()); } final long srcHostId = vm.getHostId(); @@ -1508,6 +1519,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } + String srcHostVersion = srcHost.getHypervisorVersion(); if (HypervisorType.KVM.equals(srcHost.getHypervisorType()) && srcHostVersion == null) { srcHostVersion = ""; @@ -1545,7 +1557,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe List allHosts; List filteredHosts = null; final Map requiresStorageMotion = new HashMap<>(); - DataCenterDeployment plan; + if (canMigrateWithStorage) { Long podId = !VirtualMachine.Type.User.equals(vm.getType()) ? srcHost.getPodId() : null; allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), podId, null, null, keyword, @@ -1594,7 +1606,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (CollectionUtils.isEmpty(filteredHosts)) { return new Ternary<>(new Pair<>(allHosts, allHostsPair.second()), new ArrayList<>(), new HashMap<>()); } - plan = new DataCenterDeployment(srcHost.getDataCenterId(), podId, null, null, null, null); } else { final Long cluster = srcHost.getClusterId(); if (logger.isDebugEnabled()) { @@ -1603,22 +1614,38 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, keyword, null, null, null, null, srcHost.getId()); allHosts = allHostsPair.first(); - plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null); + filteredHosts = allHosts; } - final Pair, Integer> otherHosts = new Pair<>(allHosts, allHostsPair.second()); + final Pair, Integer> allHostsPairResult = new Pair<>(allHosts, allHostsPair.second()); Pair> uefiFilteredResult = filterUefiHostsForMigration(allHosts, filteredHosts, vm); if (!uefiFilteredResult.first()) { - return new Ternary<>(otherHosts, new ArrayList<>(), new HashMap<>()); + return new Ternary<>(allHostsPairResult, new ArrayList<>(), new HashMap<>()); } filteredHosts = uefiFilteredResult.second(); - List suitableHosts = new ArrayList<>(); + return new Ternary<>(allHostsPairResult, filteredHosts, requiresStorageMotion); + } + + /** + * Apply affinity group constraints and other exclusion rules for VM migration. + * This builds an ExcludeList based on affinity groups, DPDK requirements, and dedicated resources. 
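+ * <p>
+ * Illustrative usage only (a sketch, not part of this change; the caller-side names such as
+ * {@code srcHost}, {@code serviceOffering}, {@code clusterVms} and {@code candidateHosts} are assumptions):
+ * <pre>
+ * DataCenterDeployment plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(),
+ *         srcHost.getClusterId(), null, null, null);
+ * VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, serviceOffering, null, null);
+ * ExcludeList avoid = managementServer.applyAffinityConstraints(vm, profile, plan, clusterVms);
+ * for (Host candidate : candidateHosts) {
+ *     if (!avoid.shouldAvoid(candidate)) {
+ *         // candidate respects affinity/anti-affinity and dedicated-resource rules for this placement round
+ *     }
+ * }
+ * </pre>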
+ * + * @param vm The virtual machine to migrate + * @param vmProfile The VM profile + * @param plan The deployment plan + * @param vmList List of VMs with current/simulated placements for affinity processing + * @return ExcludeList containing hosts to avoid + */ + @Override + public ExcludeList applyAffinityConstraints(VirtualMachine vm, VirtualMachineProfile vmProfile, DeploymentPlan plan, List vmList) { final ExcludeList excludes = new ExcludeList(); - excludes.addHost(srcHostId); + excludes.addHost(vm.getHostId()); if (dpdkHelper.isVMDpdkEnabled(vm.getId())) { - excludeNonDPDKEnabledHosts(plan, excludes); + if (plan instanceof DataCenterDeployment) { + excludeNonDPDKEnabledHosts((DataCenterDeployment) plan, excludes); + } } // call affinitygroup chain @@ -1631,13 +1658,37 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { - final DataCenterVO dc = _dcDao.findById(srcHost.getDataCenterId()); + final DataCenterVO dc = _dcDao.findById(plan.getDataCenterId()); _dpMgr.checkForNonDedicatedResources(vmProfile, dc, excludes); } + return excludes; + } + + /** + * Get hosts with available capacity using host allocators, and apply architecture filtering. + * + * @param vm The virtual machine (for architecture filtering) + * @param vmProfile The VM profile + * @param plan The deployment plan + * @param compatibleHosts List of technically compatible hosts + * @param excludes ExcludeList with hosts to avoid + * @param srcHost Source host (for architecture filtering) + * @return List of suitable hosts with capacity + */ + protected List getCapableSuitableHosts( + final VirtualMachine vm, + final VirtualMachineProfile vmProfile, + final DataCenterDeployment plan, + final List compatibleHosts, + final ExcludeList excludes, + final Host srcHost) { + + List suitableHosts = new ArrayList<>(); + for (final HostAllocator allocator : hostAllocators) { - if (CollectionUtils.isNotEmpty(filteredHosts)) { - suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, filteredHosts, HostAllocator.RETURN_UPTO_ALL, false); + if (CollectionUtils.isNotEmpty(compatibleHosts)) { + suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, compatibleHosts, HostAllocator.RETURN_UPTO_ALL, false); } else { suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false); } @@ -1663,6 +1714,43 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } + return suitableHosts; + } + + @Override + public Ternary, Integer>, List, Map> listHostsForMigrationOfVM(final VirtualMachine vm, final Long startIndex, final Long pageSize, + final String keyword, List vmList) { + + validateVmForHostMigration(vm); + + // Get technically compatible hosts (storage, hypervisor, UEFI) + Ternary, Integer>, List, Map> compatibilityResult = + getTechnicallyCompatibleHosts(vm, startIndex, pageSize, keyword); + + Pair, Integer> allHostsPair = compatibilityResult.first(); + List filteredHosts = compatibilityResult.second(); + Map requiresStorageMotion = compatibilityResult.third(); + + // If no compatible hosts, return early + if (CollectionUtils.isEmpty(filteredHosts)) { + final Pair, Integer> otherHosts = new Pair<>(allHostsPair.first(), allHostsPair.second()); + return new Ternary<>(otherHosts, new ArrayList<>(), requiresStorageMotion); + } + + // Create deployment plan and VM profile + 
final Host srcHost = _hostDao.findById(vm.getHostId()); + final DataCenterDeployment plan = new DataCenterDeployment( + srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null); + final VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl( + vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); + + // Apply affinity constraints + final ExcludeList excludes = applyAffinityConstraints(vm, vmProfile, plan, vmList); + + // Get hosts with capacity + List suitableHosts = getCapableSuitableHosts(vm, vmProfile, plan, filteredHosts, excludes, srcHost); + + final Pair, Integer> otherHosts = new Pair<>(allHostsPair.first(), allHostsPair.second()); return new Ternary<>(otherHosts, suitableHosts, requiresStorageMotion); } @@ -1966,7 +2054,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return suitablePools; } - private Pair, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type, + Pair, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type, final Object state, final Object zone, final Object pod, final Object cluster, final Object id, final Object keyword, final Object resourceState, final Object haHosts, final Object hypervisorType, final Object hypervisorVersion, final Object... excludes) { final Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize); @@ -4133,7 +4221,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(StartInternalLBVMCmd.class); cmdList.add(ListInternalLBVMsCmd.class); cmdList.add(ListNetworkIsolationMethodsCmd.class); - cmdList.add(ListNetworkIsolationMethodsCmd.class); cmdList.add(CreateNetworkACLListCmd.class); cmdList.add(DeleteNetworkACLListCmd.class); cmdList.add(ListNetworkACLListsCmd.class); diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java index 93bc1261530..a5192e25a63 100644 --- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java +++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java @@ -18,22 +18,24 @@ */ package com.cloud.storage; -import java.util.ArrayList; import java.util.List; import java.util.Map; import javax.inject.Inject; -import org.apache.cloudstack.context.CallContext; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceUnavailableException; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.commons.collections.MapUtils; -import org.apache.logging.log4j.LogManager; + +import org.apache.commons.collections4.CollectionUtils; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; @@ -49,17 +51,10 @@ import com.cloud.server.ManagementServer; import com.cloud.storage.dao.StoragePoolHostDao; import 
com.cloud.storage.dao.StoragePoolWorkDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.user.Account; -import com.cloud.user.User; import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.DomainRouterVO; -import com.cloud.vm.SecondaryStorageVmVO; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.ConsoleProxyDao; @@ -118,189 +113,32 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { @Override public boolean maintain(DataStore store, Map details) { - Long userId = CallContext.current().getCallingUserId(); - User user = _userDao.findById(userId); - Account account = CallContext.current().getCallingAccount(); StoragePoolVO pool = primaryDataStoreDao.findById(store.getId()); try { - List spes = null; - // Handling Zone and Cluster wide storage scopes. - // if the storage is ZONE wide then we pass podid and cluster id as null as they will be empty for ZWPS - if (pool.getScope() == ScopeType.ZONE) { - spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE); - } else { - spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER); - } - for (StoragePoolVO sp : spes) { - if (sp.getParent() != pool.getParent() && sp.getId() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode - if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", sp)); - } - } - } - StoragePool storagePool = (StoragePool)store; + getStoragePoolForSpecification(pool); - //Handeling the Zone wide and cluster wide primay storage - List hosts = new ArrayList(); - // if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand - //TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue. 
- if (pool.getScope().equals(ScopeType.ZONE)) { - if (HypervisorType.Any.equals(pool.getHypervisor())) { - hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId()); - } - else { - hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId()); - } - } else { - hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up); - } + List hosts = getHostsForStoragePool(pool); - if (hosts == null || hosts.size() == 0) { - pool.setStatus(StoragePoolStatus.Maintenance); - primaryDataStoreDao.update(pool.getId(), pool); - return true; - } else { - // set the pool state to prepare for maintenance - pool.setStatus(StoragePoolStatus.PrepareForMaintenance); - primaryDataStoreDao.update(pool.getId(), pool); - } - // remove heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool); - if (MapUtils.isNotEmpty(details) && storageManager.canDisconnectHostFromStoragePool(host, storagePool)) { - cmd.setDetails(details); - } - final Answer answer = agentMgr.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - if (logger.isDebugEnabled()) { - logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - } else { - if (logger.isDebugEnabled()) { - logger.debug("ModifyStoragePool false succeeded"); - } - if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); - storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); - } - } - } - // check to see if other ps exist - // if they do, then we can migrate over the system vms to them - // if they don't, then just stop all vms on this one - List upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up); - boolean restart = true; - if (upPools == null || upPools.size() == 0) { - restart = false; - } + if (setNextStateForMaintenance(hosts, pool) == StoragePoolStatus.PrepareForMaintenance) { + removeHeartbeatForHostsFromPool(hosts, pool); + // check to see if other ps exist + // if they do, then we can migrate over the system vms to them + // if they don't, then just stop all vms on this one + List upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up); + boolean restart = !CollectionUtils.isEmpty(upPools); - // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = volumeDao.findByPoolId(pool.getId()); - - // 3. 
Enqueue to the work queue - for (VolumeVO volume : allVolumes) { - VMInstanceVO vmInstance = vmDao.findById(volume.getInstanceId()); - - if (vmInstance == null) { - continue; - } - - // enqueue sp work - if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) { - - try { - StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), pool.getId(), false, false, server.getId()); - _storagePoolWorkDao.persist(work); - } catch (Exception e) { - if (logger.isDebugEnabled()) { - logger.debug("Work record already exists, reusing by re-setting values"); - } - StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId()); - work.setStartedAfterMaintenance(false); - work.setStoppedForMaintenance(false); - work.setManagementServerId(server.getId()); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // 4. Process the queue - List pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId()); - - for (StoragePoolWorkVO work : pendingWork) { - // shut down the running vms - VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - // call the consoleproxymanager - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId()); - vmMgr.advanceStop(consoleProxy.getUuid(), false); - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - - if (restart) { - - vmMgr.advanceStart(consoleProxy.getUuid(), null, null); - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type uservm, call the user vm manager - if (vmInstance.getType() == VirtualMachine.Type.User) { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - vmMgr.advanceStop(userVm.getUuid(), false); - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - // if the instance is of type secondary storage vm, call the - // secondary storage vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO secStrgVm = _secStrgDao.findById(vmInstance.getId()); - vmMgr.advanceStop(secStrgVm.getUuid(), false); - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - - if (restart) { - vmMgr.advanceStart(secStrgVm.getUuid(), null, null); - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type domain router vm, call the network - // manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - vmMgr.advanceStop(domR.getUuid(), false); - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - - if (restart) { - vmMgr.advanceStart(domR.getUuid(), null, null); - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } + // 2. Get a list of all the ROOT volumes within this storage pool + List allVolumes = volumeDao.findByPoolId(pool.getId()); + // 3. 
Enqueue to the work queue + enqueueMigrationsForVolumes(allVolumes, pool); + // 4. Process the queue + processMigrationWorkloads(pool, restart); } } catch (Exception e) { logger.error("Exception in enabling primary storage maintenance:", e); pool.setStatus(StoragePoolStatus.ErrorInMaintenance); primaryDataStoreDao.update(pool.getId(), pool); + // TODO decide on what recovery is possible throw new CloudRuntimeException(e.getMessage()); } return true; @@ -314,67 +152,64 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { @Override public boolean cancelMaintain(DataStore store, Map details) { // Change the storage state back to up - Long userId = CallContext.current().getCallingUserId(); - User user = _userDao.findById(userId); - Account account = CallContext.current().getCallingAccount(); StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId()); StoragePool pool = (StoragePool)store; - //Handeling the Zone wide and cluster wide primay storage - List hosts = new ArrayList(); - // if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand - if (poolVO.getScope().equals(ScopeType.ZONE)) { - if (HypervisorType.Any.equals(pool.getHypervisor())) { - hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId()); - } - else { - hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(poolVO.getHypervisor(), pool.getDataCenterId()); - } - } else { - hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up); - } + List hosts = getHostsForStoragePool(poolVO); - if (hosts == null || hosts.size() == 0) { + if (CollectionUtils.isEmpty(hosts)) { return true; } Pair, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null); - // add heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool); - if (MapUtils.isNotEmpty(details)) { - msPoolCmd.setDetails(details); - } - final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); - if (answer == null || !answer.getResult()) { - if (logger.isDebugEnabled()) { - logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - if (answer != null && nfsMountOpts.second()) { - logger.error(String.format("Unable to attach storage pool to the host %s due to %s", host, answer.getDetails())); - StringBuilder exceptionSB = new StringBuilder("Unable to attach storage pool to the host ").append(host.getName()); - String reason = storageManager.getStoragePoolMountFailureReason(answer.getDetails()); - if (reason!= null) { - exceptionSB.append(". ").append(reason).append("."); - } - throw new CloudRuntimeException(exceptionSB.toString()); - } - } else { - if (logger.isDebugEnabled()) { - logger.debug("ModifyStoragePool add succeeded"); - } - storageManager.updateStoragePoolHostVOAndBytes(pool, host.getId(), (ModifyStoragePoolAnswer) answer); - if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { - logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool); - storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId()); - } - } - } + addHeartbeatToHostsInPool(hosts, pool, nfsMountOpts); // 2. Get a list of pending work for this queue List pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); // 3. 
work through the queue + cancelMigrationWorkloads(pendingWork); + return false; + } + + private StoragePoolStatus setNextStateForMaintenance(List hosts, StoragePoolVO pool) { + if (CollectionUtils.isEmpty(hosts)) { + pool.setStatus(StoragePoolStatus.Maintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return StoragePoolStatus.Maintenance; + } else { + // set the pool state to prepare for maintenance + pool.setStatus(StoragePoolStatus.PrepareForMaintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return StoragePoolStatus.PrepareForMaintenance; + } + } + + private void processMigrationWorkloads(StoragePoolVO pool, boolean restart) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException { + List pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId()); + + for (StoragePoolWorkVO work : pendingWork) { + // shut down the running vms + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + switch (vmInstance.getType()) { + case ConsoleProxy: + case SecondaryStorageVm: + case DomainRouter: + handleVmMigration(restart, work, vmInstance); + break; + case User: + handleStopVmForMigration(work, vmInstance); + break; + } + } + } + + private void cancelMigrationWorkloads(List pendingWork) { for (StoragePoolWorkVO work : pendingWork) { try { VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); @@ -383,61 +218,189 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation { continue; } - // if the instance is of type consoleproxy, call the console - // proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - - ConsoleProxyVO consoleProxy = _consoleProxyDao - .findById(vmInstance.getId()); - vmMgr.advanceStart(consoleProxy.getUuid(), null, null); - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals( - VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance - .getId()); - vmMgr.advanceStart(ssVm.getUuid(), null, null); - - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - // if the instance is of type domain router vm, call the network - // manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - vmMgr.advanceStart(domR.getUuid(), null, null); - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - // if the instance is of type user vm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - // check if the vm has a root volume. 
If not, remove the item from the queue, the vm should be - // started only when it has at least one root volume attached to it - // don't allow to start vm that doesn't have a root volume - if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) { - _storagePoolWorkDao.remove(work.getId()); - } else { - UserVmVO userVm = userVmDao.findById(vmInstance.getId()); - - vmMgr.advanceStart(userVm.getUuid(), null, null); - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } + switch (vmInstance.getType()) { + case ConsoleProxy: + case SecondaryStorageVm: + case DomainRouter: + handleVmStart(work, vmInstance); + break; + case User: + handleUserVmStart(work, vmInstance); + break; } } catch (Exception e) { logger.debug("Failed start vm", e); throw new CloudRuntimeException(e.toString()); } } - return false; } + private void handleStopVmForMigration(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException { + vmMgr.advanceStop(vmInstance.getUuid(), false); + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + private void handleVmMigration(boolean restart, StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException { + handleStopVmForMigration(work, vmInstance); + + if (restart) { + handleVmStart(work, vmInstance); + } + } + + private void handleVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException { + vmMgr.advanceStart(vmInstance.getUuid(), null, null); + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + private void enqueueMigrationsForVolumes(List allVolumes, StoragePoolVO pool) { + for (VolumeVO volume : allVolumes) { + VMInstanceVO vmInstance = vmDao.findById(volume.getInstanceId()); + + if (vmInstance == null) { + continue; + } + + // enqueue sp work + if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) { + + try { + StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), pool.getId(), false, false, server.getId()); + _storagePoolWorkDao.persist(work); + } catch (Exception e) { + if (logger.isDebugEnabled()) { + logger.debug("Work record already exists, reusing by re-setting values"); + } + StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId()); + work.setStartedAfterMaintenance(false); + work.setStoppedForMaintenance(false); + work.setManagementServerId(server.getId()); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + } + + private void removeHeartbeatForHostsFromPool(List hosts, StoragePool storagePool) { + // remove heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool); + final Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null || !answer.getResult()) { + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool false failed due to {}", ((answer == null) ? 
"answer null" : answer.getDetails())); + } + } else { + reportSucceededModifyStorePool(storagePool, (ModifyStoragePoolAnswer) answer, host, false); + } + } + } + + private void reportSucceededModifyStorePool(StoragePool storagePool, ModifyStoragePoolAnswer answer, HostVO host, boolean add) { + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool succeeded for {}", add ? "adding" : "removing"); + } + if (storagePool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) { + logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", storagePool); + storageManager.syncDatastoreClusterStoragePool(storagePool.getId(), answer.getDatastoreClusterChildren(), host.getId()); + } + } + + /** + * Handling the Zone wide and cluster wide primary storage + * if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZoneWideStoragePools created to send ModifyStoragePoolCommand + * TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue. + * @param pool pool to check for connected hosts + * @return a list of connected hosts + */ + private List getHostsForStoragePool(StoragePoolVO pool) { + List hosts; + if (pool.getScope().equals(ScopeType.ZONE)) { + if (HypervisorType.Any.equals(pool.getHypervisor())) { + hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId()); + } + else { + hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId()); + } + } else { + hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up); + } + return hosts; + } + + /** + * Handling Zone and Cluster wide storage scopes. Depending on the scope of the pool, check for other storage pools in the same scope + * If the storage is ZONE wide then we pass podId and cluster id as null as they will be empty for Zone wide storage + * + * @param pool pool to check for other pools in the same scope + */ + private void getStoragePoolForSpecification(StoragePoolVO pool) { + List storagePools; + if (pool.getScope() == ScopeType.ZONE) { + storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE); + } else { + storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER); + } + checkHierarchyForPreparingForMaintenance(pool, storagePools); + } + + /** + * If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode + * @param pool target to put in maintenance + * @param storagePools list of possible peers/parents/children + */ + private static void checkHierarchyForPreparingForMaintenance(StoragePoolVO pool, List storagePools) { + for (StoragePoolVO storagePool : storagePools) { + if (!(storagePool.getParent().equals(pool.getParent()) || !pool.getParent().equals(storagePool.getId())) && + (storagePool.getStatus() == StoragePoolStatus.PrepareForMaintenance)) { + throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in PrepareForMaintenance mode ", storagePool)); + } + } + } + + /** + * // check if the vm has a root volume. 
If not, remove the item from the queue, the vm should be + * // started only when it has at least one root volume attached to it + * // don't allow to start vm that doesn't have a root volume + * @param work work item to handle for this VM + * @param vmInstance VM to start + * @throws InsufficientCapacityException no migration target found + * @throws ResourceUnavailableException a resource required for migration is not in the expected state + * @throws OperationTimedoutException migration operation took too long + */ + private void handleUserVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException { + if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) { + _storagePoolWorkDao.remove(work.getId()); + } else { + handleVmStart(work, vmInstance); + } + } + + private void addHeartbeatToHostsInPool(List hosts, StoragePool pool, Pair, Boolean> nfsMountOpts) { + for (HostVO host : hosts) { + ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first()); + final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); + if (answer == null || !answer.getResult()) { + if (logger.isDebugEnabled()) { + logger.debug("ModifyStoragePool add failed due to {}", ((answer == null) ? "answer null" : answer.getDetails())); + } + if (answer != null && nfsMountOpts.second()) { + logger.error("Unable to attach storage pool to the host {} due to {}", host, answer.getDetails()); + StringBuilder exceptionSB = new StringBuilder("Unable to attach storage pool to the host ").append(host.getName()); + String reason = storageManager.getStoragePoolMountFailureReason(answer.getDetails()); + if (reason!= null) { + exceptionSB.append(". 
").append(reason).append("."); + } + throw new CloudRuntimeException(exceptionSB.toString()); + } + } else { + storageManager.updateStoragePoolHostVOAndBytes(pool, host.getId(), (ModifyStoragePoolAnswer) answer); + reportSucceededModifyStorePool(pool, (ModifyStoragePoolAnswer) answer, host, true); + } + } + } } diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index bbfc8fd3682..80387c2fc12 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -1405,6 +1405,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M - New role should not be of type Admin with domain other than ROOT domain */ protected void validateRoleChange(Account account, Role role, Account caller) { + if (account.getRoleId() != null && account.getRoleId().equals(role.getId())) { + return; + } Role currentRole = roleService.findRole(account.getRoleId()); Role callerRole = roleService.findRole(caller.getRoleId()); String errorMsg = String.format("Unable to update account role to %s, ", role.getName()); @@ -1420,6 +1423,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throw new PermissionDeniedException(String.format("%s as either current or new role has higher " + "privileges than the caller", errorMsg)); } + if (account.isDefault()) { + throw new PermissionDeniedException(String.format("%s as the account is a default account", errorMsg)); + } if (role.getRoleType().equals(RoleType.Admin) && account.getDomainId() != Domain.ROOT_DOMAIN) { throw new PermissionDeniedException(String.format("%s as the user does not belong to the ROOT domain", errorMsg)); diff --git a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java index a662d47d454..7e1011ff931 100644 --- a/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/cluster/ClusterDrsServiceImpl.java @@ -24,6 +24,8 @@ import com.cloud.api.query.dao.HostJoinDao; import com.cloud.api.query.vo.HostJoinVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.Domain; import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; @@ -51,6 +53,8 @@ import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachineProfileImpl; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -62,6 +66,8 @@ import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.response.ClusterDrsPlanMigrationResponse; import org.apache.cloudstack.api.response.ClusterDrsPlanResponse; import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.cluster.dao.ClusterDrsPlanDao; import org.apache.cloudstack.cluster.dao.ClusterDrsPlanMigrationDao; import org.apache.cloudstack.context.CallContext; @@ -71,6 
+77,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.jobs.JobInfo; import org.apache.cloudstack.managed.context.ManagedContextTimerTask; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.time.DateUtils; @@ -81,13 +88,18 @@ import java.util.Calendar; import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.Timer; import java.util.TimerTask; import java.util.stream.Collectors; import static com.cloud.org.Grouping.AllocationState.Disabled; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterImbalance; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getClusterDrsMetric; +import static org.apache.cloudstack.cluster.ClusterDrsAlgorithm.getMetricValue; public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsService, PluggableService { @@ -125,6 +137,9 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ @Inject ManagementServer managementServer; + @Inject + AffinityGroupVMMapDao affinityGroupVMMapDao; + List drsAlgorithms = new ArrayList<>(); Map drsAlgorithmMap = new HashMap<>(); @@ -318,19 +333,14 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ * @throws ConfigurationException * If there is an error in the DRS configuration. */ - List> getDrsPlan(Cluster cluster, - int maxIterations) throws ConfigurationException { - List> migrationPlan = new ArrayList<>(); + List> getDrsPlan(Cluster cluster, int maxIterations) throws ConfigurationException { if (cluster.getAllocationState() == Disabled || maxIterations <= 0) { return Collections.emptyList(); } - ClusterDrsAlgorithm algorithm = getDrsAlgorithm(ClusterDrsAlgorithm.valueIn(cluster.getId())); List hostList = hostDao.findByClusterId(cluster.getId()); List vmList = new ArrayList<>(vmInstanceDao.listByClusterId(cluster.getId())); - int iteration = 0; - Map hostMap = hostList.stream().collect(Collectors.toMap(HostVO::getId, host -> host)); Map> hostVmMap = getHostVmMap(hostList, vmList); @@ -357,10 +367,39 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ serviceOfferingDao.findByIdIncludingRemoved(vm.getId(), vm.getServiceOfferingId())); } + Pair>, Map>> hostCache = getCompatibleHostAndVmStorageMotionCache(vmList); + Map> vmToCompatibleHostsCache = hostCache.first(); + Map> vmToStorageMotionCache = hostCache.second(); + + Set vmsWithAffinityGroups = getVmsWithAffinityGroups(vmList, vmToCompatibleHostsCache); + + return getMigrationPlans(maxIterations, cluster, hostMap, vmList, vmsWithAffinityGroups, vmToCompatibleHostsCache, + vmToStorageMotionCache, vmIdServiceOfferingMap, originalHostIdVmIdMap, hostVmMap, hostCpuMap, hostMemoryMap); + } + + private List> getMigrationPlans( + long maxIterations, Cluster cluster, Map hostMap, List vmList, + Set vmsWithAffinityGroups, Map> vmToCompatibleHostsCache, + Map> vmToStorageMotionCache, Map vmIdServiceOfferingMap, + Map> originalHostIdVmIdMap, Map> hostVmMap, + Map> hostCpuMap, Map> hostMemoryMap + ) throws ConfigurationException { + ClusterDrsAlgorithm algorithm = getDrsAlgorithm(ClusterDrsAlgorithm.valueIn(cluster.getId())); + int iteration = 0; + List> migrationPlan = new ArrayList<>(); while (iteration < maxIterations && 
algorithm.needsDrs(cluster, new ArrayList<>(hostCpuMap.values()), new ArrayList<>(hostMemoryMap.values()))) { + + logger.debug("Starting DRS iteration {} for cluster {}", iteration + 1, cluster); + // Re-evaluate affinity constraints with current (simulated) VM placements + Map vmToExcludesMap = getVmToExcludesMap(vmList, hostMap, vmsWithAffinityGroups, + vmToCompatibleHostsCache, vmIdServiceOfferingMap); + + logger.debug("Completed affinity evaluation for DRS iteration {} for cluster {}", iteration + 1, cluster); + Pair bestMigration = getBestMigration(cluster, algorithm, vmList, - vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap); + vmIdServiceOfferingMap, hostCpuMap, hostMemoryMap, + vmToCompatibleHostsCache, vmToStorageMotionCache, vmToExcludesMap); VirtualMachine vm = bestMigration.first(); Host destHost = bestMigration.second(); if (destHost == null || vm == null || originalHostIdVmIdMap.get(destHost.getId()).contains(vm.getId())) { @@ -372,8 +411,6 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId()); migrationPlan.add(new Ternary<>(vm, hostMap.get(vm.getHostId()), hostMap.get(destHost.getId()))); - hostVmMap.get(vm.getHostId()).remove(vm); - hostVmMap.get(destHost.getId()).add(vm); hostVmMap.get(vm.getHostId()).remove(vm); hostVmMap.get(destHost.getId()).add(vm); @@ -391,6 +428,106 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ return migrationPlan; } + private Map getVmToExcludesMap(List vmList, Map hostMap, + Set vmsWithAffinityGroups, Map> vmToCompatibleHostsCache, + Map vmIdServiceOfferingMap) { + Map vmToExcludesMap = new HashMap<>(); + for (VirtualMachine vm : vmList) { + if (vmToCompatibleHostsCache.containsKey(vm.getId())) { + Host srcHost = hostMap.get(vm.getHostId()); + if (srcHost != null) { + // Only call expensive applyAffinityConstraints for VMs with affinity groups + // For VMs without affinity groups, create minimal ExcludeList (just source host) + ExcludeList excludes; + if (vmsWithAffinityGroups.contains(vm.getId())) { + DataCenterDeployment plan = new DataCenterDeployment( + srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), + null, null, null); + VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(vm, null, + vmIdServiceOfferingMap.get(vm.getId()), null, null); + + excludes = managementServer.applyAffinityConstraints( + vm, vmProfile, plan, vmList); + } else { + // VM has no affinity groups - create minimal ExcludeList (just source host) + excludes = new ExcludeList(); + excludes.addHost(vm.getHostId()); + } + vmToExcludesMap.put(vm.getId(), excludes); + } + } + } + return vmToExcludesMap; + } + + + /** + * Pre-compute suitable hosts (once per eligible VM - never changes) + * Use listHostsForMigrationOfVM to get hosts validated by getCapableSuitableHosts + * This ensures DRS uses the same validation as "find host for migration" command + * + * @param vmList List of VMs to pre-compute suitable hosts for + * @return Pair of VM to compatible hosts map and VM to storage motion requirement map + */ + private Pair>, Map>> getCompatibleHostAndVmStorageMotionCache( + List vmList + ) { + Map> vmToCompatibleHostsCache = new HashMap<>(); + Map> vmToStorageMotionCache = new HashMap<>(); + + for (VirtualMachine vm : vmList) { + // Skip ineligible VMs + if (vm.getType().isUsedBySystem() || + vm.getState() != VirtualMachine.State.Running || + (MapUtils.isNotEmpty(vm.getDetails()) && + 
"true".equalsIgnoreCase(vm.getDetails().get(VmDetailConstants.SKIP_DRS)))) { + continue; + } + + try { + // Use listHostsForMigrationOfVM to get suitable hosts (validated by getCapableSuitableHosts) + // This ensures the same validation as the "find host for migration" command + Ternary, Integer>, List, Map> hostsForMigration = + managementServer.listHostsForMigrationOfVM(vm, 0L, 500L, null, vmList); + + List suitableHosts = hostsForMigration.second(); // Get suitable hosts (validated by HostAllocator) + Map requiresStorageMotion = hostsForMigration.third(); + + if (suitableHosts != null && !suitableHosts.isEmpty()) { + vmToCompatibleHostsCache.put(vm.getId(), suitableHosts); + vmToStorageMotionCache.put(vm.getId(), requiresStorageMotion); + } + } catch (Exception e) { + logger.debug("Could not get suitable hosts for VM {}: {}", vm, e.getMessage()); + } + } + return new Pair<>(vmToCompatibleHostsCache, vmToStorageMotionCache); + } + + /** + * Pre-fetch affinity group mappings for all eligible VMs (once, before iterations) + * This allows us to skip expensive affinity processing for VMs without affinity groups + * + * @param vmList List of VMs to check for affinity groups + * @param vmToCompatibleHostsCache Cached map of VM IDs to their compatible hosts + * @return Set of VM IDs that have affinity groups + */ + private Set getVmsWithAffinityGroups( + List vmList, Map> vmToCompatibleHostsCache + ) { + Set vmsWithAffinityGroups = new HashSet<>(); + for (VirtualMachine vm : vmList) { + if (vmToCompatibleHostsCache.containsKey(vm.getId())) { + // Check if VM has any affinity groups - if list is empty, VM has no affinity groups + List affinityGroupMappings = affinityGroupVMMapDao.listByInstanceId(vm.getId()); + if (CollectionUtils.isNotEmpty(affinityGroupMappings)) { + vmsWithAffinityGroups.add(vm.getId()); + } + } + } + return vmsWithAffinityGroups; + } + private ClusterDrsAlgorithm getDrsAlgorithm(String algoName) { if (drsAlgorithmMap.containsKey(algoName)) { return drsAlgorithmMap.get(algoName); @@ -429,6 +566,12 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ * @param hostMemoryCapacityMap * a map of host IDs to their corresponding memory * capacity + * @param vmToCompatibleHostsCache + * cached map of VM IDs to their compatible hosts + * @param vmToStorageMotionCache + * cached map of VM IDs to storage motion requirements + * @param vmToExcludesMap + * map of VM IDs to their ExcludeList (affinity constraints) * * @return a pair of the virtual machine and host that represent the best * migration, or null if no migration is @@ -438,33 +581,46 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ List vmList, Map vmIdServiceOfferingMap, Map> hostCpuCapacityMap, - Map> hostMemoryCapacityMap) throws ConfigurationException { + Map> hostMemoryCapacityMap, + Map> vmToCompatibleHostsCache, + Map> vmToStorageMotionCache, + Map vmToExcludesMap) throws ConfigurationException { + // Pre-calculate cluster imbalance once per iteration (same for all VM-host combinations) + Double preImbalance = getClusterImbalance(cluster.getId(), + new ArrayList<>(hostCpuCapacityMap.values()), + new ArrayList<>(hostMemoryCapacityMap.values()), + null); + + // Pre-calculate base metrics array once per iteration for optimized imbalance calculation + String metricType = getClusterDrsMetric(cluster.getId()); + Map> baseMetricsMap = "cpu".equals(metricType) ? 
hostCpuCapacityMap : hostMemoryCapacityMap; + Pair> baseMetricsAndIndexMap = getBaseMetricsArrayAndHostIdIndexMap(cluster, baseMetricsMap); + double[] baseMetricsArray = baseMetricsAndIndexMap.first(); + Map hostIdToIndexMap = baseMetricsAndIndexMap.second(); + double improvement = 0; Pair bestMigration = new Pair<>(null, null); for (VirtualMachine vm : vmList) { - if (vm.getType().isUsedBySystem() || vm.getState() != VirtualMachine.State.Running || - (MapUtils.isNotEmpty(vm.getDetails()) && - vm.getDetails().get(VmDetailConstants.SKIP_DRS).equalsIgnoreCase("true")) - ) { + List compatibleHosts = vmToCompatibleHostsCache.get(vm.getId()); + Map requiresStorageMotion = vmToStorageMotionCache.get(vm.getId()); + ExcludeList excludes = vmToExcludesMap.get(vm.getId()); + + ServiceOffering serviceOffering = vmIdServiceOfferingMap.get(vm.getId()); + if (skipDrs(vm, compatibleHosts, serviceOffering)) { continue; } - Ternary, Integer>, List, Map> hostsForMigrationOfVM = managementServer - .listHostsForMigrationOfVM( - vm, 0L, 500L, null, vmList); - List compatibleDestinationHosts = hostsForMigrationOfVM.first().first(); - List suitableDestinationHosts = hostsForMigrationOfVM.second(); - Map requiresStorageMotion = hostsForMigrationOfVM.third(); + long vmCpu = (long) serviceOffering.getCpu() * serviceOffering.getSpeed(); + long vmMemory = serviceOffering.getRamSize() * 1024L * 1024L; - for (Host destHost : compatibleDestinationHosts) { - if (!suitableDestinationHosts.contains(destHost) || cluster.getId() != destHost.getClusterId()) { + for (Host destHost : compatibleHosts) { + Ternary metrics = getMetricsForMigration(cluster, algorithm, vm, vmCpu, + vmMemory, serviceOffering, destHost, hostCpuCapacityMap, hostMemoryCapacityMap, + requiresStorageMotion, preImbalance, baseMetricsArray, hostIdToIndexMap, excludes); + if (metrics == null) { continue; } - Ternary metrics = algorithm.getMetrics(cluster, vm, - vmIdServiceOfferingMap.get(vm.getId()), destHost, hostCpuCapacityMap, hostMemoryCapacityMap, - requiresStorageMotion.get(destHost)); - Double currentImprovement = metrics.first(); Double cost = metrics.second(); Double benefit = metrics.third(); @@ -477,6 +633,86 @@ public class ClusterDrsServiceImpl extends ManagerBase implements ClusterDrsServ return bestMigration; } + private boolean skipDrs(VirtualMachine vm, List compatibleHosts, ServiceOffering serviceOffering) { + if (vm.getType().isUsedBySystem() || vm.getState() != VirtualMachine.State.Running) { + return true; + } + if (MapUtils.isNotEmpty(vm.getDetails()) && + "true".equalsIgnoreCase(vm.getDetails().get(VmDetailConstants.SKIP_DRS))) { + return true; + } + if (CollectionUtils.isEmpty(compatibleHosts)) { + return true; + } + if (serviceOffering == null) { + return true; + } + return false; + } + + private Pair> getBaseMetricsArrayAndHostIdIndexMap( + Cluster cluster, Map> baseMetricsMap + ) { + double[] baseMetricsArray = new double[baseMetricsMap.size()]; + Map hostIdToIndexMap = new HashMap<>(); + + int index = 0; + for (Map.Entry> entry : baseMetricsMap.entrySet()) { + Long hostId = entry.getKey(); + Ternary metrics = entry.getValue(); + long used = metrics.first(); + long actualTotal = metrics.third() - metrics.second(); + long free = actualTotal - metrics.first(); + Double metricValue = getMetricValue(cluster.getId(), used, free, actualTotal, null); + if (metricValue != null) { + baseMetricsArray[index] = metricValue; + hostIdToIndexMap.put(hostId, index); + index++; + } + } + + // Trim array if some values were null + if (index < 
baseMetricsArray.length) { + double[] trimmed = new double[index]; + System.arraycopy(baseMetricsArray, 0, trimmed, 0, index); + baseMetricsArray = trimmed; + } + return new Pair<>(baseMetricsArray, hostIdToIndexMap); + } + + private Ternary getMetricsForMigration( + Cluster cluster, ClusterDrsAlgorithm algorithm, VirtualMachine vm, long vmCpu, long vmMemory, + ServiceOffering serviceOffering, Host destHost, Map> hostCpuCapacityMap, + Map> hostMemoryCapacityMap, Map requiresStorageMotion, + Double preImbalance, double[] baseMetricsArray, Map hostIdToIndexMap, ExcludeList excludes + ) throws ConfigurationException { + if (cluster.getId() != destHost.getClusterId()) { + return null; + } + + // Check affinity constraints + if (excludes != null && excludes.shouldAvoid(destHost)) { + return null; + } + + // Quick capacity pre-filter: skip hosts that don't have enough capacity + Ternary destHostCpu = hostCpuCapacityMap.get(destHost.getId()); + Ternary destHostMemory = hostMemoryCapacityMap.get(destHost.getId()); + if (destHostCpu == null || destHostMemory == null) { + return null; + } + + long destHostAvailableCpu = (destHostCpu.third() - destHostCpu.second()) - destHostCpu.first(); + long destHostAvailableMemory = (destHostMemory.third() - destHostMemory.second()) - destHostMemory.first(); + + if (destHostAvailableCpu < vmCpu || destHostAvailableMemory < vmMemory) { + return null; // Skip hosts without sufficient capacity + } + + return algorithm.getMetrics(cluster, vm, serviceOffering, destHost, hostCpuCapacityMap, hostMemoryCapacityMap, + requiresStorageMotion.getOrDefault(destHost, false), preImbalance, baseMetricsArray, hostIdToIndexMap); + } + /** * Saves a DRS plan for a given cluster and returns the saved plan along with the list of migrations to be executed. 
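
Reviewer note: the filtering order inside the new getMetricsForMigration helper above can be hard to follow when read as a flat diff. The following is a minimal, self-contained sketch of that pre-filter logic only, not part of the patch: HostCapacity, candidateQualifies, availableOf and the plain avoid-set (standing in for DeploymentPlanner.ExcludeList.shouldAvoid) are hypothetical illustration names, and the Ternary in the patch is assumed to carry (used, reserved, total) in that order, matching the (third() - second()) - first() arithmetic above.

import java.util.Map;
import java.util.Set;

// Illustrative sketch only: simplified stand-ins for the patch's Host, Ternary and ExcludeList types.
public class DrsCandidateFilterSketch {

    // Capacity triple as assumed for the patch: used, reserved and total, in that order.
    record HostCapacity(long used, long reserved, long total) {
        long available() {
            // Mirrors (third() - second()) - first() from the diff.
            return (total - reserved) - used;
        }
    }

    /**
     * Returns true when a destination host survives the cheap pre-filters and is
     * worth passing to the (expensive) algorithm.getMetrics() call.
     */
    static boolean candidateQualifies(long clusterId, long destHostId, long destHostClusterId,
                                      Set<Long> hostsToAvoid,            // stands in for ExcludeList.shouldAvoid(host)
                                      Map<Long, HostCapacity> cpuByHost,
                                      Map<Long, HostCapacity> memoryByHost,
                                      long vmCpu, long vmMemory) {
        if (destHostClusterId != clusterId) {
            return false;                       // never migrate across clusters
        }
        if (hostsToAvoid != null && hostsToAvoid.contains(destHostId)) {
            return false;                       // affinity constraints exclude this host
        }
        HostCapacity cpu = cpuByHost.get(destHostId);
        HostCapacity memory = memoryByHost.get(destHostId);
        if (cpu == null || memory == null) {
            return false;                       // no capacity data for this host, skip it
        }
        // Quick capacity pre-filter: only hosts with enough free CPU and memory qualify.
        return cpu.available() >= vmCpu && memory.available() >= vmMemory;
    }

    public static void main(String[] args) {
        // Usage example with made-up numbers: one candidate host in cluster 1.
        Map<Long, HostCapacity> cpu = Map.of(2L, new HostCapacity(1000L, 0L, 4000L));
        Map<Long, HostCapacity> mem = Map.of(2L, new HostCapacity(1L << 30, 0L, 4L << 30));
        long vmCpu = 1 * 1000L;                 // cpu count * speed, as computed in the patch
        long vmMemory = 1024L * 1024L * 1024L;  // ramSize (MB) converted to bytes, as in the patch
        System.out.println(candidateQualifies(1L, 2L, 1L, Set.of(), cpu, mem, vmCpu, vmMemory)); // true
    }
}

The point of the pre-filter, as the sketch shows, is that hosts which can never fit the VM (or are excluded by affinity) are rejected with a few map lookups and comparisons, so the costly per-host getMetrics() imbalance calculation runs only for plausible destinations.
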
diff --git a/server/src/test/java/com/cloud/server/ManagementServerImplTest.java b/server/src/test/java/com/cloud/server/ManagementServerImplTest.java index 2f52df982b7..b569368f248 100644 --- a/server/src/test/java/com/cloud/server/ManagementServerImplTest.java +++ b/server/src/test/java/com/cloud/server/ManagementServerImplTest.java @@ -23,6 +23,21 @@ import static org.mockito.Mockito.lenient; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.springframework.test.util.ReflectionTestUtils; + import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; @@ -52,24 +67,11 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.framework.extensions.manager.ExtensionsManager; import org.apache.cloudstack.userdata.UserDataManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; -import org.springframework.test.util.ReflectionTestUtils; import com.cloud.cpu.CPU; import com.cloud.dc.Vlan.VlanType; import com.cloud.domain.dao.DomainDao; +import com.cloud.api.ApiDBUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -104,6 +106,7 @@ import com.cloud.vm.VMInstanceDetailVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDetailsDao; +import com.cloud.agent.manager.allocator.HostAllocator; @RunWith(MockitoJUnitRunner.class) public class ManagementServerImplTest { @@ -130,10 +133,10 @@ public class ManagementServerImplTest { IpAddressManagerImpl ipAddressManagerImpl; @Mock - AccountManager _accountMgr; + AccountManager accountManager; @Mock - UserDataDao _userDataDao; + UserDataDao userDataDao; @Mock VMTemplateDao templateDao; @@ -142,7 +145,7 @@ public class ManagementServerImplTest { AnnotationDao annotationDao; @Mock - UserVmDao _userVmDao; + UserVmDao userVmDao; @Mock UserDataManager userDataManager; @@ -163,10 +166,10 @@ public class ManagementServerImplTest { DomainDao domainDao; @Mock - GuestOSCategoryDao _guestOSCategoryDao; + GuestOSCategoryDao guestOSCategoryDao; @Mock - GuestOSDao _guestOSDao; + GuestOSDao guestOSDao; @Mock ExtensionsManager extensionManager; @@ -175,16 +178,38 @@ public class ManagementServerImplTest { @InjectMocks ManagementServerImpl spy = new ManagementServerImpl(); + @Mock + HostAllocator hostAllocator; + private AutoCloseable closeable; + private MockedStatic apiDBUtilsMock; @Before public void setup() throws IllegalAccessException, NoSuchFieldException { closeable = MockitoAnnotations.openMocks(this); CallContext.register(Mockito.mock(User.class), Mockito.mock(Account.class)); + spy._accountMgr = accountManager; + 
spy.userDataDao = userDataDao; + spy.templateDao = templateDao; + spy._userVmDao = userVmDao; + spy.annotationDao = annotationDao; + spy._detailsDao = hostDetailsDao; + spy.userDataManager = userDataManager; + + spy.setHostAllocators(List.of(hostAllocator)); + + // Mock ApiDBUtils static method + apiDBUtilsMock = Mockito.mockStatic(ApiDBUtils.class); + // Return empty list to avoid architecture filtering in most tests + apiDBUtilsMock.when(() -> ApiDBUtils.listZoneClustersArchs(Mockito.anyLong())) + .thenReturn(new ArrayList<>()); } @After public void tearDown() throws Exception { + if (apiDBUtilsMock != null) { + apiDBUtilsMock.close(); + } CallContext.unregister(); closeable.close(); } @@ -345,7 +370,7 @@ public class ManagementServerImplTest { when(account.getAccountId()).thenReturn(1L); when(account.getDomainId()).thenReturn(2L); when(callContextMock.getCallingAccount()).thenReturn(account); - when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); String testUserData = "testUserdata"; RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class); @@ -353,8 +378,8 @@ public class ManagementServerImplTest { when(cmd.getName()).thenReturn("testName"); when(cmd.getHttpMethod()).thenReturn(BaseCmd.HTTPMethod.GET); - when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null); - when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), testUserData)).thenReturn(null); + when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null); + when(userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), testUserData)).thenReturn(null); when(userDataManager.validateUserData(testUserData, BaseCmd.HTTPMethod.GET)).thenReturn(testUserData); UserData userData = spy.registerUserData(cmd); @@ -373,15 +398,15 @@ public class ManagementServerImplTest { when(account.getAccountId()).thenReturn(1L); when(account.getDomainId()).thenReturn(2L); when(callContextMock.getCallingAccount()).thenReturn(account); - when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class); when(cmd.getUserData()).thenReturn("testUserdata"); when(cmd.getName()).thenReturn("testName"); UserDataVO userData = Mockito.mock(UserDataVO.class); - when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null); - when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), "testUserdata")).thenReturn(userData); + when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null); + when(userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), "testUserdata")).thenReturn(userData); spy.registerUserData(cmd); } @@ -395,13 +420,13 @@ public class ManagementServerImplTest { when(account.getAccountId()).thenReturn(1L); when(account.getDomainId()).thenReturn(2L); Mockito.when(callContextMock.getCallingAccount()).thenReturn(account); - 
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class); when(cmd.getName()).thenReturn("testName"); UserDataVO userData = Mockito.mock(UserDataVO.class); - when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(userData); + when(userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(userData); spy.registerUserData(cmd); } @@ -413,7 +438,7 @@ public class ManagementServerImplTest { CallContext callContextMock = Mockito.mock(CallContext.class); when(CallContext.current()).thenReturn(callContextMock); when(callContextMock.getCallingAccount()).thenReturn(account); - when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class); when(cmd.getAccountName()).thenReturn("testAccountName"); @@ -423,10 +448,10 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); Mockito.when(userData.getId()).thenReturn(1L); - when(_userDataDao.findById(1L)).thenReturn(userData); + when(userDataDao.findById(1L)).thenReturn(userData); when(templateDao.findTemplatesLinkedToUserdata(1L)).thenReturn(new ArrayList()); - when(_userVmDao.findByUserDataId(1L)).thenReturn(new ArrayList()); - when(_userDataDao.remove(1L)).thenReturn(true); + when(userVmDao.findByUserDataId(1L)).thenReturn(new ArrayList()); + when(userDataDao.remove(1L)).thenReturn(true); boolean result = spy.deleteUserData(cmd); Assert.assertEquals(true, result); @@ -439,7 +464,7 @@ public class ManagementServerImplTest { CallContext callContextMock = Mockito.mock(CallContext.class); when(CallContext.current()).thenReturn(callContextMock); when(callContextMock.getCallingAccount()).thenReturn(account); - when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class); when(cmd.getAccountName()).thenReturn("testAccountName"); @@ -449,7 +474,7 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); Mockito.when(userData.getId()).thenReturn(1L); - when(_userDataDao.findById(1L)).thenReturn(userData); + when(userDataDao.findById(1L)).thenReturn(userData); VMTemplateVO vmTemplateVO = Mockito.mock(VMTemplateVO.class); List linkedTemplates = new ArrayList<>(); @@ -466,7 +491,7 @@ public class ManagementServerImplTest { CallContext callContextMock = Mockito.mock(CallContext.class); when(CallContext.current()).thenReturn(callContextMock); when(callContextMock.getCallingAccount()).thenReturn(account); - when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); + when(accountManager.finalizeOwner(nullable(Account.class), 
nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account); DeleteUserDataCmd cmd = Mockito.mock(DeleteUserDataCmd.class); when(cmd.getAccountName()).thenReturn("testAccountName"); @@ -476,14 +501,14 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); Mockito.when(userData.getId()).thenReturn(1L); - when(_userDataDao.findById(1L)).thenReturn(userData); + when(userDataDao.findById(1L)).thenReturn(userData); when(templateDao.findTemplatesLinkedToUserdata(1L)).thenReturn(new ArrayList()); UserVmVO userVmVO = Mockito.mock(UserVmVO.class); List vms = new ArrayList<>(); vms.add(userVmVO); - when(_userVmDao.findByUserDataId(1L)).thenReturn(vms); + when(userVmDao.findByUserDataId(1L)).thenReturn(vms); spy.deleteUserData(cmd); } @@ -504,7 +529,7 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); SearchBuilder sb = Mockito.mock(SearchBuilder.class); - when(_userDataDao.createSearchBuilder()).thenReturn(sb); + when(userDataDao.createSearchBuilder()).thenReturn(sb); when(sb.entity()).thenReturn(userData); SearchCriteria sc = Mockito.mock(SearchCriteria.class); @@ -513,7 +538,7 @@ public class ManagementServerImplTest { List userDataList = new ArrayList(); userDataList.add(userData); Pair, Integer> result = new Pair(userDataList, 1); - when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); + when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); @@ -537,7 +562,7 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); SearchBuilder sb = Mockito.mock(SearchBuilder.class); - when(_userDataDao.createSearchBuilder()).thenReturn(sb); + when(userDataDao.createSearchBuilder()).thenReturn(sb); when(sb.entity()).thenReturn(userData); SearchCriteria sc = Mockito.mock(SearchCriteria.class); @@ -546,7 +571,7 @@ public class ManagementServerImplTest { List userDataList = new ArrayList(); userDataList.add(userData); Pair, Integer> result = new Pair(userDataList, 1); - when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); + when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); @@ -570,7 +595,7 @@ public class ManagementServerImplTest { UserDataVO userData = Mockito.mock(UserDataVO.class); SearchBuilder sb = Mockito.mock(SearchBuilder.class); - when(_userDataDao.createSearchBuilder()).thenReturn(sb); + when(userDataDao.createSearchBuilder()).thenReturn(sb); when(sb.entity()).thenReturn(userData); SearchCriteria sc = Mockito.mock(SearchCriteria.class); @@ -579,7 +604,7 @@ public class ManagementServerImplTest { List userDataList = new ArrayList(); userDataList.add(userData); Pair, Integer> result = new Pair(userDataList, 1); - when(_userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); + when(userDataDao.searchAndCount(nullable(SearchCriteria.class), nullable(Filter.class))).thenReturn(result); Pair, Integer> userdataResultList = spy.listUserDatas(cmd, false); @@ -764,12 +789,12 @@ public class ManagementServerImplTest { boolean featured = true; Mockito.when(addCmd.getName()).thenReturn(name); Mockito.when(addCmd.isFeatured()).thenReturn(featured); - 
Mockito.doAnswer((Answer) invocation -> (GuestOSCategoryVO)invocation.getArguments()[0]).when(_guestOSCategoryDao).persist(Mockito.any(GuestOSCategoryVO.class)); + Mockito.doAnswer((Answer) invocation -> (GuestOSCategoryVO)invocation.getArguments()[0]).when(guestOSCategoryDao).persist(Mockito.any(GuestOSCategoryVO.class)); GuestOsCategory result = spy.addGuestOsCategory(addCmd); Assert.assertNotNull(result); Assert.assertEquals(name, result.getName()); Assert.assertEquals(featured, result.isFeatured()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).persist(any(GuestOSCategoryVO.class)); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).persist(any(GuestOSCategoryVO.class)); } @Test @@ -784,14 +809,14 @@ public class ManagementServerImplTest { Mockito.when(updateCmd.getName()).thenReturn(name); Mockito.when(updateCmd.isFeatured()).thenReturn(featured); Mockito.when(updateCmd.getSortKey()).thenReturn(sortKey); - Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); - Mockito.when(_guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true); + Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); + Mockito.when(guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true); GuestOsCategory result = spy.updateGuestOsCategory(updateCmd); Assert.assertNotNull(result); Assert.assertEquals(name, result.getName()); Assert.assertEquals(featured, result.isFeatured()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); } @Test(expected = InvalidParameterValueException.class) @@ -799,7 +824,7 @@ public class ManagementServerImplTest { UpdateGuestOsCategoryCmd updateCmd = Mockito.mock(UpdateGuestOsCategoryCmd.class); long id = 1L; when(updateCmd.getId()).thenReturn(id); - when(_guestOSCategoryDao.findById(id)).thenReturn(null); + when(guestOSCategoryDao.findById(id)).thenReturn(null); spy.updateGuestOsCategory(updateCmd); } @@ -812,13 +837,13 @@ public class ManagementServerImplTest { when(updateCmd.getName()).thenReturn(null); when(updateCmd.isFeatured()).thenReturn(null); when(updateCmd.getSortKey()).thenReturn(null); - when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); + when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); GuestOsCategory result = spy.updateGuestOsCategory(updateCmd); Assert.assertNotNull(result); Assert.assertNotNull(result.getName()); Assert.assertFalse(result.isFeatured()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id); - Mockito.verify(_guestOSCategoryDao, Mockito.never()).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id); + Mockito.verify(guestOSCategoryDao, Mockito.never()).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); } @Test @@ -831,14 +856,14 @@ public class ManagementServerImplTest { Mockito.when(updateCmd.getName()).thenReturn(name); Mockito.when(updateCmd.isFeatured()).thenReturn(null); Mockito.when(updateCmd.getSortKey()).thenReturn(null); - Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); - Mockito.when(_guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true); + 
Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); + Mockito.when(guestOSCategoryDao.update(Mockito.eq(id), any(GuestOSCategoryVO.class))).thenReturn(true); GuestOsCategory result = spy.updateGuestOsCategory(updateCmd); Assert.assertNotNull(result); Assert.assertEquals(name, result.getName()); Assert.assertFalse(result.isFeatured()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).update(Mockito.eq(id), any(GuestOSCategoryVO.class)); } @Test @@ -847,14 +872,14 @@ public class ManagementServerImplTest { GuestOSCategoryVO guestOSCategory = Mockito.mock(GuestOSCategoryVO.class); long id = 1L; Mockito.when(deleteCmd.getId()).thenReturn(id); - Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); - Mockito.when(_guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList()); - Mockito.when(_guestOSCategoryDao.remove(id)).thenReturn(true); + Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); + Mockito.when(guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList()); + Mockito.when(guestOSCategoryDao.remove(id)).thenReturn(true); boolean result = spy.deleteGuestOsCategory(deleteCmd); Assert.assertTrue(result); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).findById(id); - Mockito.verify(_guestOSDao, Mockito.times(1)).listIdsByCategoryId(id); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).remove(id); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).findById(id); + Mockito.verify(guestOSDao, Mockito.times(1)).listIdsByCategoryId(id); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).remove(id); } @Test(expected = InvalidParameterValueException.class) @@ -862,7 +887,7 @@ public class ManagementServerImplTest { DeleteGuestOsCategoryCmd deleteCmd = Mockito.mock(DeleteGuestOsCategoryCmd.class); long id = 1L; Mockito.when(deleteCmd.getId()).thenReturn(id); - Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(null); + Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(null); spy.deleteGuestOsCategory(deleteCmd); } @@ -872,8 +897,8 @@ public class ManagementServerImplTest { GuestOSCategoryVO guestOSCategory = Mockito.mock(GuestOSCategoryVO.class); long id = 1L; Mockito.when(deleteCmd.getId()).thenReturn(id); - Mockito.when(_guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); - Mockito.when(_guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList(1L)); + Mockito.when(guestOSCategoryDao.findById(id)).thenReturn(guestOSCategory); + Mockito.when(guestOSDao.listIdsByCategoryId(id)).thenReturn(Arrays.asList(1L)); spy.deleteGuestOsCategory(deleteCmd); } @@ -881,7 +906,7 @@ public class ManagementServerImplTest { GuestOSVO vo = mock(GuestOSVO.class); SearchBuilder sb = mock(SearchBuilder.class); when(sb.entity()).thenReturn(vo); - when(_guestOSDao.createSearchBuilder()).thenReturn(sb); + when(guestOSDao.createSearchBuilder()).thenReturn(sb); } @Test @@ -908,19 +933,19 @@ public class ManagementServerImplTest { SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory); SearchCriteria searchCriteria = Mockito.mock(SearchCriteria.class); - Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); + 
Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L)); Pair, Integer> mockResult = new Pair<>(Arrays.asList(guestOSCategory), 1); mockGuestOsJoin(); - Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); + Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); Pair, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd); Assert.assertNotNull(result); Assert.assertEquals(1, result.second().intValue()); Assert.assertEquals(1, result.first().size()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); } @Test @@ -946,19 +971,19 @@ public class ManagementServerImplTest { SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory); SearchCriteria searchCriteria = Mockito.mock(SearchCriteria.class); - Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L)); Pair, Integer> mockResult = new Pair<>(Arrays.asList(), 0); - Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); + Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); mockGuestOsJoin(); Pair, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd); Assert.assertNotNull(result); Assert.assertEquals(0, result.second().intValue()); Assert.assertEquals(0, result.first().size()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); } @Test @@ -984,19 +1009,19 @@ public class ManagementServerImplTest { SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory); SearchCriteria searchCriteria = Mockito.mock(SearchCriteria.class); - Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); Mockito.when(templateDao.listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf)).thenReturn(Arrays.asList(1L, 2L)); Pair, 
Integer> mockResult = new Pair<>(Arrays.asList(), 0); - when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); + when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); mockGuestOsJoin(); Pair, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd); Assert.assertNotNull(result); Assert.assertEquals(0, result.second().intValue()); Assert.assertEquals(0, result.first().size()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); Mockito.verify(templateDao, Mockito.times(1)).listTemplateIsoByArchVnfAndZone(zoneId, arch, isIso, isVnf); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); } @Test @@ -1011,18 +1036,18 @@ public class ManagementServerImplTest { SearchBuilder searchBuilder = Mockito.mock(SearchBuilder.class); Mockito.when(searchBuilder.entity()).thenReturn(guestOSCategory); SearchCriteria searchCriteria = Mockito.mock(SearchCriteria.class); - Mockito.when(_guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); + Mockito.when(guestOSCategoryDao.createSearchBuilder()).thenReturn(searchBuilder); Mockito.when(searchBuilder.create()).thenReturn(searchCriteria); Pair, Integer> mockResult = new Pair<>(Arrays.asList(guestOSCategory), 1); - Mockito.when(_guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); + Mockito.when(guestOSCategoryDao.searchAndCount(Mockito.eq(searchCriteria), Mockito.any())).thenReturn(mockResult); mockGuestOsJoin(); Pair, Integer> result = spy.listGuestOSCategoriesByCriteria(listCmd); Assert.assertNotNull(result); Assert.assertEquals(1, result.second().intValue()); Assert.assertEquals(1, result.first().size()); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).createSearchBuilder(); Mockito.verify(searchCriteria, Mockito.times(1)).setParameters("id", id); - Mockito.verify(_guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); + Mockito.verify(guestOSCategoryDao, Mockito.times(1)).searchAndCount(Mockito.eq(searchCriteria), Mockito.any()); } @@ -1034,49 +1059,4 @@ public class ManagementServerImplTest { Assert.assertNotNull(spy.getExternalVmConsole(virtualMachine, host)); Mockito.verify(extensionManager).getInstanceConsole(virtualMachine, host); } - - @Test - public void getStatesForIpAddressSearchReturnsValidStates() { - ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class); - Mockito.when(cmd.getState()).thenReturn("Allocated ,free"); - List result = spy.getStatesForIpAddressSearch(cmd); - Assert.assertEquals(2, result.size()); - Assert.assertTrue(result.contains(IpAddress.State.Allocated)); - Assert.assertTrue(result.contains(IpAddress.State.Free)); - } - - @Test - public void getStatesForIpAddressSearchReturnsEmptyListForNullState() { - ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class); - Mockito.when(cmd.getState()).thenReturn(null); - List result = spy.getStatesForIpAddressSearch(cmd); - Assert.assertTrue(result.isEmpty()); - } - - @Test - public void getStatesForIpAddressSearchReturnsEmptyListForBlankState() { - ListPublicIpAddressesCmd cmd = 
Mockito.mock(ListPublicIpAddressesCmd.class); - Mockito.when(cmd.getState()).thenReturn(" "); - List result = spy.getStatesForIpAddressSearch(cmd); - Assert.assertTrue(result.isEmpty()); - } - - @Test(expected = InvalidParameterValueException.class) - public void getStatesForIpAddressSearchThrowsExceptionForInvalidState() { - ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class); - Mockito.when(cmd.getState()).thenReturn("InvalidState"); - spy.getStatesForIpAddressSearch(cmd); - } - - @Test - public void getStatesForIpAddressSearchHandlesMixedValidAndInvalidStates() { - ListPublicIpAddressesCmd cmd = Mockito.mock(ListPublicIpAddressesCmd.class); - Mockito.when(cmd.getState()).thenReturn("Allocated,InvalidState"); - try { - spy.getStatesForIpAddressSearch(cmd); - Assert.fail("Expected InvalidParameterValueException to be thrown"); - } catch (InvalidParameterValueException e) { - Assert.assertEquals("Invalid state: InvalidState", e.getMessage()); - } - } } diff --git a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java index 846d8cdc989..c33cb334e77 100644 --- a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java @@ -1368,6 +1368,22 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { accountManagerImpl.validateRoleChange(account, newRole, caller); } + @Test(expected = PermissionDeniedException.class) + public void testValidateRoleAdminCannotChangeDefaultAdmin() { + Account account = Mockito.mock(Account.class); + Mockito.when(account.isDefault()).thenReturn(true); + Mockito.when(account.getRoleId()).thenReturn(1L); + Role newRole = Mockito.mock(Role.class); + Mockito.when(newRole.getRoleType()).thenReturn(RoleType.User); + Role callerRole = Mockito.mock(Role.class); + Mockito.when(callerRole.getRoleType()).thenReturn(RoleType.Admin); + Account caller = Mockito.mock(Account.class); + Mockito.when(caller.getRoleId()).thenReturn(2L); + Mockito.when(roleService.findRole(1L)).thenReturn(Mockito.mock(Role.class)); + Mockito.when(roleService.findRole(2L)).thenReturn(callerRole); + accountManagerImpl.validateRoleChange(account, newRole, caller); + } + @Test public void checkIfAccountManagesProjectsTestNotThrowExceptionWhenTheAccountIsNotAProjectAdministrator() { long accountId = 1L; diff --git a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java index 81aac9e4b54..98b18c66305 100644 --- a/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/cluster/ClusterDrsServiceImplTest.java @@ -42,7 +42,9 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.api.command.admin.cluster.GenerateClusterDrsPlanCmd; import org.apache.cloudstack.api.response.ClusterDrsPlanMigrationResponse; import org.apache.cloudstack.api.response.ClusterDrsPlanResponse; @@ -116,6 +118,9 @@ public class ClusterDrsServiceImplTest { @Mock private VMInstanceDao vmInstanceDao; + @Mock + private AffinityGroupVMMapDao affinityGroupVMMapDao; + @Spy @InjectMocks 
private ClusterDrsServiceImpl clusterDrsService = new ClusterDrsServiceImpl(); @@ -168,9 +173,14 @@ public class ClusterDrsServiceImplTest { VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm1.getId()).thenReturn(1L); Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); Mockito.when(vm2.getHostId()).thenReturn(2L); + Mockito.when(vm2.getId()).thenReturn(2L); + Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm2.getState()).thenReturn(VirtualMachine.State.Running); List hostList = new ArrayList<>(); hostList.add(host1); @@ -201,10 +211,11 @@ public class ClusterDrsServiceImplTest { Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn( true, false); - Mockito.when( - clusterDrsService.getBestMigration(Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), - Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap())).thenReturn( - new Pair<>(vm1, host2)); + + Mockito.doReturn(new Pair<>(vm1, host2)).when(clusterDrsService).getBestMigration( + Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), + Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(), + Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap()); Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn( serviceOffering); Mockito.when(hostJoinDao.searchByIds(host1.getId(), host2.getId())).thenReturn(List.of(hostJoin1, hostJoin2)); @@ -219,6 +230,420 @@ public class ClusterDrsServiceImplTest { assertEquals(1, iterations.size()); } + @Test + public void testGetDrsPlanWithDisabledCluster() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Disabled); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithZeroMaxIterations() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + List> result = clusterDrsService.getDrsPlan(cluster, 0); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithNegativeMaxIterations() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + List> result = clusterDrsService.getDrsPlan(cluster, -1); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithSystemVMs() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO systemVm = Mockito.mock(VMInstanceVO.class); + Mockito.when(systemVm.getId()).thenReturn(1L); + Mockito.when(systemVm.getHostId()).thenReturn(1L); + Mockito.when(systemVm.getType()).thenReturn(VirtualMachine.Type.SecondaryStorageVm); + + List hostList = new ArrayList<>(); + 
hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(systemVm); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithNonRunningVMs() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO stoppedVm = Mockito.mock(VMInstanceVO.class); + Mockito.when(stoppedVm.getId()).thenReturn(1L); + Mockito.when(stoppedVm.getHostId()).thenReturn(1L); + Mockito.when(stoppedVm.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(stoppedVm.getState()).thenReturn(VirtualMachine.State.Stopped); + + List hostList = new ArrayList<>(); + hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(stoppedVm); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithSkipDrsFlag() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO skippedVm = Mockito.mock(VMInstanceVO.class); + Mockito.when(skippedVm.getId()).thenReturn(1L); + Mockito.when(skippedVm.getHostId()).thenReturn(1L); + Mockito.when(skippedVm.getType()).thenReturn(VirtualMachine.Type.User); + 
Mockito.when(skippedVm.getState()).thenReturn(VirtualMachine.State.Running); + Map details = new HashMap<>(); + details.put(VmDetailConstants.SKIP_DRS, "true"); + Mockito.when(skippedVm.getDetails()).thenReturn(details); + + List hostList = new ArrayList<>(); + hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(skippedVm); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithNoCompatibleHosts() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + List hostList = new ArrayList<>(); + hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + Mockito.verify(managementServer, Mockito.times(1)).listHostsForMigrationOfVM(Mockito.eq(vm1), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.anyList()); + } + + @Test + 
public void testGetDrsPlanWithExceptionInCompatibilityCheck() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + List hostList = new ArrayList<>(); + hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + // Exception should be caught and logged, not propagated + Mockito.verify(managementServer, Mockito.times(1)).listHostsForMigrationOfVM(Mockito.eq(vm1), Mockito.anyLong(), Mockito.anyLong(), Mockito.any(), Mockito.anyList()); + } + + @Test + public void testGetDrsPlanWithNoBestMigration() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + List hostList = new ArrayList<>(); + hostList.add(host1); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + 
Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering); + Mockito.when(hostJoinDao.searchByIds(Mockito.any())).thenReturn(List.of(hostJoin1)); + + HostVO compatibleHost = Mockito.mock(HostVO.class); + + // Return null migration (no best migration found) + Mockito.doReturn(new Pair<>(null, null)).when(clusterDrsService).getBestMigration( + Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), + Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(), + Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap()); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + } + + @Test + public void testGetDrsPlanWithMultipleIterations() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host2.getId()).thenReturn(2L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + VMInstanceVO vm2 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm2.getId()).thenReturn(2L); + Mockito.when(vm2.getHostId()).thenReturn(1L); + Mockito.when(vm2.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm2.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm2.getDetails()).thenReturn(Collections.emptyMap()); + + List hostList = new ArrayList<>(); + hostList.add(host1); + hostList.add(host2); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + vmList.add(vm2); + + HostJoinVO hostJoin1 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin1.getId()).thenReturn(1L); + Mockito.when(hostJoin1.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin1.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getCpus()).thenReturn(4); + Mockito.when(hostJoin1.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin1.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin1.getMemReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin1.getTotalMemory()).thenReturn(8192L); + + HostJoinVO hostJoin2 = Mockito.mock(HostJoinVO.class); + Mockito.when(hostJoin2.getId()).thenReturn(2L); + Mockito.when(hostJoin2.getCpuUsedCapacity()).thenReturn(1000L); + Mockito.when(hostJoin2.getCpuReservedCapacity()).thenReturn(0L); + Mockito.when(hostJoin2.getCpus()).thenReturn(4); + Mockito.when(hostJoin2.getSpeed()).thenReturn(1000L); + Mockito.when(hostJoin2.getMemUsedCapacity()).thenReturn(1024L); + Mockito.when(hostJoin2.getMemReservedCapacity()).thenReturn(0L); + 
Mockito.when(hostJoin2.getTotalMemory()).thenReturn(8192L); + + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + Mockito.when(serviceOffering.getCpu()).thenReturn(1); + Mockito.when(serviceOffering.getRamSize()).thenReturn(1024); + Mockito.when(serviceOffering.getSpeed()).thenReturn(1000); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn( + true, true, false); + Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering); + Mockito.when(hostJoinDao.searchByIds(1L, 2L)).thenReturn(List.of(hostJoin1, hostJoin2)); + + // Return migrations for first two iterations, then null + Mockito.doReturn(new Pair<>(vm1, host2), new Pair<>(vm2, host2), new Pair<>(null, null)) + .when(clusterDrsService).getBestMigration( + Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), + Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(), + Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap()); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(2, result.size()); + Mockito.verify(balancedAlgorithm, Mockito.times(3)).needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList()); + } + + @Test + public void testGetDrsPlanWithMigrationToOriginalHost() throws ConfigurationException { + ClusterVO cluster = Mockito.mock(ClusterVO.class); + Mockito.when(cluster.getId()).thenReturn(1L); + Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled); + + HostVO host1 = Mockito.mock(HostVO.class); + Mockito.when(host1.getId()).thenReturn(1L); + + HostVO host2 = Mockito.mock(HostVO.class); + Mockito.when(host2.getId()).thenReturn(2L); + + VMInstanceVO vm1 = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm1.getId()).thenReturn(1L); + Mockito.when(vm1.getHostId()).thenReturn(1L); + Mockito.when(vm1.getType()).thenReturn(VirtualMachine.Type.User); + Mockito.when(vm1.getState()).thenReturn(VirtualMachine.State.Running); + Mockito.when(vm1.getDetails()).thenReturn(Collections.emptyMap()); + + List hostList = new ArrayList<>(); + hostList.add(host1); + hostList.add(host2); + + List vmList = new ArrayList<>(); + vmList.add(vm1); + + ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class); + + Mockito.when(hostDao.findByClusterId(1L)).thenReturn(hostList); + Mockito.when(vmInstanceDao.listByClusterId(1L)).thenReturn(vmList); + Mockito.when(balancedAlgorithm.needsDrs(Mockito.any(), Mockito.anyList(), Mockito.anyList())).thenReturn(true); + Mockito.when(serviceOfferingDao.findByIdIncludingRemoved(Mockito.anyLong(), Mockito.anyLong())).thenReturn(serviceOffering); + + // Return migration to original host (host1) - should break the loop + Mockito.doReturn(new Pair<>(vm1, host1)).when(clusterDrsService).getBestMigration( + Mockito.any(Cluster.class), Mockito.any(ClusterDrsAlgorithm.class), + Mockito.anyList(), Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap(), + Mockito.anyMap(), Mockito.anyMap(), Mockito.anyMap()); + + List> result = clusterDrsService.getDrsPlan(cluster, 5); + assertEquals(0, result.size()); + // Should break early when VM would migrate to original host + } + @Test(expected = InvalidParameterValueException.class) public void testGenerateDrsPlanClusterNotFound() { Mockito.when(clusterDao.findById(1L)).thenReturn(null); @@ -387,18 
+812,41 @@ public class ClusterDrsServiceImplTest { vmIdServiceOfferingMap.put(vm.getId(), serviceOffering); } - Mockito.when(managementServer.listHostsForMigrationOfVM(vm1, 0L, 500L, null, vmList)).thenReturn( - new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); - Mockito.when(managementServer.listHostsForMigrationOfVM(vm2, 0L, 500L, null, vmList)).thenReturn( - new Ternary<>(new Pair<>(List.of(destHost), 1), List.of(destHost), Map.of(destHost, false))); - Mockito.when(balancedAlgorithm.getMetrics(cluster, vm1, serviceOffering, destHost, new HashMap<>(), - new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 0.5, 1.5)); + // Create caches for the new method signature + Map> vmToCompatibleHostsCache = new HashMap<>(); + vmToCompatibleHostsCache.put(vm1.getId(), List.of(destHost)); + vmToCompatibleHostsCache.put(vm2.getId(), List.of(destHost)); - Mockito.when(balancedAlgorithm.getMetrics(cluster, vm2, serviceOffering, destHost, new HashMap<>(), - new HashMap<>(), false)).thenReturn(new Ternary<>(1.0, 2.5, 1.5)); + Map> vmToStorageMotionCache = new HashMap<>(); + vmToStorageMotionCache.put(vm1.getId(), Map.of(destHost, false)); + vmToStorageMotionCache.put(vm2.getId(), Map.of(destHost, false)); - Pair bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm, - vmList, vmIdServiceOfferingMap, new HashMap<>(), new HashMap<>()); + Map vmToExcludesMap = new HashMap<>(); + vmToExcludesMap.put(vm1.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class)); + vmToExcludesMap.put(vm2.getId(), Mockito.mock(com.cloud.deploy.DeploymentPlanner.ExcludeList.class)); + + // Create capacity maps with dummy data for getClusterImbalance (include both source and dest hosts) + Map> hostCpuCapacityMap = new HashMap<>(); + hostCpuCapacityMap.put(host.getId(), new Ternary<>(2000L, 0L, 3000L)); // Source host + hostCpuCapacityMap.put(destHost.getId(), new Ternary<>(1000L, 0L, 2000L)); // Dest host + Map> hostMemoryCapacityMap = new HashMap<>(); + hostMemoryCapacityMap.put(host.getId(), new Ternary<>(2L * 1024L * 1024L * 1024L, 0L, 3L * 1024L * 1024L * 1024L)); // Source host + hostMemoryCapacityMap.put(destHost.getId(), new Ternary<>(1024L * 1024L * 1024L, 0L, 2L * 1024L * 1024L * 1024L)); // Dest host + + // Mock getMetrics for the optimized 10-parameter version used by getBestMigration + // Return better improvement for vm1, worse for vm2 + Mockito.doReturn(new Ternary<>(1.0, 0.5, 1.5)).when(balancedAlgorithm).getMetrics( + Mockito.eq(cluster), Mockito.eq(vm1), Mockito.any(ServiceOffering.class), + Mockito.eq(destHost), Mockito.eq(hostCpuCapacityMap), Mockito.eq(hostMemoryCapacityMap), Mockito.any(Boolean.class), + Mockito.any(Double.class), Mockito.any(double[].class), Mockito.any(Map.class)); + Mockito.doReturn(new Ternary<>(0.5, 2.5, 1.5)).when(balancedAlgorithm).getMetrics( + Mockito.eq(cluster), Mockito.eq(vm2), Mockito.any(ServiceOffering.class), + Mockito.eq(destHost), Mockito.eq(hostCpuCapacityMap), Mockito.eq(hostMemoryCapacityMap), Mockito.any(Boolean.class), + Mockito.any(Double.class), Mockito.any(double[].class), Mockito.any(Map.class)); + + Pair bestMigration = clusterDrsService.getBestMigration(cluster, balancedAlgorithm, + vmList, vmIdServiceOfferingMap, hostCpuCapacityMap, hostMemoryCapacityMap, + vmToCompatibleHostsCache, vmToStorageMotionCache, vmToExcludesMap); assertEquals(destHost, bestMigration.second()); assertEquals(vm1, bestMigration.first()); @@ -443,12 +891,28 @@ public class ClusterDrsServiceImplTest { 
diff --git a/test/integration/component/maint/test_primary_storage_nfsmountopts_kvm.py b/test/integration/component/maint/test_primary_storage_nfsmountopts_kvm.py
index f2ad18188ca..21aee0cc717 100644
--- a/test/integration/component/maint/test_primary_storage_nfsmountopts_kvm.py
+++ b/test/integration/component/maint/test_primary_storage_nfsmountopts_kvm.py
@@ -110,7 +110,7 @@ class TestNFSMountOptsKVM(cloudstackTestCase):
     def getNFSMountOptionForPool(self, option, poolId):
         nfsstat_cmd = "nfsstat -m | sed -n '/%s/{ n; p }'" % poolId
         nfsstat = self.sshClient.execute(nfsstat_cmd)
-        if (nfsstat == None):
+        if nfsstat == None or len(nfsstat) == 0:
             return None
         stat = nfsstat[0]
         vers = stat[stat.find(option):].split("=")[1].split(",")[0]
diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json
index 028406bbc68..9dc2c60fd18 100644
--- a/ui/public/locales/en.json
+++ b/ui/public/locales/en.json
@@ -532,6 +532,7 @@
 "label.checksum": "Checksum",
 "label.choose.resource.icon": "Choose icon",
 "label.choose.saml.identity": "Choose SAML identity provider",
+"label.choose.isolation.method.public.ip.range": "Choose the proper isolation method for the public IP range in accordance with the zone. Valid options currently 'vlan' or 'vxlan', defaults to 'vlan'.",
 "label.cidr": "CIDR",
 "label.cidrsize": "CIDR size",
 "label.cidr.destination.network": "Destination Network CIDR",
@@ -2048,6 +2049,7 @@
 "label.release.dedicated.pod": "Release dedicated Pod",
 "label.release.dedicated.zone": "Release dedicated Zone",
 "label.releasing.ip": "Releasing IP",
+"label.remote.access.vpn.specify.iprange": "Specify IP Range of remote VPN",
 "label.remote.instances": "Remote Instances",
 "label.remove": "Remove",
 "label.remove.annotation": "Remove comment",
@@ -3349,6 +3351,7 @@
 "message.enable.vpn.processing": "Enabling VPN...",
 "message.enabled.vpn": "Your remote access VPN is currently enabled and can be accessed via the IP",
 "message.enabled.vpn.ip.sec": "Your IPSec pre-shared key is",
+"message.enabled.vpn.ip.range": "Your VPN IP Range is",
 "message.enabling.security.group.provider": "Enabling security group provider",
 "message.enter.valid.nic.ip": "Please enter a valid IP address for NIC",
 "message.error.access.key": "Please enter access key.",
@@ -3693,6 +3696,7 @@
 "message.releasing.dedicated.host": "Releasing dedicated host...",
 "message.releasing.dedicated.pod": "Releasing dedicated Pod...",
 "message.releasing.dedicated.zone": "Releasing dedicated Zone...",
+"message.remote.access.vpn.iprange.description": "The range of IP addresses to allocate to VPN clients. The first IP in the range will be taken by the VPN server. (Optional)",
 "message.remove.annotation": "Are you sure you want to delete the comment?",
 "message.remove.egress.rule.failed": "Removing egress rule failed",
 "message.remove.egress.rule.processing": "Deleting egress rule...",
diff --git a/ui/src/views/iam/EditAccount.vue b/ui/src/views/iam/EditAccount.vue
index 011e1a93ec5..eecaf9a88ef 100644
--- a/ui/src/views/iam/EditAccount.vue
+++ b/ui/src/views/iam/EditAccount.vue
@@ -40,7 +40,7 @@
         v-model:value="form.networkdomain"
         :placeholder="apiParams.networkdomain.description" />
-
+
@@ -145,11 +145,13 @@ export default {
       const params = {
         newname: values.newname,
         networkdomain: values.networkdomain,
-        roleid: values.roleid,
         apikeyaccess: values.apikeyaccess,
         account: this.account,
         domainid: this.domainId
       }
+      if (values.roleid) {
+        params.roleid = values.roleid
+      }
       if (this.isValidValueForKey(values, 'networkdomain') && values.networkdomain.length > 0) {
         params.networkdomain = values.networkdomain
       }
diff --git a/ui/src/views/infra/network/IpRangesTabPublic.vue b/ui/src/views/infra/network/IpRangesTabPublic.vue
index 61f15ddedc3..81f5656799e 100644
--- a/ui/src/views/infra/network/IpRangesTabPublic.vue
+++ b/ui/src/views/infra/network/IpRangesTabPublic.vue
@@ -220,6 +220,20 @@
             {{ pod.name }}
+
+
+
+            {{ }}
+            VLAN
+            VXLAN
+
+
@@ -472,7 +486,8 @@ export default {
     initAddIpRangeForm () {
       this.formRef = ref()
       this.form = reactive({
-        iptype: ''
+        iptype: '',
+        isolationmethod: ''
       })
       this.rules = reactive({
         podid: [{ required: true, message: this.$t('label.required') }],
@@ -644,6 +659,15 @@
       if (!this.basicGuestNetwork) {
         params.zoneId = this.resource.zoneid
         params.vlan = values.vlan
+        const vlanInput = (values.vlan || '').toString().trim()
+        if (vlanInput) {
+          const vlanInputLower = vlanInput.toLowerCase()
+          const startsWithPrefix = vlanInputLower.startsWith('vlan') || vlanInputLower.startsWith('vxlan')
+          const isNumeric = /^[0-9]+$/.test(vlanInput)
+          if (!startsWithPrefix && isNumeric && values.isolationmethod) {
+            params.vlan = `${values.isolationmethod}://${vlanInput}`
+          }
+        }
         params.forsystemvms = values.forsystemvms
         params.account = values.forsystemvms ? null : values.account
         params.domainid = values.forsystemvms ? null : values.domain
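The new block in IpRangesTabPublic.vue only rewrites the VLAN field when the user typed a bare numeric ID and selected an isolation method; values that already carry a vlan/vxlan prefix are passed through untouched. The Java rendering below is illustrative only: the normalise helper and its behaviour are inferred from the Vue snippet above, which remains the authoritative version.

```java
/**
 * Standalone illustration (assumed helper, not UI code) of the VLAN input
 * normalisation added in IpRangesTabPublic.vue.
 */
public class VlanInputSketch {
    static String normalise(String vlanInput, String isolationMethod) {
        String trimmed = vlanInput == null ? "" : vlanInput.trim();
        if (trimmed.isEmpty()) {
            return trimmed;
        }
        String lower = trimmed.toLowerCase();
        boolean hasPrefix = lower.startsWith("vlan") || lower.startsWith("vxlan");
        boolean isNumeric = trimmed.matches("[0-9]+");
        // Only a bare numeric ID plus a chosen isolation method gets the URI-style prefix.
        if (!hasPrefix && isNumeric && isolationMethod != null && !isolationMethod.isEmpty()) {
            return isolationMethod + "://" + trimmed;
        }
        return trimmed;
    }

    public static void main(String[] args) {
        System.out.println(normalise("100", "vxlan"));        // vxlan://100
        System.out.println(normalise("vlan://200", "vlan"));  // vlan://200 (unchanged)
    }
}
```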
diff --git a/ui/src/views/network/NicsTab.vue b/ui/src/views/network/NicsTab.vue
index 0c62da47536..9c16865e0c6 100644
--- a/ui/src/views/network/NicsTab.vue
+++ b/ui/src/views/network/NicsTab.vue
@@ -80,10 +80,16 @@
       :footer="null"
       @cancel="closeModals">
       {{ $t('message.network.addvm.desc') }}
-
-

+
       {{ $t('label.cancel') }}
@@ -229,6 +249,7 @@