diff --git a/api/src/com/cloud/deploy/DeploymentPlanner.java b/api/src/com/cloud/deploy/DeploymentPlanner.java index 97d1c3f3054..a891a129451 100644 --- a/api/src/com/cloud/deploy/DeploymentPlanner.java +++ b/api/src/com/cloud/deploy/DeploymentPlanner.java @@ -73,6 +73,13 @@ public interface DeploymentPlanner extends Adapter { */ boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid); + public enum AllocationAlgorithm { + random, + firstfit, + userdispersing, + userconcentratedpod; + } + public static class ExcludeList { private Set _dcIds; private Set _podIds; diff --git a/client/tomcatconf/components.xml.in b/client/tomcatconf/components.xml.in index e01a5a6b60c..2c3c7bad7d3 100755 --- a/client/tomcatconf/components.xml.in +++ b/client/tomcatconf/components.xml.in @@ -93,6 +93,8 @@ + + diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index ac68123a67a..c30ef58f421 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -19,6 +19,7 @@ package com.cloud.agent.manager.allocator.impl; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -31,6 +32,7 @@ import com.cloud.agent.manager.allocator.HostAllocator; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -48,6 +50,7 @@ import com.cloud.storage.GuestOSVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; +import com.cloud.user.Account; import com.cloud.uservm.UserVm; 
import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ComponentLocator; @@ -99,6 +102,7 @@ public class FirstFitAllocator implements HostAllocator { Long clusterId = plan.getClusterId(); ServiceOffering offering = vmProfile.getServiceOffering(); VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); + Account account = vmProfile.getOwner(); if (type == Host.Type.Storage) { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not @@ -158,13 +162,15 @@ public class FirstFitAllocator implements HostAllocator { } - return allocateTo(offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity); + return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account); } - protected List allocateTo(ServiceOffering offering, VMTemplateVO template, ExcludeList avoid, List hosts, int returnUpTo, boolean considerReservedCapacity) { + protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, VMTemplateVO template, ExcludeList avoid, List hosts, int returnUpTo, boolean considerReservedCapacity, Account account) { if (_allocationAlgorithm.equals("random")) { // Shuffle this so that we don't check the hosts in the same order. 
Collections.shuffle(hosts); + }else if(_allocationAlgorithm.equals("userdispersing")){ + hosts = reorderHostsByNumberOfVms(plan, hosts, account); } if (s_logger.isDebugEnabled()) { @@ -230,6 +236,36 @@ public class FirstFitAllocator implements HostAllocator { return suitableHosts; } + private List reorderHostsByNumberOfVms(DeploymentPlan plan, List hosts, Account account) { + if(account == null){ + return hosts; + } + long dcId = plan.getDataCenterId(); + Long podId = plan.getPodId(); + Long clusterId = plan.getClusterId(); + + List hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount); + } + + //now filter the given list of Hosts by this ordered list + Map hostMap = new HashMap(); + for (HostVO host : hosts) { + hostMap.put(host.getId(), host); + } + List matchingHostIds = new ArrayList(hostMap.keySet()); + + hostIdsByVmCount.retainAll(matchingHostIds); + + List reorderedHosts = new ArrayList(); + for(Long id: hostIdsByVmCount){ + reorderedHosts.add(hostMap.get(id)); + } + + return reorderedHosts; + } + @Override public boolean isVirtualMachineUpgradable(UserVm vm, ServiceOffering offering) { // currently we do no special checks to rule out a VM being upgradable to an offering, so @@ -361,7 +397,9 @@ public class FirstFitAllocator implements HostAllocator { _factor = NumbersUtil.parseFloat(opFactor, 1); String allocationAlgorithm = configs.get("vm.allocation.algorithm"); - if (allocationAlgorithm != null && (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("firstfit"))) { + if (allocationAlgorithm != null && (allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.random.toString()) + || allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString()) + || 
allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString()))) { _allocationAlgorithm = allocationAlgorithm; } } diff --git a/server/src/com/cloud/capacity/dao/CapacityDao.java b/server/src/com/cloud/capacity/dao/CapacityDao.java index a0ea5cc8af2..79325b6f48b 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDao.java +++ b/server/src/com/cloud/capacity/dao/CapacityDao.java @@ -19,9 +19,11 @@ package com.cloud.capacity.dao; import java.util.List; +import java.util.Map; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDaoImpl.SummedCapacity; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { @@ -31,6 +33,9 @@ public interface CapacityDao extends GenericDao { boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId); List findByClusterPodZone(Long zoneId, Long podId, Long clusterId); List findNonSharedStorageForClusterPodZone(Long zoneId,Long podId, Long clusterId); - List orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor); - List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId); + Pair, Map> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor); + List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId); + + List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor); + Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityType, float cpuOverprovisioningFactor); } diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java index 34d69822a76..d6dbefd1197 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -22,7 
+22,9 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import javax.ejb.Local; @@ -33,6 +35,7 @@ import com.cloud.capacity.CapacityVO; import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.dao.StoragePoolDaoImpl; +import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; @@ -68,9 +71,17 @@ public class CapacityDaoImpl extends GenericDaoBase implements "AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " + "JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0"; - private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id FROM `cloud`.`op_host_capacity` WHERE " ; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ; private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC"; + private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + + " ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " + + " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " + + " AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " + + " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) 
"; + + private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " + + " AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC"; public CapacityDaoImpl() { _hostIdTypeSearch = createSearchBuilder(); @@ -380,10 +391,11 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public List orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ + public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); + Map clusterCapacityMap = new HashMap(); StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1); @@ -395,15 +407,48 @@ public class CapacityDaoImpl extends GenericDaoBase implements sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2); try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); - pstmt.setLong(1, id); - pstmt.setShort(2, capacityTypeForOrdering); - if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ - pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setFloat(1, cpuOverprovisioningFactor); + pstmt.setFloat(4, cpuOverprovisioningFactor); }else{ - pstmt.setFloat(3, 1); + pstmt.setFloat(1, 1); + pstmt.setFloat(4, 1); } - + pstmt.setLong(2, id); + pstmt.setShort(3, capacityTypeForOrdering); + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + Long clusterId = rs.getLong(1); + result.add(clusterId); + clusterCapacityMap.put(clusterId, rs.getDouble(2)); + } + return new Pair, Map>(result, clusterCapacityMap); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new 
CloudRuntimeException("Caught: " + sql, e); + } + } + + @Override + public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + + StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITIES); + + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, zoneId); + pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU); + pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setLong(4, requiredCpu); + pstmt.setLong(5, zoneId); + pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY); + pstmt.setFloat(7, 1); + pstmt.setLong(8, requiredRam); + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { result.add(rs.getLong(1)); @@ -414,5 +459,40 @@ public class CapacityDaoImpl extends GenericDaoBase implements } catch (Throwable e) { throw new CloudRuntimeException("Caught: " + sql, e); } + } + + @Override + public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering, float cpuOverprovisioningFactor) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + Map podCapacityMap = new HashMap(); + + StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(2, zoneId); + pstmt.setShort(3, capacityTypeForOrdering); + + if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ + pstmt.setFloat(1, cpuOverprovisioningFactor); + pstmt.setFloat(4, cpuOverprovisioningFactor); + }else{ + pstmt.setFloat(1, 1); + pstmt.setFloat(4, 1); + } + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + Long podId = rs.getLong(1); + result.add(podId); + podCapacityMap.put(podId, rs.getDouble(2)); + } + return new Pair, Map>(result, podCapacityMap); + } catch (SQLException e) { + 
throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } } } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index c29571a94a7..e154e95f8f1 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -209,8 +209,10 @@ public enum Config { ControlCidr("Advanced", ManagementServer.class, String.class, "control.cidr", "169.254.0.0/16", "Changes the cidr for the control network traffic. Defaults to using link local. Must be unique within pods", null), ControlGateway("Advanced", ManagementServer.class, String.class, "control.gateway", "169.254.0.1", "gateway for the control network traffic", null), - UseUserConcentratedPodAllocation("Advanced", ManagementServer.class, Boolean.class, "use.user.concentrated.pod.allocation", "true", "If true, deployment planner applies the user concentration heuristic during VM resource allocation", "true,false"), HostCapacityTypeToOrderClusters("Advanced", ManagementServer.class, String.class, "host.capacityType.to.order.clusters", "CPU", "The host capacity type (CPU or RAM) is used by deployment planner to order clusters during VM resource allocation", "CPU,RAM"), + ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"), + VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. 
Weight for capacity heuristic will be (1 – weight of user dispersion)", null), + VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null), EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", "The endpoint callback URL"), ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"), ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"), @@ -226,7 +228,6 @@ public enum Config { OvmGuestNetwork("Advanced", ManagementServer.class, String.class, "ovm.guest.network.device", null, "Specify the private bridge on host for private network", null), // XenServer - VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "If 'random', hosts within a pod will be randomly considered for VM/volume allocation. 
If 'firstfit', they will be considered on a first-fit basis.", null), XenPublicNetwork("Network", ManagementServer.class, String.class, "xen.public.network.device", null, "[ONLY IF THE PUBLIC NETWORK IS ON A DEDICATED NIC]:The network name label of the physical device dedicated to the public network on a XenServer host", null), XenStorageNetwork1("Network", ManagementServer.class, String.class, "xen.storage.network.device1", "cloud-stor1", "Specify when there are storage networks", null), XenStorageNetwork2("Network", ManagementServer.class, String.class, "xen.storage.network.device2", "cloud-stor2", "Specify when there are storage networks", null), diff --git a/server/src/com/cloud/dc/dao/HostPodDao.java b/server/src/com/cloud/dc/dao/HostPodDao.java index 01fb2864819..70e17f0207f 100644 --- a/server/src/com/cloud/dc/dao/HostPodDao.java +++ b/server/src/com/cloud/dc/dao/HostPodDao.java @@ -18,10 +18,9 @@ package com.cloud.dc.dao; -import java.util.HashMap; -import java.util.List; -import java.util.Vector; - +import java.util.HashMap; +import java.util.List; + import com.cloud.dc.HostPodVO; import com.cloud.utils.db.GenericDao; @@ -30,6 +29,8 @@ public interface HostPodDao extends GenericDao { public HostPodVO findByName(String name, long dcId); - public HashMap> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip); + public HashMap> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip); + + public List listDisabledPods(long zoneId); } diff --git a/server/src/com/cloud/dc/dao/HostPodDaoImpl.java b/server/src/com/cloud/dc/dao/HostPodDaoImpl.java index 17aa7ac9227..328b708beef 100644 --- a/server/src/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/server/src/com/cloud/dc/dao/HostPodDaoImpl.java @@ -30,9 +30,12 @@ import javax.ejb.Local; import org.apache.log4j.Logger; import com.cloud.dc.HostPodVO; +import com.cloud.org.Grouping; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import 
com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; @Local(value={HostPodDao.class}) @@ -40,7 +43,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class); protected SearchBuilder DataCenterAndNameSearch; - protected SearchBuilder DataCenterIdSearch; + protected SearchBuilder DataCenterIdSearch; protected HostPodDaoImpl() { DataCenterAndNameSearch = createSearchBuilder(); @@ -50,7 +53,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H DataCenterIdSearch = createSearchBuilder(); DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - DataCenterIdSearch.done(); + DataCenterIdSearch.done(); } @Override @@ -111,5 +114,21 @@ public class HostPodDaoImpl extends GenericDaoBase implements H boolean result = super.remove(id); txn.commit(); return result; - } + } + + @Override + public List listDisabledPods(long zoneId) { + GenericSearchBuilder podIdSearch = createSearchBuilder(Long.class); + podIdSearch.selectField(podIdSearch.entity().getId()); + podIdSearch.and("dataCenterId", podIdSearch.entity().getDataCenterId(), Op.EQ); + podIdSearch.and("allocationState", podIdSearch.entity().getAllocationState(), Op.EQ); + podIdSearch.done(); + + + SearchCriteria sc = podIdSearch.create(); + sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); + sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled); + return customSearch(sc, null); + } + } diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index b784e2690ce..c99f3fc293f 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import javax.ejb.Local; +import javax.naming.ConfigurationException; 
import org.apache.log4j.Logger; @@ -46,7 +47,6 @@ import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -105,13 +105,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { protected Adapters _storagePoolAllocators; @Inject(adapter=HostAllocator.class) protected Adapters _hostAllocators; + protected String _allocationAlgorithm = "random"; @Override public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { - String _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); VirtualMachine vm = vmProfile.getVirtualMachine(); ServiceOffering offering = vmProfile.getServiceOffering(); DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn()); @@ -123,7 +123,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if (s_logger.isDebugEnabled()) { - s_logger.debug("In FirstFitPlanner:: plan"); + s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm); s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); @@ -228,7 +228,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { ClusterVO cluster = _clusterDao.findById(plan.getClusterId()); if (cluster != null ){ clusterList.add(clusterIdSpecified); - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm); + return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); }else{ s_logger.debug("The specified 
cluster cannot be found, returning."); avoid.addCluster(plan.getClusterId()); @@ -241,98 +241,164 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { HostPodVO pod = _podDao.findById(podIdSpecified); if (pod != null) { - //list clusters under this pod by cpu and ram capacity - clusterList = listClustersByCapacity(podIdSpecified, cpu_requested, ram_requested, avoid, false, cpuOverprovisioningFactor); - if(!clusterList.isEmpty()){ - if(avoid.getClustersToAvoid() != null){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid()); - } - clusterList.removeAll(avoid.getClustersToAvoid()); - } - - List disabledClusters = listDisabledClusters(plan.getDataCenterId(), podIdSpecified); - if(!disabledClusters.isEmpty()){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters that are disabled: "+ disabledClusters); - } - clusterList.removeAll(disabledClusters); - } - - DeployDestination dest = checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm); - if(dest == null){ - avoid.addPod(plan.getPodId()); - } - return dest; - }else{ - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found under this pod, having a host with enough capacity, returning."); - } + DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); + if(dest == null){ avoid.addPod(plan.getPodId()); - return null; } + return dest; } else { s_logger.debug("The specified Pod cannot be found, returning."); avoid.addPod(plan.getPodId()); return null; } }else{ - //consider all clusters under this zone. 
s_logger.debug("Searching all possible resources under this Zone: "+ plan.getDataCenterId()); - //list clusters under this zone by cpu and ram capacity - List prioritizedClusterIds = listClustersByCapacity(plan.getDataCenterId(), cpu_requested, ram_requested, avoid, true, cpuOverprovisioningFactor); - if(!prioritizedClusterIds.isEmpty()){ - if(avoid.getClustersToAvoid() != null){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid()); - } - prioritizedClusterIds.removeAll(avoid.getClustersToAvoid()); - } - List disabledClusters = listDisabledClusters(plan.getDataCenterId(), null); - if(!disabledClusters.isEmpty()){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: "+ disabledClusters); - } - prioritizedClusterIds.removeAll(disabledClusters); - } + + boolean applyAllocationAtPods = Boolean.parseBoolean(_configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key())); + if(applyAllocationAtPods){ + //start scan at all pods under this zone. + return scanPodsForDestination(vmProfile, plan, avoid); }else{ - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found having a host with enough capacity, returning."); - } - return null; - } - if(!prioritizedClusterIds.isEmpty()){ - boolean applyUserConcentrationPodHeuristic = Boolean.parseBoolean(_configDao.getValue(Config.UseUserConcentratedPodAllocation.key())); - if(applyUserConcentrationPodHeuristic && vmProfile.getOwner() != null){ - //user has VMs in certain pods. 
- prioritize those pods first - //UserConcentratedPod strategy - long accountId = vmProfile.getOwner().getAccountId(); - List podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId); - if(!podIds.isEmpty()){ - if(avoid.getPodsToAvoid() != null){ - if (s_logger.isDebugEnabled()) { - s_logger.debug("Removing from the pod list these pods from avoid set: "+ avoid.getPodsToAvoid()); - } - podIds.removeAll(avoid.getPodsToAvoid()); - } - clusterList = reorderClustersByPods(prioritizedClusterIds, podIds); - }else{ - clusterList = prioritizedClusterIds; - } - }else{ - clusterList = prioritizedClusterIds; - } - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm); - }else{ - if (s_logger.isDebugEnabled()) { - s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); - } - return null; + //start scan at clusters under this zone. + return scanClustersForDestinationInZoneOrPod(plan.getDataCenterId(), true, vmProfile, plan, avoid); } } } + + private DeployDestination scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ + + ServiceOffering offering = vmProfile.getServiceOffering(); + int requiredCpu = offering.getCpu() * offering.getSpeed(); + long requiredRam = offering.getRamSize() * 1024L * 1024L; + String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); + float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); + + //list pods under this zone by cpu and ram capacity + List prioritizedPodIds = new ArrayList(); + Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor); + List podsWithCapacity = podCapacityInfo.first(); + if(!podsWithCapacity.isEmpty()){ + if(avoid.getPodsToAvoid() != null){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Removing from the podId list these pods from avoid set: "+ 
avoid.getPodsToAvoid()); + } + podsWithCapacity.removeAll(avoid.getPodsToAvoid()); + } + List disabledPods = listDisabledPods(plan.getDataCenterId()); + if(!disabledPods.isEmpty()){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Removing from the podId list these pods that are disabled: "+ disabledPods); + } + podsWithCapacity.removeAll(disabledPods); + } + }else{ + if (s_logger.isDebugEnabled()) { + s_logger.debug("No pods found having a host with enough capacity, returning."); + } + return null; + } + + if(!podsWithCapacity.isEmpty()){ + + prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); + + //loop over pods + for(Long podId : prioritizedPodIds){ + s_logger.debug("Checking resources under Pod: "+podId); + DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid); + if(dest != null){ + return dest; + } + avoid.addPod(podId); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("No Pods found for destination, returning."); + } + return null; + }else{ + if (s_logger.isDebugEnabled()) { + s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); + } + return null; + } + } + + private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ + + VirtualMachine vm = vmProfile.getVirtualMachine(); + ServiceOffering offering = vmProfile.getServiceOffering(); + DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn()); + int requiredCpu = offering.getCpu() * offering.getSpeed(); + long requiredRam = offering.getRamSize() * 1024L * 1024L; + String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); + float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); + + //list clusters under this zone by cpu and ram capacity + Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, 
cpuOverprovisioningFactor); + List prioritizedClusterIds = clusterCapacityInfo.first(); + if(!prioritizedClusterIds.isEmpty()){ + if(avoid.getClustersToAvoid() != null){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid()); + } + prioritizedClusterIds.removeAll(avoid.getClustersToAvoid()); + } + + List disabledClusters = new ArrayList(); + if(isZone){ + disabledClusters = listDisabledClusters(plan.getDataCenterId(), null); + }else{ + disabledClusters = listDisabledClusters(plan.getDataCenterId(), id); + } + if(!disabledClusters.isEmpty()){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: "+ disabledClusters); + } + prioritizedClusterIds.removeAll(disabledClusters); + } + }else{ + if (s_logger.isDebugEnabled()) { + s_logger.debug("No clusters found having a host with enough capacity, returning."); + } + return null; + } + if(!prioritizedClusterIds.isEmpty()){ + List clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan); + return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); + }else{ + if (s_logger.isDebugEnabled()) { + s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); + } + return null; + } + } + + /** + * This method should reorder the given list of Cluster Ids by applying any necessary heuristic + * for this planner + * For FirstFitPlanner there is no specific heuristic to be applied + * other than the capacity based ordering which is done by default. 
+ * @return List ordered list of Cluster Ids + */ + protected List reorderClusters(long id, boolean isZone, Pair, Map> clusterCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List reordersClusterIds = clusterCapacityInfo.first(); + return reordersClusterIds; + } + + /** + * This method should reorder the given list of Pod Ids by applying any necessary heuristic + * for this planner + * For FirstFitPlanner there is no specific heuristic to be applied + * other than the capacity based ordering which is done by default. + * @return List ordered list of Pod Ids + */ + protected List reorderPods(Pair, Map> podCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List podIdsByCapacity = podCapacityInfo.first(); + return podIdsByCapacity; + } + private List listDisabledClusters(long zoneId, Long podId){ List disabledClusters = _clusterDao.listDisabledClusters(zoneId, podId); if(podId == null){ @@ -343,6 +409,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return disabledClusters; } + private List listDisabledPods(long zoneId){ + List disabledPods = _podDao.listDisabledPods(zoneId); + return disabledPods; + } + private Map getCapacityThresholdMap(){ // Lets build this real time so that the admin wont have to restart MS if he changes these values Map disableThresholdMap = new HashMap(); @@ -409,7 +480,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, - DeploymentPlan plan, ExcludeList avoid, DataCenter dc, String _allocationAlgorithm){ + DeploymentPlan plan, ExcludeList avoid, DataCenter dc){ if (s_logger.isTraceEnabled()) { s_logger.trace("ClusterId List to consider: " + clusterList); @@ -475,56 +546,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return null; } - private List reorderClustersByPods(List clusterIds, List 
podIds) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Reordering cluster list as per pods ordered by user concentration"); - } - - Map> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds); - - if (s_logger.isTraceEnabled()) { - s_logger.trace("Pod To cluster Map is: "+podClusterMap ); - } - - List reorderedClusters = new ArrayList(); - for (Long pod : podIds){ - if(podClusterMap.containsKey(pod)){ - List clustersOfThisPod = podClusterMap.get(pod); - if(clustersOfThisPod != null){ - for(Long clusterId : clusterIds){ - if(clustersOfThisPod.contains(clusterId)){ - reorderedClusters.add(clusterId); - } - } - clusterIds.removeAll(clustersOfThisPod); - } - } - } - reorderedClusters.addAll(clusterIds); - - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reordered cluster list: " + reorderedClusters); - } - return reorderedClusters; - } - - protected List listPodsByUserConcentration(long zoneId, long accountId){ - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId); - } - - List prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId); - - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods); - } - - return prioritizedPods; - } - - protected List listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){ + protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){ //look at the aggregate available cpu and ram per cluster //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot @@ -545,7 +568,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if (s_logger.isTraceEnabled()) { 
s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity); } - List clusterIdsOrderedByAggregateCapacity = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor); + Pair, Map> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor); + List clusterIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM if (s_logger.isTraceEnabled()) { s_logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); @@ -556,9 +580,47 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { s_logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity); } - return clusterIdsOrderedByAggregateCapacity; + return result; } + + protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){ + //look at the aggregate available cpu and ram per pod + //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot + + //we need pods having enough cpu AND RAM to host this particular VM and order them by aggregate pod capacity + if (s_logger.isDebugEnabled()) { + s_logger.debug("Listing pods in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this Zone: "+zoneId); + } + String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key()); + short capacityType = CapacityVO.CAPACITY_TYPE_CPU; + if("RAM".equalsIgnoreCase(capacityTypeToOrder)){ + capacityType = CapacityVO.CAPACITY_TYPE_MEMORY; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor); + } + List 
podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType, cpuOverprovisioningFactor); + if (s_logger.isTraceEnabled()) { + s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); + } + Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType, cpuOverprovisioningFactor); + List podIdsOrderedByAggregateCapacity = result.first(); + //only keep the clusters that have enough capacity to host this VM + if (s_logger.isTraceEnabled()) { + s_logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + } + podIdsOrderedByAggregateCapacity.retainAll(podIdswithEnoughCapacity); + + if (s_logger.isTraceEnabled()) { + s_logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity); + } + + return result; + + } + protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools){ s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); @@ -706,7 +768,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { Enumeration enPool = _storagePoolAllocators.enumeration(); while (enPool.hasMoreElements()) { final StoragePoolAllocator allocator = enPool.nextElement(); - final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile.getTemplate(), plan, avoid, returnUpTo); + final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); if (suitablePools != null && !suitablePools.isEmpty()) { suitableVolumeStoragePools.put(toBeCreated, suitablePools); foundPotentialPools = true; @@ -740,8 +802,21 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Override public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - return 
vm.getHypervisorType() != HypervisorType.BareMetal; + if(vm.getHypervisorType() != HypervisorType.BareMetal){ + //check the allocation strategy + if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) { + return true; + } + } + return false; } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); + return true; + } private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){ // Check if the zone exists in the system diff --git a/server/src/com/cloud/deploy/UserConcentratedPodPlanner.java b/server/src/com/cloud/deploy/UserConcentratedPodPlanner.java new file mode 100644 index 00000000000..4ba79dff72d --- /dev/null +++ b/server/src/com/cloud/deploy/UserConcentratedPodPlanner.java @@ -0,0 +1,157 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package com.cloud.deploy; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.utils.Pair; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Local(value=DeploymentPlanner.class) +public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner { + + private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class); + + /** + * This method should reorder the given list of Cluster Ids by applying any necessary heuristic + * for this planner + * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account + * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level. + * @return List ordered list of Cluster Ids + */ + @Override + protected List reorderClusters(long id, boolean isZone, Pair, Map> clusterCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List clusterIdsByCapacity = clusterCapacityInfo.first(); + if(vmProfile.getOwner() == null || !isZone){ + return clusterIdsByCapacity; + } + return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId()); + } + + private List applyUserConcentrationPodHeuristicToClusters(long zoneId, List prioritizedClusterIds, long accountId){ + //user has VMs in certain pods. 
- prioritize those pods first + //UserConcentratedPod strategy + List clusterList = new ArrayList(); + List podIds = listPodsByUserConcentration(zoneId, accountId); + if(!podIds.isEmpty()){ + clusterList = reorderClustersByPods(prioritizedClusterIds, podIds); + }else{ + clusterList = prioritizedClusterIds; + } + return clusterList; + } + + private List reorderClustersByPods(List clusterIds, List podIds) { + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Reordering cluster list as per pods ordered by user concentration"); + } + + Map> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds); + + if (s_logger.isTraceEnabled()) { + s_logger.trace("Pod To cluster Map is: "+podClusterMap ); + } + + List reorderedClusters = new ArrayList(); + for (Long pod : podIds){ + if(podClusterMap.containsKey(pod)){ + List clustersOfThisPod = podClusterMap.get(pod); + if(clustersOfThisPod != null){ + for(Long clusterId : clusterIds){ + if(clustersOfThisPod.contains(clusterId)){ + reorderedClusters.add(clusterId); + } + } + clusterIds.removeAll(clustersOfThisPod); + } + } + } + reorderedClusters.addAll(clusterIds); + + if (s_logger.isTraceEnabled()) { + s_logger.trace("Reordered cluster list: " + reorderedClusters); + } + return reorderedClusters; + } + + protected List listPodsByUserConcentration(long zoneId, long accountId){ + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId); + } + + List prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId); + + if (s_logger.isTraceEnabled()) { + s_logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods); + } + + return prioritizedPods; + } + + /** + * This method should reorder the given list of Pod Ids by applying any necessary heuristic + * for this planner + * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account 
+ * @return List ordered list of Pod Ids + */ + @Override + protected List reorderPods(Pair, Map> podCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List podIdsByCapacity = podCapacityInfo.first(); + if(vmProfile.getOwner() == null){ + return podIdsByCapacity; + } + long accountId = vmProfile.getOwner().getAccountId(); + + //user has VMs in certain pods. - prioritize those pods first + //UserConcentratedPod strategy + List podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId); + if(!podIds.isEmpty()){ + //remove pods that dont have capacity for this vm + podIds.retainAll(podIdsByCapacity); + podIdsByCapacity.removeAll(podIds); + podIds.addAll(podIdsByCapacity); + return podIds; + }else{ + return podIdsByCapacity; + } + + } + + @Override + public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { + if(vm.getHypervisorType() != HypervisorType.BareMetal){ + //check the allocation strategy + if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod.toString())) { + return true; + } + } + return false; + } + +} diff --git a/server/src/com/cloud/deploy/UserDispersingPlanner.java b/server/src/com/cloud/deploy/UserDispersingPlanner.java new file mode 100644 index 00000000000..1d82300c5a9 --- /dev/null +++ b/server/src/com/cloud/deploy/UserDispersingPlanner.java @@ -0,0 +1,221 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package com.cloud.deploy; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.cloud.configuration.Config; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Local(value=DeploymentPlanner.class) +public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner { + + private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class); + + /** + * This method should reorder the given list of Cluster Ids by applying any necessary heuristic + * for this planner + * For UserDispersingPlanner we need to order the clusters by considering the number of VMs for this account + * @return List ordered list of Cluster Ids + */ + @Override + protected List reorderClusters(long id, boolean isZone, Pair, Map> clusterCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List clusterIdsByCapacity = clusterCapacityInfo.first(); + if(vmProfile.getOwner() == null){ + return clusterIdsByCapacity; + } + long accountId = vmProfile.getOwner().getAccountId(); + Pair, Map> clusterIdsVmCountInfo = listClustersByUserDispersion(id, isZone, accountId); + + //now we have 2 cluster lists - one ordered by capacity and the other by number of VMs for this account + //need to apply weights to these to find the correct ordering to follow + + if(_userDispersionWeight == 1.0f){ + List clusterIds = clusterIdsVmCountInfo.first(); + clusterIds.retainAll(clusterIdsByCapacity); + return 
clusterIds; + }else{ + //apply weights to the two lists + return orderByApplyingWeights(clusterCapacityInfo, clusterIdsVmCountInfo, accountId); + } + + + } + + /** + * This method should reorder the given list of Pod Ids by applying any necessary heuristic + * for this planner + * For UserDispersingPlanner we need to order the pods by considering the number of VMs for this account + * @return List ordered list of Pod Ids + */ + @Override + protected List reorderPods(Pair, Map> podCapacityInfo, VirtualMachineProfile vmProfile, DeploymentPlan plan){ + List podIdsByCapacity = podCapacityInfo.first(); + if(vmProfile.getOwner() == null){ + return podIdsByCapacity; + } + long accountId = vmProfile.getOwner().getAccountId(); + + Pair, Map> podIdsVmCountInfo = listPodsByUserDispersion(plan.getDataCenterId(), accountId); + + //now we have 2 pod lists - one ordered by capacity and the other by number of VMs for this account + //need to apply weights to these to find the correct ordering to follow + + if(_userDispersionWeight == 1.0f){ + List podIds = podIdsVmCountInfo.first(); + podIds.retainAll(podIdsByCapacity); + return podIds; + }else{ + //apply weights to the two lists + return orderByApplyingWeights(podCapacityInfo, podIdsVmCountInfo, accountId); + } + + } + + protected Pair, Map> listClustersByUserDispersion(long id, boolean isZone, long accountId){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Applying Userdispersion heuristic to clusters for account: "+ accountId); + } + Pair, Map> clusterIdsVmCountInfo; + if(isZone){ + clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInZoneByVmCount(id, accountId); + }else{ + clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId); + } + if (s_logger.isTraceEnabled()) { + s_logger.trace("List of clusters in ascending order of number of VMs: "+ clusterIdsVmCountInfo.first()); + } + return clusterIdsVmCountInfo; + } + + protected Pair, Map> listPodsByUserDispersion(long dataCenterId, long 
accountId) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Applying Userdispersion heuristic to pods for account: "+ accountId); + } + Pair, Map> podIdsVmCountInfo = _vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId); + if (s_logger.isTraceEnabled()) { + s_logger.trace("List of pods in ascending order of number of VMs: "+ podIdsVmCountInfo.first()); + } + + return podIdsVmCountInfo; + } + + + private List orderByApplyingWeights(Pair, Map> capacityInfo, Pair, Map> vmCountInfo, long accountId){ + List capacityOrderedIds = capacityInfo.first(); + List vmCountOrderedIds = vmCountInfo.first(); + Map capacityMap = capacityInfo.second(); + Map vmCountMap = vmCountInfo.second(); + + if (s_logger.isTraceEnabled()) { + s_logger.trace("Capacity Id list: "+ capacityOrderedIds + " , capacityMap:"+capacityMap); + } + if (s_logger.isTraceEnabled()) { + s_logger.trace("Vm Count Id list: "+ vmCountOrderedIds + " , vmCountMap:"+vmCountMap); + } + + + List idsReorderedByWeights = new ArrayList(); + float capacityWeight = (1.0f -_userDispersionWeight); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Applying userDispersionWeight: "+ _userDispersionWeight); + } + //normalize the vmCountMap + LinkedHashMap normalisedVmCountIdMap= new LinkedHashMap(); + + Long totalVmsOfAccount = _vmInstanceDao.countRunningByAccount(accountId); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total VMs for account: "+ totalVmsOfAccount); + } + for(Long id : vmCountOrderedIds){ + Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount; + normalisedVmCountIdMap.put(id, normalisedCount); + } + + //consider only those ids that are in capacity map. 
+ + SortedMap> sortedMap= new TreeMap>(); + for(Long id : capacityOrderedIds){ + Double weightedCapacityValue = capacityMap.get(id) * capacityWeight; + Double weightedVmCountValue = normalisedVmCountIdMap.get(id) * _userDispersionWeight; + Double totalWeight = weightedCapacityValue + weightedVmCountValue; + if(sortedMap.containsKey(totalWeight)){ + List idList = sortedMap.get(totalWeight); + idList.add(id); + sortedMap.put(totalWeight, idList); + }else{ + List idList = new ArrayList(); + idList.add(id); + sortedMap.put(totalWeight, idList); + } + } + + for(List idList : sortedMap.values()){ + idsReorderedByWeights.addAll(idList); + } + + if (s_logger.isTraceEnabled()) { + s_logger.trace("Reordered Id list: "+ idsReorderedByWeights); + } + + return idsReorderedByWeights; + } + + + @Override + public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { + if(vm.getHypervisorType() != HypervisorType.BareMetal){ + //check the allocation strategy + if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) { + return true; + } + } + return false; + } + + float _userDispersionWeight; + + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + String weight = _configDao.getValue(Config.VmUserDispersionWeight.key()); + _userDispersionWeight = NumbersUtil.parseFloat(weight, 1.0f); + + + return true; + } + +} diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index a947df43431..860ecad7f8d 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -113,8 +113,8 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; -import com.cloud.hypervisor.HypervisorGuruManager; import 
com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.network.NetworkManager; import com.cloud.network.router.VirtualNetworkApplianceManager; import com.cloud.org.Grouping; @@ -124,7 +124,6 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.Type; import com.cloud.storage.allocator.StoragePoolAllocator; import com.cloud.storage.dao.DiskOfferingDao; @@ -162,11 +161,11 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; @@ -179,9 +178,10 @@ import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineProfileImpl; import com.cloud.vm.dao.ConsoleProxyDao; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.SecondaryStorageVmDao; @@ -419,12 +419,13 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag return false; } - protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long 
clusterId, final VMTemplateVO template, final Set avoid) { + protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long clusterId, VMInstanceVO vm, final Set avoid) { + VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); Enumeration en = _storagePoolAllocators.enumeration(); while (en.hasMoreElements()) { final StoragePoolAllocator allocator = en.nextElement(); - final List poolList = allocator.allocateToPool(dskCh, template, dc.getId(), pod.getId(), clusterId, avoid, 1); + final List poolList = allocator.allocateToPool(dskCh, profile, dc.getId(), pod.getId(), clusterId, avoid, 1); if (poolList != null && !poolList.isEmpty()) { return (StoragePoolVO) poolList.get(0); } @@ -727,7 +728,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag break; } - pool = findStoragePool(dskCh, dc, pod, clusterId, template, avoidPools); + pool = findStoragePool(dskCh, dc, pod, clusterId, vm, avoidPools); if (pool == null) { s_logger.warn("Unable to find storage poll when create volume " + volume.getName()); break; diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java index 5df1c07eb8f..88bc676cafd 100755 --- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java @@ -35,7 +35,6 @@ import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; -import com.cloud.host.HostVO; import com.cloud.server.StatsCollector; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; @@ -56,13 +55,13 @@ import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import 
com.cloud.template.TemplateManager; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.Inject; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class); @@ -287,7 +286,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, long dcId, long podId, Long clusterId, Set avoids, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, long dcId, long podId, Long clusterId, Set avoids, int returnUpTo) { ExcludeList avoid = new ExcludeList(); for(StoragePool pool : avoids){ @@ -295,7 +294,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement } DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null); - return allocateToPool(dskCh, VMtemplate, plan, avoid, returnUpTo); + return allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo); } } diff --git a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java index fdf0e3e2e21..b34d7a9c893 100644 --- a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java @@ -20,37 +20,43 @@ package com.cloud.storage.allocator; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; + import javax.ejb.Local; +import 
javax.naming.ConfigurationException; import org.apache.log4j.Logger; import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.server.StatsCollector; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; -import com.cloud.template.VirtualMachineTemplate; +import com.cloud.user.Account; import com.cloud.vm.DiskProfile; -import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=StoragePoolAllocator.class) public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class); - + protected String _allocationAlgorithm = "random"; + @Override public boolean allocatorIsCorrectType(DiskProfile dskCh) { return !localStorageAllocationNeeded(dskCh); } @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - - VMTemplateVO template = (VMTemplateVO)VMtemplate; + + VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); + Account account = vmProfile.getOwner(); List suitablePools = new ArrayList(); @@ -78,7 +84,12 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator { StatsCollector sc = StatsCollector.getInstance(); - Collections.shuffle(pools); + if(_allocationAlgorithm.equals("random")) { + // Shuffle this so that we don't check the pools in the same order. 
+ Collections.shuffle(pools); + }else if(_allocationAlgorithm.equals("userdispersing")){ + pools = reorderPoolsByNumberOfVolumes(plan, pools, account); + } if (s_logger.isDebugEnabled()) { s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation"); @@ -99,4 +110,50 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } + + private List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) { + if(account == null){ + return pools; + } + long dcId = plan.getDataCenterId(); + Long podId = plan.getPodId(); + Long clusterId = plan.getClusterId(); + + List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount); + } + + //now filter the given list of Pools by this ordered list + Map poolMap = new HashMap(); + for (StoragePoolVO pool : pools) { + poolMap.put(pool.getId(), pool); + } + List matchingPoolIds = new ArrayList(poolMap.keySet()); + + poolIdsByVolCount.retainAll(matchingPoolIds); + + List reorderedPools = new ArrayList(); + for(Long id: poolIdsByVolCount){ + reorderedPools.add(poolMap.get(id)); + } + + return reorderedPools; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + if (_configDao != null) { + Map configs = _configDao.getConfiguration(params); + String allocationAlgorithm = configs.get("vm.allocation.algorithm"); + if (allocationAlgorithm != null && (allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.random.toString()) + || allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString()) + || allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString()))) { + 
_allocationAlgorithm = allocationAlgorithm; + } + } + return true; + } } diff --git a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java index 2f899435e55..e2975d42199 100644 --- a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java @@ -30,10 +30,8 @@ import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.DiskProfile; -import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -61,7 +59,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl } @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { if (!_storagePoolCleanupEnabled) { s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); @@ -81,7 +79,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl // Try to find a storage pool after cleanup ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid()); - return allocator.allocateToPool(dskCh, VMtemplate, plan, myAvoids, returnUpTo); + return allocator.allocateToPool(dskCh, vmProfile, plan, myAvoids, returnUpTo); } @Override diff --git 
a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java index 25afbf07bbb..e28b3be41f9 100644 --- a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java @@ -38,7 +38,6 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.Inject; @@ -52,6 +51,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @@ -90,7 +90,7 @@ public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator { } @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { List suitablePools = new ArrayList(); @@ -106,7 +106,7 @@ public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator { } List availablePool; - while (!(availablePool = super.allocateToPool(dskCh, VMtemplate, plan, myAvoids, 1)).isEmpty()) { + while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) { StoragePool pool = availablePool.get(0); myAvoids.addPool(pool.getId()); List hostsInSPool = _poolHostDao.listByPoolId(pool.getId()); diff --git a/server/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java index 
c4c6410dc19..de4148a5479 100644 --- a/server/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java @@ -31,9 +31,7 @@ import com.cloud.server.StatsCollector; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.vm.DiskProfile; -import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -47,11 +45,11 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { } @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { List suitablePools = new ArrayList(); - VMTemplateVO template = (VMTemplateVO)VMtemplate; + VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); // Check that the allocator type is correct if (!allocatorIsCorrectType(dskCh)) { return suitablePools; diff --git a/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java index 8d611cdf343..e2daf4c7370 100644 --- a/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java +++ b/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java @@ -24,10 +24,10 @@ import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; import com.cloud.storage.StoragePool; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.component.Adapter; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; /** * Allocator for a disk. 
This determines which StoragePool should @@ -36,7 +36,7 @@ import com.cloud.vm.VirtualMachine; public interface StoragePoolAllocator extends Adapter { //keeping since storageMgr is using this API for some existing functionalities - List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, long dcId, long podId, Long clusterId, Set avoids, int returnUpTo); + List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, long dcId, long podId, Long clusterId, Set avoids, int returnUpTo); String chooseStorageIp(VirtualMachine vm, Host host, Host storage); @@ -50,7 +50,7 @@ public interface StoragePoolAllocator extends Adapter { * @param int returnUpTo (use -1 to return all possible pools) * @return List List of storage pools that are suitable for the VM **/ - List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo); + List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo); public static int RETURN_UPTO_ALL = -1; } diff --git a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java b/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java index 6f49e527515..3a2e2e62574 100644 --- a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java +++ b/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java @@ -30,10 +30,8 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.host.Host; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume.Type; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.component.ComponentLocator; import com.cloud.vm.DiskProfile; -import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -42,12 +40,12 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen boolean _useLocalStorage; 
@Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { if (!_useLocalStorage) { return null; } - return super.allocateToPool(dskCh, VMtemplate, plan, avoid, returnUpTo); + return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo); } @Override diff --git a/server/src/com/cloud/storage/dao/VolumeDao.java b/server/src/com/cloud/storage/dao/VolumeDao.java index 1078cfb424e..b665e34bc66 100755 --- a/server/src/com/cloud/storage/dao/VolumeDao.java +++ b/server/src/com/cloud/storage/dao/VolumeDao.java @@ -19,7 +19,6 @@ package com.cloud.storage.dao; import java.util.List; -import com.cloud.exception.ConcurrentOperationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; @@ -53,4 +52,5 @@ public interface VolumeDao extends GenericDao, StateDao findReadyRootVolumesByInstance(long instanceId); + List listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId); } diff --git a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java index 1f0fc694706..b72cbd1eefc 100755 --- a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -20,6 +20,7 @@ package com.cloud.storage.dao; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -27,7 +28,6 @@ import javax.ejb.Local; import org.apache.log4j.Logger; -import com.cloud.exception.ConcurrentOperationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; @@ -35,7 +35,6 @@ 
import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; import com.cloud.utils.Pair; -import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -46,10 +45,8 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachine.State; -@Local(value=VolumeDao.class) @DB(txn=false) +@Local(value=VolumeDao.class) public class VolumeDaoImpl extends GenericDaoBase implements VolumeDao { private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class); protected final SearchBuilder DetachedAccountIdSearch; @@ -63,6 +60,10 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? and v.mirror_state = ?"; protected static final String SELECT_HYPERTYPE_FROM_VOLUME = "SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?"; + private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? " + + " AND pool.pod_id = ? AND pool.cluster_id = ? 
" + + " GROUP BY pool.id ORDER BY 2 ASC "; + @Override public List findDetachedByAccount(long accountId) { SearchCriteria sc = DetachedAccountIdSearch.create(); @@ -358,4 +359,29 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } return rows > 0; } + + @Override + public List listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + try { + String sql = ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT; + pstmt = txn.prepareAutoCloseStatement(sql); + pstmt.setLong(1, accountId); + pstmt.setLong(2, dcId); + pstmt.setLong(3, podId); + pstmt.setLong(4, clusterId); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e); + } + } } diff --git a/server/src/com/cloud/vm/dao/VMInstanceDao.java b/server/src/com/cloud/vm/dao/VMInstanceDao.java index c4e7f33c8c4..17796d89dd6 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDao.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDao.java @@ -20,7 +20,9 @@ package com.cloud.vm.dao; import java.util.Date; import java.util.List; +import java.util.Map; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; import com.cloud.vm.VMInstanceVO; @@ -84,5 +86,15 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByClusterId(long clusterId); List listVmsMigratingFromHost(Long hostId); - public Long countRunningByHostId(long hostId); + public Long countRunningByHostId(long hostId); + + Pair, Map> listClusterIdsInZoneByVmCount(long zoneId, long accountId); + + Pair, Map> listClusterIdsInPodByVmCount(long podId, long 
accountId); + + Pair, Map> listPodIdsInZoneByVmCount(long dataCenterId, long accountId); + + List listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId); + + Long countRunningByAccount(long accountId); } diff --git a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 098fa33c569..d0edac30511 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -19,8 +19,14 @@ package com.cloud.vm.dao; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; import javax.ejb.Local; @@ -38,7 +44,9 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import com.cloud.utils.db.UpdateBuilder; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Event; @@ -64,9 +72,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected final SearchBuilder HostUpSearch; protected final GenericSearchBuilder CountVirtualRoutersByAccount; protected GenericSearchBuilder CountRunningByHost; - + protected GenericSearchBuilder CountRunningByAccount; protected final Attribute _updateTimeAttr; + + private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = "SELECT host.cluster_id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE "; + private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2 = " AND host.type = 'Routing' GROUP BY host.cluster_id ORDER BY 2 ASC "; + + private static final 
String ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT pod.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host_pod_ref` pod LEFT JOIN `cloud`.`vm_instance` vm ON pod.id = vm.pod_id WHERE pod.data_center_id = ? " + + " GROUP BY pod.id ORDER BY 2 ASC "; + + private static final String ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT host.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE host.data_center_id = ? " + + " AND host.pod_id = ? AND host.cluster_id = ? AND host.type = 'Routing' " + + " GROUP BY host.id ORDER BY 2 ASC "; protected final HostDaoImpl _hostDao = ComponentLocator.inject(HostDaoImpl.class); protected VMInstanceDaoImpl() { @@ -150,6 +168,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ); CountRunningByHost.done(); + CountRunningByAccount = createSearchBuilder(Long.class); + CountRunningByAccount.select(null, Func.COUNT, null); + CountRunningByAccount.and("account", CountRunningByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ); + CountRunningByAccount.done(); + _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; } @@ -361,5 +385,123 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("host", hostId); sc.setParameters("state", State.Running); return customSearch(sc, null).get(0); + } + + @Override + public Pair, Map> listClusterIdsInZoneByVmCount(long zoneId, long accountId) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + Map clusterVmCountMap = new HashMap(); + + StringBuilder sql = new 
StringBuilder(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1); + sql.append("host.data_center_id = ?"); + sql.append(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, accountId); + pstmt.setLong(2, zoneId); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + Long clusterId = rs.getLong(1); + result.add(clusterId); + clusterVmCountMap.put(clusterId, rs.getDouble(2)); + } + return new Pair, Map>(result, clusterVmCountMap); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } + + @Override + public Pair, Map> listClusterIdsInPodByVmCount(long podId, long accountId) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + Map clusterVmCountMap = new HashMap(); + + StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1); + sql.append("host.pod_id = ?"); + sql.append(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2); + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1, accountId); + pstmt.setLong(2, podId); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + Long clusterId = rs.getLong(1); + result.add(clusterId); + clusterVmCountMap.put(clusterId, rs.getDouble(2)); + } + return new Pair, Map>(result, clusterVmCountMap); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + + } + + @Override + public Pair, Map> listPodIdsInZoneByVmCount(long dataCenterId, long accountId) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + Map podVmCountMap = new HashMap(); + try { + String sql = ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT; + pstmt = 
txn.prepareAutoCloseStatement(sql); + pstmt.setLong(1, accountId); + pstmt.setLong(2, dataCenterId); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + Long podId = rs.getLong(1); + result.add(podId); + podVmCountMap.put(podId, rs.getDouble(2)); + } + return new Pair, Map>(result, podVmCountMap); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); + } + } + + @Override + public List listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId) { + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + try { + String sql = ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT; + pstmt = txn.prepareAutoCloseStatement(sql); + pstmt.setLong(1, accountId); + pstmt.setLong(2, dcId); + pstmt.setLong(3, podId); + pstmt.setLong(4, clusterId); + + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT, e); + } + } + + @Override + public Long countRunningByAccount(long accountId){ + SearchCriteria sc = CountRunningByAccount.create(); + sc.setParameters("account", accountId); + sc.setParameters("state", State.Running); + return customSearch(sc, null).get(0); } } diff --git a/setup/db/db/schema-2213to30.sql b/setup/db/db/schema-2213to30.sql index 612ac951c2e..351d21d233f 100755 --- a/setup/db/db/schema-2213to30.sql +++ b/setup/db/db/schema-2213to30.sql @@ -229,3 +229,7 @@ CREATE TABLE `cloud`.`vm_template_details` ( ALTER TABLE `cloud`.`op_host_capacity` ADD COLUMN `created` datetime; ALTER TABLE `cloud`.`op_host_capacity` ADD COLUMN
`update_time` datetime; +INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'apply.allocation.algorithm.to.pods', 'false', 'If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation'); +INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.user.dispersion.weight', 1, 'Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 - weight of user dispersion)'); +DELETE FROM configuration WHERE name='use.user.concentrated.pod.allocation'; +UPDATE configuration SET description = '[''random'', ''firstfit'', ''userdispersing'', ''userconcentratedpod''] : Order in which hosts within a cluster will be considered for VM/volume allocation.' WHERE name = 'vm.allocation.algorithm'; \ No newline at end of file