mirror of https://github.com/apache/cloudstack.git
Bug 8791 user dispersing allocator
Changes:
- Added two new deployment planners, 'UserDispersingPlanner' and 'UserConcentratedPodPlanner', to the DeploymentPlanners
- Planner can be chosen by setting the global config variable 'vm.allocation.algorithm' to either of the following values:
('random', 'firstfit', 'userdispersing', 'userconcentratedpod')
- By default, the value is 'random'. When the value is 'random', FirstFitPlanner is invoked as before that shuffles the resource lists.
- Now Admin can choose whether the deployment heuristic should be applied starting at cluster or pod level. This can be done by using the
global config variable 'apply.allocation.algorithm.to.pods' which is false by default. Thus by default as earlier, planner starts at clusters directly.
'UserConcentratedPodPlanner' changes:
- Earlier to 3.0, FirstFitPlanner used to reorder the clusters in case this heuristic was chosen.
- Now this is done by a separate planner and is applied only when 'vm.allocation.algorithm' is set to this planner
- It reorders the capacity-based clusters/pods such that pods having a higher number of Running VMs for the given account are tried first.
- Note that this userconcentration is applied only to pods and clusters. Not to hosts or storagepools within a cluster.
'UserDispersingPlanner' changes:
- 'UserDispersingPlanner' reorders the capacity-ordered pods and clusters based on the number of 'Running' VMs for the given account, in ascending order. The aim is to first choose those pods/clusters that have fewer Running VMs for the given account.
- Admin can provide weights to capacity and user dispersion so that both parameters get considered in reordering the pods/clusters. This can be done by setting
the global config parameter 'vm.user.dispersion.weight'. Default value is 1. Thus if this planner is chosen, by default, ordering will be done only by number of Running Vms, unless the weight is changed.
- HostAllocators and StoragePoolAllocators also reorder the hosts and pools in ascending order of the number of Running VMs / Ready Volumes, respectively, for the given account. Thus they try to choose the host or pool within a cluster that has fewer VMs for the account.
This commit is contained in:
parent
36f67762d0
commit
313e6ca284
|
|
@ -73,6 +73,13 @@ public interface DeploymentPlanner extends Adapter {
|
|||
*/
|
||||
boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid);
|
||||
|
||||
public enum AllocationAlgorithm {
|
||||
random,
|
||||
firstfit,
|
||||
userdispersing,
|
||||
userconcentratedpod;
|
||||
}
|
||||
|
||||
public static class ExcludeList {
|
||||
private Set<Long> _dcIds;
|
||||
private Set<Long> _podIds;
|
||||
|
|
|
|||
|
|
@ -93,6 +93,8 @@
|
|||
</adapters>
|
||||
<adapters key="com.cloud.deploy.DeploymentPlanner">
|
||||
<adapter name="First Fit" class="com.cloud.deploy.FirstFitPlanner"/>
|
||||
<adapter name="UserDispersing" class="com.cloud.deploy.UserDispersingPlanner"/>
|
||||
<adapter name="UserConcentratedPod" class="com.cloud.deploy.UserConcentratedPodPlanner"/>
|
||||
<adapter name="BareMetal Fit" class="com.cloud.deploy.BareMetalPlanner"/>
|
||||
</adapters>
|
||||
<adapters key="com.cloud.network.element.NetworkElement">
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ package com.cloud.agent.manager.allocator.impl;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
|
|
@ -31,6 +32,7 @@ import com.cloud.agent.manager.allocator.HostAllocator;
|
|||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.host.DetailVO;
|
||||
import com.cloud.host.Host;
|
||||
|
|
@ -48,6 +50,7 @@ import com.cloud.storage.GuestOSVO;
|
|||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.dao.GuestOSCategoryDao;
|
||||
import com.cloud.storage.dao.GuestOSDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.uservm.UserVm;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.component.ComponentLocator;
|
||||
|
|
@ -99,6 +102,7 @@ public class FirstFitAllocator implements HostAllocator {
|
|||
Long clusterId = plan.getClusterId();
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
|
||||
Account account = vmProfile.getOwner();
|
||||
|
||||
if (type == Host.Type.Storage) {
|
||||
// FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not
|
||||
|
|
@ -158,13 +162,15 @@ public class FirstFitAllocator implements HostAllocator {
|
|||
|
||||
}
|
||||
|
||||
return allocateTo(offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity);
|
||||
return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account);
|
||||
}
|
||||
|
||||
protected List<Host> allocateTo(ServiceOffering offering, VMTemplateVO template, ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity) {
|
||||
protected List<Host> allocateTo(DeploymentPlan plan, ServiceOffering offering, VMTemplateVO template, ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity, Account account) {
|
||||
if (_allocationAlgorithm.equals("random")) {
|
||||
// Shuffle this so that we don't check the hosts in the same order.
|
||||
Collections.shuffle(hosts);
|
||||
}else if(_allocationAlgorithm.equals("userdispersing")){
|
||||
hosts = reorderHostsByNumberOfVms(plan, hosts, account);
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
|
|
@ -230,6 +236,36 @@ public class FirstFitAllocator implements HostAllocator {
|
|||
return suitableHosts;
|
||||
}
|
||||
|
||||
private List<HostVO> reorderHostsByNumberOfVms(DeploymentPlan plan, List<HostVO> hosts, Account account) {
|
||||
if(account == null){
|
||||
return hosts;
|
||||
}
|
||||
long dcId = plan.getDataCenterId();
|
||||
Long podId = plan.getPodId();
|
||||
Long clusterId = plan.getClusterId();
|
||||
|
||||
List<Long> hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount);
|
||||
}
|
||||
|
||||
//now filter the given list of Hosts by this ordered list
|
||||
Map<Long, HostVO> hostMap = new HashMap<Long, HostVO>();
|
||||
for (HostVO host : hosts) {
|
||||
hostMap.put(host.getId(), host);
|
||||
}
|
||||
List<Long> matchingHostIds = new ArrayList<Long>(hostMap.keySet());
|
||||
|
||||
hostIdsByVmCount.retainAll(matchingHostIds);
|
||||
|
||||
List<HostVO> reorderedHosts = new ArrayList<HostVO>();
|
||||
for(Long id: hostIdsByVmCount){
|
||||
reorderedHosts.add(hostMap.get(id));
|
||||
}
|
||||
|
||||
return reorderedHosts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isVirtualMachineUpgradable(UserVm vm, ServiceOffering offering) {
|
||||
// currently we do no special checks to rule out a VM being upgradable to an offering, so
|
||||
|
|
@ -361,7 +397,9 @@ public class FirstFitAllocator implements HostAllocator {
|
|||
_factor = NumbersUtil.parseFloat(opFactor, 1);
|
||||
|
||||
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
|
||||
if (allocationAlgorithm != null && (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("firstfit"))) {
|
||||
if (allocationAlgorithm != null && (allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.random.toString())
|
||||
|| allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())
|
||||
|| allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString()))) {
|
||||
_allocationAlgorithm = allocationAlgorithm;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,9 +19,11 @@
|
|||
package com.cloud.capacity.dao;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.capacity.CapacityVO;
|
||||
import com.cloud.capacity.dao.CapacityDaoImpl.SummedCapacity;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
public interface CapacityDao extends GenericDao<CapacityVO, Long> {
|
||||
|
|
@ -31,6 +33,9 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {
|
|||
boolean removeBy(Short capacityType, Long zoneId, Long podId, Long clusterId, Long hostId);
|
||||
List<SummedCapacity> findByClusterPodZone(Long zoneId, Long podId, Long clusterId);
|
||||
List<SummedCapacity> findNonSharedStorageForClusterPodZone(Long zoneId,Long podId, Long clusterId);
|
||||
List<Long> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor);
|
||||
List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId);
|
||||
Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor);
|
||||
List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId);
|
||||
|
||||
List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor);
|
||||
Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityType, float cpuOverprovisioningFactor);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,9 @@ import java.sql.PreparedStatement;
|
|||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
|
|
@ -33,6 +35,7 @@ import com.cloud.capacity.CapacityVO;
|
|||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePoolVO;
|
||||
import com.cloud.storage.dao.StoragePoolDaoImpl;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.component.ComponentLocator;
|
||||
import com.cloud.utils.db.Filter;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
|
|
@ -68,9 +71,17 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
"AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " +
|
||||
"JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0";
|
||||
|
||||
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id FROM `cloud`.`op_host_capacity` WHERE " ;
|
||||
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ;
|
||||
private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
|
||||
|
||||
private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " +
|
||||
" ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " +
|
||||
" capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " +
|
||||
" AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " +
|
||||
" capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) ";
|
||||
|
||||
private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " +
|
||||
" AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC";
|
||||
|
||||
public CapacityDaoImpl() {
|
||||
_hostIdTypeSearch = createSearchBuilder();
|
||||
|
|
@ -380,10 +391,11 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<Long> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
|
||||
public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
|
||||
|
||||
StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1);
|
||||
|
||||
|
|
@ -395,15 +407,48 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2);
|
||||
try {
|
||||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, id);
|
||||
pstmt.setShort(2, capacityTypeForOrdering);
|
||||
|
||||
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
|
||||
pstmt.setFloat(3, cpuOverprovisioningFactor);
|
||||
pstmt.setFloat(1, cpuOverprovisioningFactor);
|
||||
pstmt.setFloat(4, cpuOverprovisioningFactor);
|
||||
}else{
|
||||
pstmt.setFloat(3, 1);
|
||||
pstmt.setFloat(1, 1);
|
||||
pstmt.setFloat(4, 1);
|
||||
}
|
||||
|
||||
pstmt.setLong(2, id);
|
||||
pstmt.setShort(3, capacityTypeForOrdering);
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long clusterId = rs.getLong(1);
|
||||
result.add(clusterId);
|
||||
clusterCapacityMap.put(clusterId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, clusterCapacityMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + sql, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + sql, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Long> listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
|
||||
StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITIES);
|
||||
|
||||
try {
|
||||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, zoneId);
|
||||
pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU);
|
||||
pstmt.setFloat(3, cpuOverprovisioningFactor);
|
||||
pstmt.setLong(4, requiredCpu);
|
||||
pstmt.setLong(5, zoneId);
|
||||
pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY);
|
||||
pstmt.setFloat(7, 1);
|
||||
pstmt.setLong(8, requiredRam);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
result.add(rs.getLong(1));
|
||||
|
|
@ -414,5 +459,40 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
|
|||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + sql, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<List<Long>, Map<Long, Double>> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering, float cpuOverprovisioningFactor) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
Map<Long, Double> podCapacityMap = new HashMap<Long, Double>();
|
||||
|
||||
StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY);
|
||||
try {
|
||||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(2, zoneId);
|
||||
pstmt.setShort(3, capacityTypeForOrdering);
|
||||
|
||||
if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){
|
||||
pstmt.setFloat(1, cpuOverprovisioningFactor);
|
||||
pstmt.setFloat(4, cpuOverprovisioningFactor);
|
||||
}else{
|
||||
pstmt.setFloat(1, 1);
|
||||
pstmt.setFloat(4, 1);
|
||||
}
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long podId = rs.getLong(1);
|
||||
result.add(podId);
|
||||
podCapacityMap.put(podId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, podCapacityMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + sql, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + sql, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -209,8 +209,10 @@ public enum Config {
|
|||
|
||||
ControlCidr("Advanced", ManagementServer.class, String.class, "control.cidr", "169.254.0.0/16", "Changes the cidr for the control network traffic. Defaults to using link local. Must be unique within pods", null),
|
||||
ControlGateway("Advanced", ManagementServer.class, String.class, "control.gateway", "169.254.0.1", "gateway for the control network traffic", null),
|
||||
UseUserConcentratedPodAllocation("Advanced", ManagementServer.class, Boolean.class, "use.user.concentrated.pod.allocation", "true", "If true, deployment planner applies the user concentration heuristic during VM resource allocation", "true,false"),
|
||||
HostCapacityTypeToOrderClusters("Advanced", ManagementServer.class, String.class, "host.capacityType.to.order.clusters", "CPU", "The host capacity type (CPU or RAM) is used by deployment planner to order clusters during VM resource allocation", "CPU,RAM"),
|
||||
ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"),
|
||||
VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 – weight of user dispersion)", null),
|
||||
VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null),
|
||||
EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", "The endpoint callback URL"),
|
||||
ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"),
|
||||
ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"),
|
||||
|
|
@ -226,7 +228,6 @@ public enum Config {
|
|||
OvmGuestNetwork("Advanced", ManagementServer.class, String.class, "ovm.guest.network.device", null, "Specify the private bridge on host for private network", null),
|
||||
|
||||
// XenServer
|
||||
VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "If 'random', hosts within a pod will be randomly considered for VM/volume allocation. If 'firstfit', they will be considered on a first-fit basis.", null),
|
||||
XenPublicNetwork("Network", ManagementServer.class, String.class, "xen.public.network.device", null, "[ONLY IF THE PUBLIC NETWORK IS ON A DEDICATED NIC]:The network name label of the physical device dedicated to the public network on a XenServer host", null),
|
||||
XenStorageNetwork1("Network", ManagementServer.class, String.class, "xen.storage.network.device1", "cloud-stor1", "Specify when there are storage networks", null),
|
||||
XenStorageNetwork2("Network", ManagementServer.class, String.class, "xen.storage.network.device2", "cloud-stor2", "Specify when there are storage networks", null),
|
||||
|
|
|
|||
|
|
@ -18,10 +18,9 @@
|
|||
|
||||
package com.cloud.dc.dao;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Vector;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.dc.HostPodVO;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
|
||||
|
|
@ -30,6 +29,8 @@ public interface HostPodDao extends GenericDao<HostPodVO, Long> {
|
|||
|
||||
public HostPodVO findByName(String name, long dcId);
|
||||
|
||||
public HashMap<Long, List<Object>> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip);
|
||||
public HashMap<Long, List<Object>> getCurrentPodCidrSubnets(long zoneId, long podIdToSkip);
|
||||
|
||||
public List<Long> listDisabledPods(long zoneId);
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,9 +30,12 @@ import javax.ejb.Local;
|
|||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.dc.HostPodVO;
|
||||
import com.cloud.org.Grouping;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.GenericSearchBuilder;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
|
||||
@Local(value={HostPodDao.class})
|
||||
|
|
@ -40,7 +43,7 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
|
|||
private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class);
|
||||
|
||||
protected SearchBuilder<HostPodVO> DataCenterAndNameSearch;
|
||||
protected SearchBuilder<HostPodVO> DataCenterIdSearch;
|
||||
protected SearchBuilder<HostPodVO> DataCenterIdSearch;
|
||||
|
||||
protected HostPodDaoImpl() {
|
||||
DataCenterAndNameSearch = createSearchBuilder();
|
||||
|
|
@ -50,7 +53,7 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
|
|||
|
||||
DataCenterIdSearch = createSearchBuilder();
|
||||
DataCenterIdSearch.and("dcId", DataCenterIdSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
|
||||
DataCenterIdSearch.done();
|
||||
DataCenterIdSearch.done();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -111,5 +114,21 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
|
|||
boolean result = super.remove(id);
|
||||
txn.commit();
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Long> listDisabledPods(long zoneId) {
|
||||
GenericSearchBuilder<HostPodVO, Long> podIdSearch = createSearchBuilder(Long.class);
|
||||
podIdSearch.selectField(podIdSearch.entity().getId());
|
||||
podIdSearch.and("dataCenterId", podIdSearch.entity().getDataCenterId(), Op.EQ);
|
||||
podIdSearch.and("allocationState", podIdSearch.entity().getAllocationState(), Op.EQ);
|
||||
podIdSearch.done();
|
||||
|
||||
|
||||
SearchCriteria<Long> sc = podIdSearch.create();
|
||||
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
|
||||
sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled);
|
||||
return customSearch(sc, null);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
|
|
@ -46,7 +47,6 @@ import com.cloud.dc.Pod;
|
|||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.dc.dao.DataCenterDao;
|
||||
import com.cloud.dc.dao.HostPodDao;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.exception.InsufficientServerCapacityException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
|
|
@ -105,13 +105,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
protected Adapters<StoragePoolAllocator> _storagePoolAllocators;
|
||||
@Inject(adapter=HostAllocator.class)
|
||||
protected Adapters<HostAllocator> _hostAllocators;
|
||||
protected String _allocationAlgorithm = "random";
|
||||
|
||||
|
||||
@Override
|
||||
public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
DeploymentPlan plan, ExcludeList avoid)
|
||||
throws InsufficientServerCapacityException {
|
||||
String _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
|
||||
VirtualMachine vm = vmProfile.getVirtualMachine();
|
||||
ServiceOffering offering = vmProfile.getServiceOffering();
|
||||
DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn());
|
||||
|
|
@ -123,7 +123,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("In FirstFitPlanner:: plan");
|
||||
s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
|
||||
|
||||
s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
|
||||
", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
|
||||
|
|
@ -228,7 +228,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
|
||||
if (cluster != null ){
|
||||
clusterList.add(clusterIdSpecified);
|
||||
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm);
|
||||
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
|
||||
}else{
|
||||
s_logger.debug("The specified cluster cannot be found, returning.");
|
||||
avoid.addCluster(plan.getClusterId());
|
||||
|
|
@ -241,98 +241,164 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
HostPodVO pod = _podDao.findById(podIdSpecified);
|
||||
if (pod != null) {
|
||||
//list clusters under this pod by cpu and ram capacity
|
||||
clusterList = listClustersByCapacity(podIdSpecified, cpu_requested, ram_requested, avoid, false, cpuOverprovisioningFactor);
|
||||
if(!clusterList.isEmpty()){
|
||||
if(avoid.getClustersToAvoid() != null){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid());
|
||||
}
|
||||
clusterList.removeAll(avoid.getClustersToAvoid());
|
||||
}
|
||||
|
||||
List<Long> disabledClusters = listDisabledClusters(plan.getDataCenterId(), podIdSpecified);
|
||||
if(!disabledClusters.isEmpty()){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Removing from the clusterId list these clusters that are disabled: "+ disabledClusters);
|
||||
}
|
||||
clusterList.removeAll(disabledClusters);
|
||||
}
|
||||
|
||||
DeployDestination dest = checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm);
|
||||
if(dest == null){
|
||||
avoid.addPod(plan.getPodId());
|
||||
}
|
||||
return dest;
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No clusters found under this pod, having a host with enough capacity, returning.");
|
||||
}
|
||||
DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
|
||||
if(dest == null){
|
||||
avoid.addPod(plan.getPodId());
|
||||
return null;
|
||||
}
|
||||
return dest;
|
||||
} else {
|
||||
s_logger.debug("The specified Pod cannot be found, returning.");
|
||||
avoid.addPod(plan.getPodId());
|
||||
return null;
|
||||
}
|
||||
}else{
|
||||
//consider all clusters under this zone.
|
||||
s_logger.debug("Searching all possible resources under this Zone: "+ plan.getDataCenterId());
|
||||
//list clusters under this zone by cpu and ram capacity
|
||||
List<Long> prioritizedClusterIds = listClustersByCapacity(plan.getDataCenterId(), cpu_requested, ram_requested, avoid, true, cpuOverprovisioningFactor);
|
||||
if(!prioritizedClusterIds.isEmpty()){
|
||||
if(avoid.getClustersToAvoid() != null){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid());
|
||||
}
|
||||
prioritizedClusterIds.removeAll(avoid.getClustersToAvoid());
|
||||
}
|
||||
List<Long> disabledClusters = listDisabledClusters(plan.getDataCenterId(), null);
|
||||
if(!disabledClusters.isEmpty()){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: "+ disabledClusters);
|
||||
}
|
||||
prioritizedClusterIds.removeAll(disabledClusters);
|
||||
}
|
||||
|
||||
boolean applyAllocationAtPods = Boolean.parseBoolean(_configDao.getValue(Config.ApplyAllocationAlgorithmToPods.key()));
|
||||
if(applyAllocationAtPods){
|
||||
//start scan at all pods under this zone.
|
||||
return scanPodsForDestination(vmProfile, plan, avoid);
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No clusters found having a host with enough capacity, returning.");
|
||||
}
|
||||
return null;
|
||||
}
|
||||
if(!prioritizedClusterIds.isEmpty()){
|
||||
boolean applyUserConcentrationPodHeuristic = Boolean.parseBoolean(_configDao.getValue(Config.UseUserConcentratedPodAllocation.key()));
|
||||
if(applyUserConcentrationPodHeuristic && vmProfile.getOwner() != null){
|
||||
//user has VMs in certain pods. - prioritize those pods first
|
||||
//UserConcentratedPod strategy
|
||||
long accountId = vmProfile.getOwner().getAccountId();
|
||||
List<Long> podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId);
|
||||
if(!podIds.isEmpty()){
|
||||
if(avoid.getPodsToAvoid() != null){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Removing from the pod list these pods from avoid set: "+ avoid.getPodsToAvoid());
|
||||
}
|
||||
podIds.removeAll(avoid.getPodsToAvoid());
|
||||
}
|
||||
clusterList = reorderClustersByPods(prioritizedClusterIds, podIds);
|
||||
}else{
|
||||
clusterList = prioritizedClusterIds;
|
||||
}
|
||||
}else{
|
||||
clusterList = prioritizedClusterIds;
|
||||
}
|
||||
return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc, _allocationAlgorithm);
|
||||
}else{
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
|
||||
}
|
||||
return null;
|
||||
//start scan at clusters under this zone.
|
||||
return scanClustersForDestinationInZoneOrPod(plan.getDataCenterId(), true, vmProfile, plan, avoid);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
 * Pod-level scan: finds pods in the zone with enough aggregate CPU/RAM capacity
 * for the VM, filters out avoided and disabled pods, lets the planner reorder
 * the remainder via reorderPods(), then scans each pod's clusters in turn for a
 * deployment destination.
 *
 * @param vmProfile profile of the VM being placed; its service offering drives
 *                  the CPU (cpu * speed) and RAM requirements
 * @param plan      deployment plan; only the data center id is read here
 * @param avoid     exclude list; pods in it are skipped up front, and each pod
 *                  whose cluster scan fails is added to it before moving on
 * @return a DeployDestination if some pod can host the VM, otherwise null
 */
private DeployDestination scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){

    ServiceOffering offering = vmProfile.getServiceOffering();
    // Required CPU is total MHz (cores * speed); RAM is converted from MB to bytes.
    int requiredCpu = offering.getCpu() * offering.getSpeed();
    long requiredRam = offering.getRamSize() * 1024L * 1024L;
    // Read the overprovisioning factor fresh each call so config changes take
    // effect without a restart; defaults to 1 when unset/unparsable.
    String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
    float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);

    //list pods under this zone by cpu and ram capacity
    List<Long> prioritizedPodIds = new ArrayList<Long>();
    Pair<List<Long>, Map<Long, Double>> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor);
    List<Long> podsWithCapacity = podCapacityInfo.first();

    if(!podsWithCapacity.isEmpty()){
        // Drop pods the caller has already ruled out.
        if(avoid.getPodsToAvoid() != null){
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Removing from the podId list these pods from avoid set: "+ avoid.getPodsToAvoid());
            }
            podsWithCapacity.removeAll(avoid.getPodsToAvoid());
        }
        // Drop pods whose allocation state is disabled.
        List<Long> disabledPods = listDisabledPods(plan.getDataCenterId());
        if(!disabledPods.isEmpty()){
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Removing from the podId list these pods that are disabled: "+ disabledPods);
            }
            podsWithCapacity.removeAll(disabledPods);
        }
    }else{
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No pods found having a host with enough capacity, returning.");
        }
        return null;
    }

    // Re-check emptiness: the avoid/disabled filtering above may have removed everything.
    if(!podsWithCapacity.isEmpty()){

        // Planner-specific heuristic hook (identity ordering in FirstFitPlanner).
        prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan);

        //loop over pods
        for(Long podId : prioritizedPodIds){
            s_logger.debug("Checking resources under Pod: "+podId);
            DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid);
            if(dest != null){
                return dest;
            }
            // This pod failed; record it so deeper retries don't revisit it.
            avoid.addPod(podId);
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No Pods found for destination, returning.");
        }
        return null;
    }else{
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning.");
        }
        return null;
    }
}
|
||||
|
||||
/**
 * Cluster-level scan within either a whole zone or a single pod.
 * Lists clusters with enough aggregate CPU/RAM capacity, removes avoided and
 * disabled clusters, applies the planner's reorderClusters() heuristic, and
 * finally checks the surviving clusters for a concrete host/storage destination.
 *
 * @param id        zone id when isZone is true, otherwise a pod id
 * @param isZone    true to scan all clusters in the zone, false to restrict the
 *                  scan to the given pod
 * @param vmProfile profile of the VM being placed
 * @param plan      deployment plan (data center id is used for lookups)
 * @param avoid     exclude list of clusters/pods/hosts to skip
 * @return a DeployDestination if a suitable cluster is found, otherwise null
 */
private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){

    VirtualMachine vm = vmProfile.getVirtualMachine();
    ServiceOffering offering = vmProfile.getServiceOffering();
    DataCenter dc = _dcDao.findById(vm.getDataCenterIdToDeployIn());
    // Required CPU is total MHz (cores * speed); RAM is converted from MB to bytes.
    int requiredCpu = offering.getCpu() * offering.getSpeed();
    long requiredRam = offering.getRamSize() * 1024L * 1024L;
    // Read the overprovisioning factor fresh each call; defaults to 1.
    String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key());
    float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1);

    //list clusters under this zone by cpu and ram capacity
    Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, cpuOverprovisioningFactor);
    List<Long> prioritizedClusterIds = clusterCapacityInfo.first();
    if(!prioritizedClusterIds.isEmpty()){
        // Drop clusters the caller has already ruled out.
        if(avoid.getClustersToAvoid() != null){
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Removing from the clusterId list these clusters from avoid set: "+ avoid.getClustersToAvoid());
            }
            prioritizedClusterIds.removeAll(avoid.getClustersToAvoid());
        }

        // Drop disabled clusters (zone-wide, or only those under the given pod).
        List<Long> disabledClusters = new ArrayList<Long>();
        if(isZone){
            disabledClusters = listDisabledClusters(plan.getDataCenterId(), null);
        }else{
            disabledClusters = listDisabledClusters(plan.getDataCenterId(), id);
        }
        if(!disabledClusters.isEmpty()){
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Removing from the clusterId list these clusters that are disabled/clusters under disabled pods: "+ disabledClusters);
            }
            prioritizedClusterIds.removeAll(disabledClusters);
        }
    }else{
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No clusters found having a host with enough capacity, returning.");
        }
        return null;
    }
    // Re-check emptiness: the avoid/disabled filtering above may have removed everything.
    if(!prioritizedClusterIds.isEmpty()){
        // Planner-specific heuristic hook (identity ordering in FirstFitPlanner).
        List<Long> clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan);
        return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
    }else{
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
        }
        return null;
    }
}
|
||||
|
||||
/**
|
||||
* This method should reorder the given list of Cluster Ids by applying any necessary heuristic
|
||||
* for this planner
|
||||
* For FirstFitPlanner there is no specific heuristic to be applied
|
||||
* other than the capacity based ordering which is done by default.
|
||||
* @return List<Long> ordered list of Cluster Ids
|
||||
*/
|
||||
protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
|
||||
List<Long> reordersClusterIds = clusterCapacityInfo.first();
|
||||
return reordersClusterIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* This method should reorder the given list of Pod Ids by applying any necessary heuristic
|
||||
* for this planner
|
||||
* For FirstFitPlanner there is no specific heuristic to be applied
|
||||
* other than the capacity based ordering which is done by default.
|
||||
* @return List<Long> ordered list of Pod Ids
|
||||
*/
|
||||
protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
|
||||
List<Long> podIdsByCapacity = podCapacityInfo.first();
|
||||
return podIdsByCapacity;
|
||||
}
|
||||
|
||||
private List<Long> listDisabledClusters(long zoneId, Long podId){
|
||||
List<Long> disabledClusters = _clusterDao.listDisabledClusters(zoneId, podId);
|
||||
if(podId == null){
|
||||
|
|
@ -343,6 +409,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
return disabledClusters;
|
||||
}
|
||||
|
||||
private List<Long> listDisabledPods(long zoneId){
|
||||
List<Long> disabledPods = _podDao.listDisabledPods(zoneId);
|
||||
return disabledPods;
|
||||
}
|
||||
|
||||
private Map<Short,Float> getCapacityThresholdMap(){
|
||||
// Lets build this real time so that the admin wont have to restart MS if he changes these values
|
||||
Map<Short,Float> disableThresholdMap = new HashMap<Short, Float>();
|
||||
|
|
@ -409,7 +480,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
}
|
||||
|
||||
private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
|
||||
DeploymentPlan plan, ExcludeList avoid, DataCenter dc, String _allocationAlgorithm){
|
||||
DeploymentPlan plan, ExcludeList avoid, DataCenter dc){
|
||||
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("ClusterId List to consider: " + clusterList);
|
||||
|
|
@ -475,56 +546,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
return null;
|
||||
}
|
||||
|
||||
private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Reordering cluster list as per pods ordered by user concentration");
|
||||
}
|
||||
|
||||
Map<Long, List<Long>> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds);
|
||||
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Pod To cluster Map is: "+podClusterMap );
|
||||
}
|
||||
|
||||
List<Long> reorderedClusters = new ArrayList<Long>();
|
||||
for (Long pod : podIds){
|
||||
if(podClusterMap.containsKey(pod)){
|
||||
List<Long> clustersOfThisPod = podClusterMap.get(pod);
|
||||
if(clustersOfThisPod != null){
|
||||
for(Long clusterId : clusterIds){
|
||||
if(clustersOfThisPod.contains(clusterId)){
|
||||
reorderedClusters.add(clusterId);
|
||||
}
|
||||
}
|
||||
clusterIds.removeAll(clustersOfThisPod);
|
||||
}
|
||||
}
|
||||
}
|
||||
reorderedClusters.addAll(clusterIds);
|
||||
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("Reordered cluster list: " + reorderedClusters);
|
||||
}
|
||||
return reorderedClusters;
|
||||
}
|
||||
|
||||
protected List<Long> listPodsByUserConcentration(long zoneId, long accountId){
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId);
|
||||
}
|
||||
|
||||
List<Long> prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId);
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods);
|
||||
}
|
||||
|
||||
return prioritizedPods;
|
||||
}
|
||||
|
||||
protected List<Long> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){
|
||||
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){
|
||||
//look at the aggregate available cpu and ram per cluster
|
||||
//although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot
|
||||
|
||||
|
|
@ -545,7 +568,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity);
|
||||
}
|
||||
List<Long> clusterIdsOrderedByAggregateCapacity = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor);
|
||||
Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor);
|
||||
List<Long> clusterIdsOrderedByAggregateCapacity = result.first();
|
||||
//only keep the clusters that have enough capacity to host this VM
|
||||
if (s_logger.isTraceEnabled()) {
|
||||
s_logger.trace("ClusterId List in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity);
|
||||
|
|
@ -556,9 +580,47 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
s_logger.trace("ClusterId List having enough CPU and RAM capacity & in order of aggregate capacity: " + clusterIdsOrderedByAggregateCapacity);
|
||||
}
|
||||
|
||||
return clusterIdsOrderedByAggregateCapacity;
|
||||
return result;
|
||||
|
||||
}
|
||||
|
||||
/**
 * Lists pods of the zone that have (at least one host with) enough CPU and RAM
 * for the VM, ordered by aggregate pod capacity.
 * The ordering capacity type (CPU vs RAM) is read from configuration on every
 * call so an admin change takes effect without a restart.
 *
 * @param zoneId                     zone to search
 * @param requiredCpu                required CPU in MHz (cores * speed)
 * @param requiredRam                required RAM in bytes
 * @param cpuOverprovisioningFactor  factor applied to host CPU capacity checks
 * @return Pair of (pod ids ordered by aggregate capacity, filtered to pods with
 *         enough host capacity; map of pod id to its capacity value). Note the
 *         pair's first list is filtered in place via retainAll before returning.
 */
protected Pair<List<Long>, Map<Long, Double>> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){
    //look at the aggregate available cpu and ram per pod
    //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot

    //we need pods having enough cpu AND RAM to host this particular VM and order them by aggregate pod capacity
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("Listing pods in order of aggregate capacity, that have (atleast one host with) enough CPU and RAM capacity under this Zone: "+zoneId);
    }
    // Choose the capacity type used for ordering; CPU unless configured as RAM.
    String capacityTypeToOrder = _configDao.getValue(Config.HostCapacityTypeToOrderClusters.key());
    short capacityType = CapacityVO.CAPACITY_TYPE_CPU;
    if("RAM".equalsIgnoreCase(capacityTypeToOrder)){
        capacityType = CapacityVO.CAPACITY_TYPE_MEMORY;
    }

    if (s_logger.isDebugEnabled()) {
        s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor);
    }
    List<Long> podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType, cpuOverprovisioningFactor);
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity);
    }
    Pair<List<Long>, Map<Long, Double>> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType, cpuOverprovisioningFactor);
    List<Long> podIdsOrderedByAggregateCapacity = result.first();
    //only keep the pods that have enough capacity to host this VM
    if (s_logger.isTraceEnabled()) {
        s_logger.trace("PodId List in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity);
    }
    // Intersect in place: result.first() is the same list instance, so the
    // returned pair reflects this filtering.
    podIdsOrderedByAggregateCapacity.retainAll(podIdswithEnoughCapacity);

    if (s_logger.isTraceEnabled()) {
        s_logger.trace("PodId List having enough CPU and RAM capacity & in order of aggregate capacity: " + podIdsOrderedByAggregateCapacity);
    }

    return result;
}
|
||||
|
||||
|
||||
protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools){
|
||||
s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
|
||||
|
|
@ -706,7 +768,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
Enumeration<StoragePoolAllocator> enPool = _storagePoolAllocators.enumeration();
|
||||
while (enPool.hasMoreElements()) {
|
||||
final StoragePoolAllocator allocator = enPool.nextElement();
|
||||
final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile.getTemplate(), plan, avoid, returnUpTo);
|
||||
final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
|
||||
if (suitablePools != null && !suitablePools.isEmpty()) {
|
||||
suitableVolumeStoragePools.put(toBeCreated, suitablePools);
|
||||
foundPotentialPools = true;
|
||||
|
|
@ -740,8 +802,21 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
|||
|
||||
@Override
|
||||
public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
|
||||
return vm.getHypervisorType() != HypervisorType.BareMetal;
|
||||
if(vm.getHypervisorType() != HypervisorType.BareMetal){
|
||||
//check the allocation strategy
|
||||
if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
 * Configures the planner: runs the base configuration, then caches the globally
 * configured VM allocation algorithm so canHandle() can match against it.
 *
 * @param name   component name (passed through to the base class)
 * @param params component parameters (passed through to the base class)
 * @return always true
 * @throws ConfigurationException if the base class configuration fails
 */
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
    super.configure(name, params);
    // Cached at configure time; a change to vm.allocation.algorithm requires
    // a management-server restart to take effect here.
    _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key());
    return true;
}
|
||||
|
||||
private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){
|
||||
// Check if the zone exists in the system
|
||||
|
|
|
|||
|
|
@ -0,0 +1,157 @@
|
|||
/**
|
||||
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.deploy;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
/**
 * Deployment planner that concentrates an account's VMs: pods (and the clusters
 * under them) that already run VMs for the deploying account are tried first.
 * Selected when the global config 'vm.allocation.algorithm' is set to
 * 'userconcentratedpod'. Host/storage ordering within a cluster is unchanged.
 */
public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {

    private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);

    /**
     * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
     * for this planner
     * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account
     * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level.
     * @return List<Long> ordered list of Cluster Ids
     */
    @Override
    protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
        List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
        // Heuristic only makes sense zone-wide and for a known owner; otherwise
        // fall back to plain capacity ordering.
        if(vmProfile.getOwner() == null || !isZone){
            return clusterIdsByCapacity;
        }
        return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId());
    }

    /**
     * Reorders the capacity-ordered clusters so that clusters under pods already
     * hosting this account's VMs come first; returns the input order unchanged
     * when the account has no VMs in any pod of the zone.
     */
    private List<Long> applyUserConcentrationPodHeuristicToClusters(long zoneId, List<Long> prioritizedClusterIds, long accountId){
        //user has VMs in certain pods. - prioritize those pods first
        //UserConcentratedPod strategy
        List<Long> clusterList = new ArrayList<Long>();
        List<Long> podIds = listPodsByUserConcentration(zoneId, accountId);
        if(!podIds.isEmpty()){
            clusterList = reorderClustersByPods(prioritizedClusterIds, podIds);
        }else{
            clusterList = prioritizedClusterIds;
        }
        return clusterList;
    }

    /**
     * Reorders cluster ids so clusters belonging to the given (priority-ordered)
     * pods come first, keeping capacity order within each pod; clusters whose pod
     * is not listed are appended last in original order.
     * NOTE(review): this mutates the caller-supplied clusterIds list via
     * removeAll — callers must not rely on the input list afterwards.
     */
    private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Reordering cluster list as per pods ordered by user concentration");
        }

        Map<Long, List<Long>> podClusterMap = _clusterDao.getPodClusterIdMap(clusterIds);

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Pod To cluster Map is: "+podClusterMap );
        }

        List<Long> reorderedClusters = new ArrayList<Long>();
        for (Long pod : podIds){
            if(podClusterMap.containsKey(pod)){
                List<Long> clustersOfThisPod = podClusterMap.get(pod);
                if(clustersOfThisPod != null){
                    // Preserve the capacity-based order among this pod's clusters.
                    for(Long clusterId : clusterIds){
                        if(clustersOfThisPod.contains(clusterId)){
                            reorderedClusters.add(clusterId);
                        }
                    }
                    // Already placed; exclude from consideration for later pods.
                    clusterIds.removeAll(clustersOfThisPod);
                }
            }
        }
        // Remaining clusters (pods without this account's VMs) go last.
        reorderedClusters.addAll(clusterIds);

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Reordered cluster list: " + reorderedClusters);
        }
        return reorderedClusters;
    }

    /**
     * Lists pods in the zone that already host VMs of the given account, as
     * reported by the VM dao.
     */
    protected List<Long> listPodsByUserConcentration(long zoneId, long accountId){

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Applying UserConcentratedPod heuristic for account: "+ accountId);
        }

        List<Long> prioritizedPods = _vmDao.listPodIdsHavingVmsforAccount(zoneId, accountId);

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("List of pods to be considered, after applying UserConcentratedPod heuristic: "+ prioritizedPods);
        }

        return prioritizedPods;
    }

    /**
     * This method should reorder the given list of Pod Ids by applying any necessary heuristic
     * for this planner
     * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account
     * @return List<Long> ordered list of Pod Ids
     */
    @Override
    protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
        List<Long> podIdsByCapacity = podCapacityInfo.first();
        if(vmProfile.getOwner() == null){
            return podIdsByCapacity;
        }
        long accountId = vmProfile.getOwner().getAccountId();

        //user has VMs in certain pods. - prioritize those pods first
        //UserConcentratedPod strategy
        List<Long> podIds = listPodsByUserConcentration(plan.getDataCenterId(), accountId);
        if(!podIds.isEmpty()){
            //remove pods that dont have capacity for this vm
            // NOTE(review): this mutates podCapacityInfo.first() in place
            // (removeAll); the pair should not be reused after this call.
            podIds.retainAll(podIdsByCapacity);
            podIdsByCapacity.removeAll(podIds);
            // Pods without this account's VMs still follow, in capacity order.
            podIds.addAll(podIdsByCapacity);
            return podIds;
        }else{
            return podIdsByCapacity;
        }

    }

    /**
     * Handles non-BareMetal deployments only, and only when the configured
     * allocation algorithm is 'userconcentratedpod'.
     */
    @Override
    public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
        if(vm.getHypervisorType() != HypervisorType.BareMetal){
            //check the allocation strategy
            if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod.toString())) {
                return true;
            }
        }
        return false;
    }

}
|
||||
|
|
@ -0,0 +1,221 @@
|
|||
/**
|
||||
* Copyright (C) 2011 Citrix Systems, Inc. All rights reserved.
|
||||
*
|
||||
* This software is licensed under the GNU General Public License v3 or later.
|
||||
*
|
||||
* It is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or any later version.
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
package com.cloud.deploy;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=DeploymentPlanner.class)
/**
 * Deployment planner that disperses an account's VMs: pods/clusters with fewer
 * Running VMs of the deploying account are tried first. Selected when the
 * global config 'vm.allocation.algorithm' is 'userdispersing'. The global
 * config 'vm.user.dispersion.weight' blends the VM-count ordering with the
 * capacity ordering (weight 1.0 = pure VM-count ordering).
 */
public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {

    private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);

    /**
     * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
     * for this planner
     * For UserDispersingPlanner we need to order the clusters by considering the number of VMs for this account
     * @return List<Long> ordered list of Cluster Ids
     */
    @Override
    protected List<Long> reorderClusters(long id, boolean isZone, Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
        List<Long> clusterIdsByCapacity = clusterCapacityInfo.first();
        // No owner => nothing to disperse by; keep capacity order.
        if(vmProfile.getOwner() == null){
            return clusterIdsByCapacity;
        }
        long accountId = vmProfile.getOwner().getAccountId();
        Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo = listClustersByUserDispersion(id, isZone, accountId);

        //now we have 2 cluster lists - one ordered by capacity and the other by number of VMs for this account
        //need to apply weights to these to find the correct ordering to follow

        if(_userDispersionWeight == 1.0f){
            // Pure dispersion: take the VM-count ordering, restricted to
            // clusters that passed the capacity filter.
            List<Long> clusterIds = clusterIdsVmCountInfo.first();
            clusterIds.retainAll(clusterIdsByCapacity);
            return clusterIds;
        }else{
            //apply weights to the two lists
            return orderByApplyingWeights(clusterCapacityInfo, clusterIdsVmCountInfo, accountId);
        }


    }

    /**
     * This method should reorder the given list of Pod Ids by applying any necessary heuristic
     * for this planner
     * For UserDispersingPlanner we need to order the pods by considering the number of VMs for this account
     * @return List<Long> ordered list of Pod Ids
     */
    @Override
    protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacityInfo, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
        List<Long> podIdsByCapacity = podCapacityInfo.first();
        // No owner => nothing to disperse by; keep capacity order.
        if(vmProfile.getOwner() == null){
            return podIdsByCapacity;
        }
        long accountId = vmProfile.getOwner().getAccountId();

        Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = listPodsByUserDispersion(plan.getDataCenterId(), accountId);

        //now we have 2 pod lists - one ordered by capacity and the other by number of VMs for this account
        //need to apply weights to these to find the correct ordering to follow

        if(_userDispersionWeight == 1.0f){
            // Pure dispersion: VM-count ordering restricted to pods with capacity.
            List<Long> podIds = podIdsVmCountInfo.first();
            podIds.retainAll(podIdsByCapacity);
            return podIds;
        }else{
            //apply weights to the two lists
            return orderByApplyingWeights(podCapacityInfo, podIdsVmCountInfo, accountId);
        }

    }

    /**
     * Lists cluster ids (zone-wide when isZone, otherwise within the pod 'id')
     * ordered by this account's VM count as reported by the VM instance dao,
     * together with the per-cluster count map.
     */
    protected Pair<List<Long>, Map<Long, Double>> listClustersByUserDispersion(long id, boolean isZone, long accountId){
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Applying Userdispersion heuristic to clusters for account: "+ accountId);
        }
        Pair<List<Long>, Map<Long, Double>> clusterIdsVmCountInfo;
        if(isZone){
            clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInZoneByVmCount(id, accountId);
        }else{
            clusterIdsVmCountInfo = _vmInstanceDao.listClusterIdsInPodByVmCount(id, accountId);
        }
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("List of clusters in ascending order of number of VMs: "+ clusterIdsVmCountInfo.first());
        }
        return clusterIdsVmCountInfo;
    }

    /**
     * Lists pod ids in the zone ordered by this account's VM count as reported
     * by the VM instance dao, together with the per-pod count map.
     */
    protected Pair<List<Long>, Map<Long, Double>> listPodsByUserDispersion(long dataCenterId, long accountId) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Applying Userdispersion heuristic to pods for account: "+ accountId);
        }
        Pair<List<Long>, Map<Long, Double>> podIdsVmCountInfo = _vmInstanceDao.listPodIdsInZoneByVmCount(dataCenterId, accountId);
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("List of pods in ascending order of number of VMs: "+ podIdsVmCountInfo.first());
        }

        return podIdsVmCountInfo;
    }


    /**
     * Blends the capacity ordering and the VM-count ordering into one list:
     * each id's score is capacityValue * (1 - weight) + normalizedVmCount * weight,
     * and ids are returned in ascending score order (TreeMap natural ordering).
     * Only ids present in the capacity list are considered.
     *
     * NOTE(review): normalisedVmCountIdMap.get(id) will be null (NPE on unboxing)
     * for any id in the capacity list but missing from the VM-count map —
     * confirm the dao always returns an entry for every pod/cluster.
     * NOTE(review): if countRunningByAccount returns 0, the normalization
     * divides by zero, yielding NaN/Infinity scores — verify upstream guarantees
     * a non-zero count when this path is reached.
     */
    private List<Long> orderByApplyingWeights(Pair<List<Long>, Map<Long, Double>> capacityInfo, Pair<List<Long>, Map<Long, Double>> vmCountInfo, long accountId){
        List<Long> capacityOrderedIds = capacityInfo.first();
        List<Long> vmCountOrderedIds = vmCountInfo.first();
        Map<Long, Double> capacityMap = capacityInfo.second();
        Map<Long, Double> vmCountMap = vmCountInfo.second();

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Capacity Id list: "+ capacityOrderedIds + " , capacityMap:"+capacityMap);
        }
        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Vm Count Id list: "+ vmCountOrderedIds + " , vmCountMap:"+vmCountMap);
        }


        List<Long> idsReorderedByWeights = new ArrayList<Long>();
        // Complementary weights: dispersion weight w, capacity weight (1 - w).
        float capacityWeight = (1.0f -_userDispersionWeight);

        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Applying userDispersionWeight: "+ _userDispersionWeight);
        }
        //normalize the vmCountMap
        LinkedHashMap<Long, Double> normalisedVmCountIdMap= new LinkedHashMap<Long, Double>();

        Long totalVmsOfAccount = _vmInstanceDao.countRunningByAccount(accountId);
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Total VMs for account: "+ totalVmsOfAccount);
        }
        // Scale each per-id VM count into [0, 1] by the account's total.
        for(Long id : vmCountOrderedIds){
            Double normalisedCount = vmCountMap.get(id) / totalVmsOfAccount;
            normalisedVmCountIdMap.put(id, normalisedCount);
        }

        //consider only those ids that are in capacity map.

        // TreeMap sorts by total weight ascending; ties share a bucket list.
        SortedMap<Double, List<Long>> sortedMap= new TreeMap<Double, List<Long>>();
        for(Long id : capacityOrderedIds){
            Double weightedCapacityValue = capacityMap.get(id) * capacityWeight;
            Double weightedVmCountValue = normalisedVmCountIdMap.get(id) * _userDispersionWeight;
            Double totalWeight = weightedCapacityValue + weightedVmCountValue;
            if(sortedMap.containsKey(totalWeight)){
                List<Long> idList = sortedMap.get(totalWeight);
                idList.add(id);
                sortedMap.put(totalWeight, idList);
            }else{
                List<Long> idList = new ArrayList<Long>();
                idList.add(id);
                sortedMap.put(totalWeight, idList);
            }
        }

        // Flatten the score-ordered buckets into the final list.
        for(List<Long> idList : sortedMap.values()){
            idsReorderedByWeights.addAll(idList);
        }

        if (s_logger.isTraceEnabled()) {
            s_logger.trace("Reordered Id list: "+ idsReorderedByWeights);
        }

        return idsReorderedByWeights;
    }


    /**
     * Handles non-BareMetal deployments only, and only when the configured
     * allocation algorithm is 'userdispersing'.
     */
    @Override
    public boolean canHandle(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid) {
        if(vm.getHypervisorType() != HypervisorType.BareMetal){
            //check the allocation strategy
            if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) {
                return true;
            }
        }
        return false;
    }

    // Weight of the user-dispersion score vs capacity score, from
    // 'vm.user.dispersion.weight' (default 1.0 = dispersion only).
    float _userDispersionWeight;


    /**
     * Configures the planner: base configuration plus caching the dispersion
     * weight from global configuration (default 1.0).
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);

        String weight = _configDao.getValue(Config.VmUserDispersionWeight.key());
        _userDispersionWeight = NumbersUtil.parseFloat(weight, 1.0f);


        return true;
    }

}
|
||||
|
|
@ -113,8 +113,8 @@ import com.cloud.host.HostVO;
|
|||
import com.cloud.host.Status;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.host.dao.HostDetailsDao;
|
||||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.hypervisor.HypervisorGuruManager;
|
||||
import com.cloud.network.NetworkManager;
|
||||
import com.cloud.network.router.VirtualNetworkApplianceManager;
|
||||
import com.cloud.org.Grouping;
|
||||
|
|
@ -124,7 +124,6 @@ import com.cloud.service.ServiceOfferingVO;
|
|||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.Volume.Event;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.storage.allocator.StoragePoolAllocator;
|
||||
import com.cloud.storage.dao.DiskOfferingDao;
|
||||
|
|
@ -162,11 +161,11 @@ import com.cloud.utils.db.DB;
|
|||
import com.cloud.utils.db.GenericSearchBuilder;
|
||||
import com.cloud.utils.db.GlobalLock;
|
||||
import com.cloud.utils.db.JoinBuilder;
|
||||
import com.cloud.utils.db.JoinBuilder.JoinType;
|
||||
import com.cloud.utils.db.SearchBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.JoinBuilder.JoinType;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.exception.ExecutionException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
|
|
@ -179,9 +178,10 @@ import com.cloud.vm.UserVmManager;
|
|||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineManager;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineProfileImpl;
|
||||
import com.cloud.vm.dao.ConsoleProxyDao;
|
||||
import com.cloud.vm.dao.DomainRouterDao;
|
||||
import com.cloud.vm.dao.SecondaryStorageVmDao;
|
||||
|
|
@ -419,12 +419,13 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
|
|||
return false;
|
||||
}
|
||||
|
||||
protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long clusterId, final VMTemplateVO template, final Set<StoragePool> avoid) {
|
||||
protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long clusterId, VMInstanceVO vm, final Set<StoragePool> avoid) {
|
||||
|
||||
VirtualMachineProfile<VMInstanceVO> profile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);
|
||||
Enumeration<StoragePoolAllocator> en = _storagePoolAllocators.enumeration();
|
||||
while (en.hasMoreElements()) {
|
||||
final StoragePoolAllocator allocator = en.nextElement();
|
||||
final List<StoragePool> poolList = allocator.allocateToPool(dskCh, template, dc.getId(), pod.getId(), clusterId, avoid, 1);
|
||||
final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, dc.getId(), pod.getId(), clusterId, avoid, 1);
|
||||
if (poolList != null && !poolList.isEmpty()) {
|
||||
return (StoragePoolVO) poolList.get(0);
|
||||
}
|
||||
|
|
@ -727,7 +728,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag
|
|||
break;
|
||||
}
|
||||
|
||||
pool = findStoragePool(dskCh, dc, pod, clusterId, template, avoidPools);
|
||||
pool = findStoragePool(dskCh, dc, pod, clusterId, vm, avoidPools);
|
||||
if (pool == null) {
|
||||
s_logger.warn("Unable to find storage poll when create volume " + volume.getName());
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -35,7 +35,6 @@ import com.cloud.deploy.DataCenterDeployment;
|
|||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.server.StatsCollector;
|
||||
import com.cloud.storage.Storage.StoragePoolType;
|
||||
import com.cloud.storage.StorageManager;
|
||||
|
|
@ -56,13 +55,13 @@ import com.cloud.storage.dao.VMTemplateHostDao;
|
|||
import com.cloud.storage.dao.VMTemplatePoolDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
import com.cloud.utils.component.Inject;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
|
||||
private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
|
||||
|
|
@ -287,7 +286,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, long dcId, long podId, Long clusterId, Set<? extends StoragePool> avoids, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Set<? extends StoragePool> avoids, int returnUpTo) {
|
||||
|
||||
ExcludeList avoid = new ExcludeList();
|
||||
for(StoragePool pool : avoids){
|
||||
|
|
@ -295,7 +294,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
|||
}
|
||||
|
||||
DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
|
||||
return allocateToPool(dskCh, VMtemplate, plan, avoid, returnUpTo);
|
||||
return allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,37 +20,43 @@ package com.cloud.storage.allocator;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.deploy.DeploymentPlan;
|
||||
import com.cloud.deploy.DeploymentPlanner;
|
||||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.server.StatsCollector;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
@Local(value=StoragePoolAllocator.class)
|
||||
public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
||||
private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class);
|
||||
|
||||
protected String _allocationAlgorithm = "random";
|
||||
|
||||
@Override
|
||||
public boolean allocatorIsCorrectType(DiskProfile dskCh) {
|
||||
return !localStorageAllocationNeeded(dskCh);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
|
||||
|
||||
VMTemplateVO template = (VMTemplateVO)VMtemplate;
|
||||
|
||||
VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
|
||||
Account account = vmProfile.getOwner();
|
||||
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
|
||||
|
|
@ -78,7 +84,12 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
|
||||
StatsCollector sc = StatsCollector.getInstance();
|
||||
|
||||
Collections.shuffle(pools);
|
||||
if(_allocationAlgorithm.equals("random")) {
|
||||
// Shuffle this so that we don't check the pools in the same order.
|
||||
Collections.shuffle(pools);
|
||||
}else if(_allocationAlgorithm.equals("userdispersing")){
|
||||
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
|
||||
|
|
@ -99,4 +110,50 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
|
||||
return suitablePools;
|
||||
}
|
||||
|
||||
private List<StoragePoolVO> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePoolVO> pools, Account account) {
|
||||
if(account == null){
|
||||
return pools;
|
||||
}
|
||||
long dcId = plan.getDataCenterId();
|
||||
Long podId = plan.getPodId();
|
||||
Long clusterId = plan.getClusterId();
|
||||
|
||||
List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
|
||||
}
|
||||
|
||||
//now filter the given list of Pools by this ordered list
|
||||
Map<Long, StoragePoolVO> poolMap = new HashMap<Long, StoragePoolVO>();
|
||||
for (StoragePoolVO pool : pools) {
|
||||
poolMap.put(pool.getId(), pool);
|
||||
}
|
||||
List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
|
||||
|
||||
poolIdsByVolCount.retainAll(matchingPoolIds);
|
||||
|
||||
List<StoragePoolVO> reorderedPools = new ArrayList<StoragePoolVO>();
|
||||
for(Long id: poolIdsByVolCount){
|
||||
reorderedPools.add(poolMap.get(id));
|
||||
}
|
||||
|
||||
return reorderedPools;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
super.configure(name, params);
|
||||
|
||||
if (_configDao != null) {
|
||||
Map<String, String> configs = _configDao.getConfiguration(params);
|
||||
String allocationAlgorithm = configs.get("vm.allocation.algorithm");
|
||||
if (allocationAlgorithm != null && (allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.random.toString())
|
||||
|| allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())
|
||||
|| allocationAlgorithm.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString()))) {
|
||||
_allocationAlgorithm = allocationAlgorithm;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,10 +30,8 @@ import com.cloud.deploy.DeploymentPlan;
|
|||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.component.ComponentLocator;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
|
|
@ -61,7 +59,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
|
||||
if (!_storagePoolCleanupEnabled) {
|
||||
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
|
||||
|
|
@ -81,7 +79,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
|
|||
// Try to find a storage pool after cleanup
|
||||
ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid());
|
||||
|
||||
return allocator.allocateToPool(dskCh, VMtemplate, plan, myAvoids, returnUpTo);
|
||||
return allocator.allocateToPool(dskCh, vmProfile, plan, myAvoids, returnUpTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -38,7 +38,6 @@ import com.cloud.storage.StoragePool;
|
|||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.DateUtil;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.component.Inject;
|
||||
|
|
@ -52,6 +51,7 @@ import com.cloud.vm.UserVmVO;
|
|||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
|
||||
|
|
@ -90,7 +90,7 @@ public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
|
||||
|
|
@ -106,7 +106,7 @@ public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator {
|
|||
}
|
||||
|
||||
List<StoragePool> availablePool;
|
||||
while (!(availablePool = super.allocateToPool(dskCh, VMtemplate, plan, myAvoids, 1)).isEmpty()) {
|
||||
while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) {
|
||||
StoragePool pool = availablePool.get(0);
|
||||
myAvoids.addPool(pool.getId());
|
||||
List<StoragePoolHostVO> hostsInSPool = _poolHostDao.listByPoolId(pool.getId());
|
||||
|
|
|
|||
|
|
@ -31,9 +31,7 @@ import com.cloud.server.StatsCollector;
|
|||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
|
|
@ -47,11 +45,11 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
|
||||
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
|
||||
|
||||
VMTemplateVO template = (VMTemplateVO)VMtemplate;
|
||||
VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
|
||||
// Check that the allocator type is correct
|
||||
if (!allocatorIsCorrectType(dskCh)) {
|
||||
return suitablePools;
|
||||
|
|
|
|||
|
|
@ -24,10 +24,10 @@ import com.cloud.deploy.DeploymentPlan;
|
|||
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.component.Adapter;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
/**
|
||||
* Allocator for a disk. This determines which StoragePool should
|
||||
|
|
@ -36,7 +36,7 @@ import com.cloud.vm.VirtualMachine;
|
|||
public interface StoragePoolAllocator extends Adapter {
|
||||
|
||||
//keeping since storageMgr is using this API for some existing functionalities
|
||||
List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, long dcId, long podId, Long clusterId, Set<? extends StoragePool> avoids, int returnUpTo);
|
||||
List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Set<? extends StoragePool> avoids, int returnUpTo);
|
||||
|
||||
String chooseStorageIp(VirtualMachine vm, Host host, Host storage);
|
||||
|
||||
|
|
@ -50,7 +50,7 @@ public interface StoragePoolAllocator extends Adapter {
|
|||
* @param int returnUpTo (use -1 to return all possible pools)
|
||||
* @return List<StoragePool> List of storage pools that are suitable for the VM
|
||||
**/
|
||||
List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo);
|
||||
List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo);
|
||||
|
||||
public static int RETURN_UPTO_ALL = -1;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,10 +30,8 @@ import com.cloud.deploy.DeploymentPlanner.ExcludeList;
|
|||
import com.cloud.host.Host;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.utils.component.ComponentLocator;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachineProfile;
|
||||
|
||||
|
|
@ -42,12 +40,12 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen
|
|||
boolean _useLocalStorage;
|
||||
|
||||
@Override
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineTemplate VMtemplate, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
|
||||
if (!_useLocalStorage) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return super.allocateToPool(dskCh, VMtemplate, plan, avoid, returnUpTo);
|
||||
return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
|||
|
|
@ -19,7 +19,6 @@ package com.cloud.storage.dao;
|
|||
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Volume;
|
||||
|
|
@ -53,4 +52,5 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
|
|||
ImageFormat getImageFormat(Long volumeId);
|
||||
|
||||
List<VolumeVO> findReadyRootVolumesByInstance(long instanceId);
|
||||
List<Long> listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ package com.cloud.storage.dao;
|
|||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
|
|
@ -27,7 +28,6 @@ import javax.ejb.Local;
|
|||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.exception.ConcurrentOperationException;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
import com.cloud.storage.Volume;
|
||||
|
|
@ -35,7 +35,6 @@ import com.cloud.storage.Volume.Event;
|
|||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.Attribute;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.GenericDaoBase;
|
||||
import com.cloud.utils.db.GenericSearchBuilder;
|
||||
|
|
@ -46,10 +45,8 @@ import com.cloud.utils.db.SearchCriteria.Op;
|
|||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.UpdateBuilder;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
|
||||
@Local(value=VolumeDao.class) @DB(txn=false)
|
||||
@Local(value=VolumeDao.class)
|
||||
public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements VolumeDao {
|
||||
private static final Logger s_logger = Logger.getLogger(VolumeDaoImpl.class);
|
||||
protected final SearchBuilder<VolumeVO> DetachedAccountIdSearch;
|
||||
|
|
@ -63,6 +60,10 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
|
|||
protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? and v.mirror_state = ?";
|
||||
protected static final String SELECT_HYPERTYPE_FROM_VOLUME = "SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?";
|
||||
|
||||
private static final String ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT = "SELECT pool.id, SUM(IF(vol.state='Ready' AND vol.account_id = ?, 1, 0)) FROM `cloud`.`storage_pool` pool LEFT JOIN `cloud`.`volumes` vol ON pool.id = vol.pool_id WHERE pool.data_center_id = ? " +
|
||||
" AND pool.pod_id = ? AND pool.cluster_id = ? " +
|
||||
" GROUP BY pool.id ORDER BY 2 ASC ";
|
||||
|
||||
@Override
|
||||
public List<VolumeVO> findDetachedByAccount(long accountId) {
|
||||
SearchCriteria<VolumeVO> sc = DetachedAccountIdSearch.create();
|
||||
|
|
@ -358,4 +359,29 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
|
|||
}
|
||||
return rows > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Long> listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
try {
|
||||
String sql = ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT;
|
||||
pstmt = txn.prepareAutoCloseStatement(sql);
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, dcId);
|
||||
pstmt.setLong(3, podId);
|
||||
pstmt.setLong(4, clusterId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
result.add(rs.getLong(1));
|
||||
}
|
||||
return result;
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + ORDER_POOLS_NUMBER_OF_VOLUMES_FOR_ACCOUNT, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,9 @@ package com.cloud.vm.dao;
|
|||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.GenericDao;
|
||||
import com.cloud.utils.fsm.StateDao;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
|
|
@ -84,5 +86,15 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
|
|||
List<VMInstanceVO> listByClusterId(long clusterId);
|
||||
List<VMInstanceVO> listVmsMigratingFromHost(Long hostId);
|
||||
|
||||
public Long countRunningByHostId(long hostId);
|
||||
public Long countRunningByHostId(long hostId);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> listClusterIdsInZoneByVmCount(long zoneId, long accountId);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> listClusterIdsInPodByVmCount(long podId, long accountId);
|
||||
|
||||
Pair<List<Long>, Map<Long, Double>> listPodIdsInZoneByVmCount(long dataCenterId, long accountId);
|
||||
|
||||
List<Long> listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId);
|
||||
|
||||
Long countRunningByAccount(long accountId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,8 +19,14 @@
|
|||
package com.cloud.vm.dao;
|
||||
|
||||
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.ejb.Local;
|
||||
|
||||
|
|
@ -38,7 +44,9 @@ import com.cloud.utils.db.SearchBuilder;
|
|||
import com.cloud.utils.db.SearchCriteria;
|
||||
import com.cloud.utils.db.SearchCriteria.Func;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
import com.cloud.utils.db.Transaction;
|
||||
import com.cloud.utils.db.UpdateBuilder;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.Event;
|
||||
|
|
@ -64,9 +72,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
protected final SearchBuilder<VMInstanceVO> HostUpSearch;
|
||||
protected final GenericSearchBuilder<VMInstanceVO, Long> CountVirtualRoutersByAccount;
|
||||
protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByHost;
|
||||
|
||||
protected GenericSearchBuilder<VMInstanceVO, Long> CountRunningByAccount;
|
||||
|
||||
protected final Attribute _updateTimeAttr;
|
||||
|
||||
private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = "SELECT host.cluster_id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE ";
|
||||
private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2 = " AND host.type = 'Routing' GROUP BY host.cluster_id ORDER BY 2 ASC ";
|
||||
|
||||
private static final String ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT pod.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host_pod_ref` pod LEFT JOIN `cloud`.`vm_instance` vm ON pod.id = vm.pod_id WHERE pod.data_center_id = ? " +
|
||||
" GROUP BY pod.id ORDER BY 2 ASC ";
|
||||
|
||||
private static final String ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT host.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE host.data_center_id = ? " +
|
||||
" AND host.pod_id = ? AND host.cluster_id = ? AND host.type = 'Routing' " +
|
||||
" GROUP BY host.id ORDER BY 2 ASC ";
|
||||
|
||||
protected final HostDaoImpl _hostDao = ComponentLocator.inject(HostDaoImpl.class);
|
||||
protected VMInstanceDaoImpl() {
|
||||
|
|
@ -150,6 +168,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ);
|
||||
CountRunningByHost.done();
|
||||
|
||||
CountRunningByAccount = createSearchBuilder(Long.class);
|
||||
CountRunningByAccount.select(null, Func.COUNT, null);
|
||||
CountRunningByAccount.and("account", CountRunningByAccount.entity().getAccountId(), SearchCriteria.Op.EQ);
|
||||
CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ);
|
||||
CountRunningByAccount.done();
|
||||
|
||||
_updateTimeAttr = _allAttributes.get("updateTime");
|
||||
assert _updateTimeAttr != null : "Couldn't get this updateTime attribute";
|
||||
}
|
||||
|
|
@ -361,5 +385,123 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
|
|||
sc.setParameters("host", hostId);
|
||||
sc.setParameters("state", State.Running);
|
||||
return customSearch(sc, null).get(0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<List<Long>, Map<Long, Double>> listClusterIdsInZoneByVmCount(long zoneId, long accountId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
Map<Long, Double> clusterVmCountMap = new HashMap<Long, Double>();
|
||||
|
||||
StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1);
|
||||
sql.append("host.data_center_id = ?");
|
||||
sql.append(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2);
|
||||
try {
|
||||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, zoneId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long clusterId = rs.getLong(1);
|
||||
result.add(clusterId);
|
||||
clusterVmCountMap.put(clusterId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, clusterVmCountMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + sql, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + sql, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<List<Long>, Map<Long, Double>> listClusterIdsInPodByVmCount(long podId, long accountId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
Map<Long, Double> clusterVmCountMap = new HashMap<Long, Double>();
|
||||
|
||||
StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1);
|
||||
sql.append("host.pod_id = ?");
|
||||
sql.append(ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2);
|
||||
try {
|
||||
pstmt = txn.prepareAutoCloseStatement(sql.toString());
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, podId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long clusterId = rs.getLong(1);
|
||||
result.add(clusterId);
|
||||
clusterVmCountMap.put(clusterId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, clusterVmCountMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + sql, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + sql, e);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<List<Long>, Map<Long, Double>> listPodIdsInZoneByVmCount(long dataCenterId, long accountId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
Map<Long, Double> podVmCountMap = new HashMap<Long, Double>();
|
||||
try {
|
||||
String sql = ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT;
|
||||
pstmt = txn.prepareAutoCloseStatement(sql);
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, dataCenterId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Long podId = rs.getLong(1);
|
||||
result.add(podId);
|
||||
podVmCountMap.put(podId, rs.getDouble(2));
|
||||
}
|
||||
return new Pair<List<Long>, Map<Long, Double>>(result, podVmCountMap);
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Long> listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId) {
|
||||
Transaction txn = Transaction.currentTxn();
|
||||
PreparedStatement pstmt = null;
|
||||
List<Long> result = new ArrayList<Long>();
|
||||
try {
|
||||
String sql = ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT;
|
||||
pstmt = txn.prepareAutoCloseStatement(sql);
|
||||
pstmt.setLong(1, accountId);
|
||||
pstmt.setLong(2, dcId);
|
||||
pstmt.setLong(3, podId);
|
||||
pstmt.setLong(4, clusterId);
|
||||
|
||||
ResultSet rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
result.add(rs.getLong(1));
|
||||
}
|
||||
return result;
|
||||
} catch (SQLException e) {
|
||||
throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
} catch (Throwable e) {
|
||||
throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long countRunningByAccount(long accountId){
|
||||
SearchCriteria<Long> sc = CountRunningByAccount.create();
|
||||
sc.setParameters("account", accountId);
|
||||
sc.setParameters("state", State.Running);
|
||||
return customSearch(sc, null).get(0);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -229,3 +229,7 @@ CREATE TABLE `cloud`.`vm_template_details` (
|
|||
ALTER TABLE `cloud`.`op_host_capacity` ADD COLUMN `created` datetime;
|
||||
ALTER TABLE `cloud`.`op_host_capacity` ADD COLUMN `update_time` datetime;
|
||||
|
||||
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'apply.allocation.algorithm.to.pods', 'false', 'If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation');
|
||||
INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.user.dispersion.weight', 1, 'Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 – weight of user dispersion)');
|
||||
DELETE FROM configuration WHERE name='use.user.concentrated.pod.allocation';
|
||||
UPDATE configuration SET description = '[''random'', ''firstfit'', ''userdispersing'', ''userconcentratedpod''] : Order in which hosts within a cluster will be considered for VM/volume allocation.' WHERE name = 'vm.allocation.algorithm';
|
||||
Loading…
Reference in New Issue