mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-5391:
check for host CPU capability when a stopped VM is started again on the same host. Also changed the FirstFitAllocator to use the same method.
parent 25e51a5716
commit c06e69db19
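The method this commit routes both code paths through, CapacityManager.checkIfHostHasCpuCapability(hostId, cpu, speed), is not itself shown in the diff. Judging from the two inline comparisons it replaces in FirstFitAllocator (first hunk below), it presumably reduces to the following sketch; this helper is a hypothetical stand-in, not the actual CloudStack implementation:

// Hedged sketch only: parameter names and the flattened signature are assumptions.
// The diff shows the real method is invoked as
//   _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed())
// and that it replaces the numCpusGood/cpuFreqGood checks removed below.
static boolean hostHasCpuCapability(int hostCpuCount, int hostCpuSpeedMhz,
                                    int offeringCpuCount, int offeringCpuSpeedMhz) {
    boolean numCpusGood = hostCpuCount >= offeringCpuCount;       // enough cores for the offering
    boolean cpuFreqGood = hostCpuSpeedMhz >= offeringCpuSpeedMhz; // cores clocked fast enough
    return numCpusGood && cpuFreqGood;
}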
@@ -289,8 +289,6 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
                 continue;
             }
 
-            boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu();
-            boolean cpuFreqGood = host.getSpeed().intValue() >= offering.getSpeed();
             int cpu_requested = offering.getCpu() * offering.getSpeed();
             long ram_requested = offering.getRamSize() * 1024L * 1024L;
             Cluster cluster = _clusterDao.findById(host.getClusterId());
@@ -299,17 +297,18 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
             Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue());
             Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue());
 
+            boolean hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed());
             boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio,
                 considerReservedCapacity);
 
-            if (numCpusGood && cpuFreqGood && hostHasCapacity) {
+            if (hostHasCpuCapability && hostHasCapacity) {
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("Found a suitable host, adding to list: " + host.getId());
                 }
                 suitableHosts.add(host);
             } else {
                 if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" + hostHasCapacity);
+                    s_logger.debug("Not using host " + host.getId() + "; host has cpu capability? " + hostHasCpuCapability + ", host has capacity?" + hostHasCapacity);
                 }
                 avoid.addHost(host.getId());
             }
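For scale, the requested figures fed into checkIfHostHasCapacity above are plain arithmetic over the service offering; the numbers below are illustrative, not from the commit:

int offeringCpu = 2;          // vCPU count from the service offering
int offeringSpeedMhz = 1000;  // speed per vCPU, in MHz
int offeringRamMb = 1024;     // RAM size, in MB

int cpu_requested = offeringCpu * offeringSpeedMhz;   // 2000 MHz in total
long ram_requested = offeringRamMb * 1024L * 1024L;   // 1 GiB in bytes

checkIfHostHasCapacity then weighs these totals against the host's free capacity scaled by the cluster's CPU and memory overcommit ratios, while checkIfHostHasCpuCapability guards the raw per-core numbers that overcommit cannot paper over.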
@@ -30,7 +30,7 @@ import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.affinity.AffinityGroupProcessor;
 import org.apache.cloudstack.affinity.AffinityGroupService;
 import org.apache.cloudstack.affinity.AffinityGroupVMMapVO;
 import org.apache.cloudstack.affinity.AffinityGroupVO;
 import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@ -56,11 +56,11 @@ import com.cloud.dc.ClusterDetailsVO;
 import com.cloud.dc.ClusterVO;
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.DedicatedResourceVO;
 import com.cloud.dc.Pod;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.DedicatedResourceDao;
 import com.cloud.dc.dao.HostPodDao;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
@@ -102,7 +102,7 @@ import com.cloud.utils.db.Transaction;
 import com.cloud.utils.db.TransactionCallback;
 import com.cloud.utils.db.TransactionCallbackNoReturn;
 import com.cloud.utils.db.TransactionStatus;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.fsm.StateListener;
 import com.cloud.vm.DiskProfile;
 import com.cloud.vm.ReservationContext;
@@ -176,7 +176,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
     @Inject protected HostDao _hostDao;
     @Inject protected HostPodDao _podDao;
     @Inject protected ClusterDao _clusterDao;
     @Inject protected DedicatedResourceDao _dedicatedDao;
     @Inject protected GuestOSDao _guestOSDao = null;
     @Inject protected GuestOSCategoryDao _guestOSCategoryDao = null;
     @Inject protected DiskOfferingDao _diskOfferingDao;
@@ -216,7 +216,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         // call affinitygroup chain
         VirtualMachine vm = vmProfile.getVirtualMachine();
         long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId());
         DataCenter dc = _dcDao.findById(vm.getDataCenterId());
 
         if (vmGroupCount > 0) {
             for (AffinityGroupProcessor processor : _affinityProcessors) {
@@ -226,14 +226,14 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
 
             if (vm.getType() == VirtualMachine.Type.User) {
                 checkForNonDedicatedResources(vmProfile, dc, avoids);
             }
         }
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
         }
 
         // call planners
         //DataCenter dc = _dcDao.findById(vm.getDataCenterId());
         // check if datacenter is in avoid set
         if (avoids.shouldAvoid(dc)) {
             if (s_logger.isDebugEnabled()) {
@@ -308,7 +308,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                     List<Host> suitableHosts = new ArrayList<Host>();
                     suitableHosts.add(host);
                     Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                             suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner,vmProfile, plan ,avoids));
                     if (potentialResources != null) {
                         Pod pod = _podDao.findById(host.getPodId());
                         Cluster cluster = _clusterDao.findById(host.getClusterId());
@@ -350,7 +350,8 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                 Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                 Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                 if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
-                        cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+                        cpuOvercommitRatio, memoryOvercommitRatio, true)
+                        && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed())) {
                     s_logger.debug("The last host of this VM is UP and has enough capacity");
                     s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
                             + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
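The hunk above tightens the fast path that reuses a stopped VM's last host: capacity under overcommit and raw CPU capability must now both hold. Schematically (the two guard helpers below are hypothetical stand-ins for the _capacityMgr calls in the diff):

// Both conditions short-circuit: if the last host lacks either free capacity
// (after overcommit) or per-core CPU capability, planning falls through to
// the regular allocators instead of reusing the host.
boolean reuseLastHost =
        hostHasCapacity(hostId, cpu_requested, ram_requested,
                        cpuOvercommitRatio, memoryOvercommitRatio)      // capacity, overcommit-aware
        && hostHasCpuCapability(hostId, offeringCpu, offeringSpeedMhz); // capability, raw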
@@ -362,13 +363,13 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                         vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                 Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                 List<Volume> readyAndReusedVolumes = result.second();
 
                 // choose the potential pool for this VM for this host
                 if (!suitableVolumeStoragePools.isEmpty()) {
                     List<Host> suitableHosts = new ArrayList<Host>();
                     suitableHosts.add(host);
                     Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                             suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner,vmProfile, plan ,avoids));
                     if (potentialResources != null) {
                         Pod pod = _podDao.findById(host.getPodId());
                         Cluster cluster = _clusterDao.findById(host.getClusterId());
@@ -418,7 +419,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             resetAvoidSet(plannerAvoidOutput, plannerAvoidInput);
 
             dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc,
                     getPlannerUsage(planner, vmProfile, plan, avoids), plannerAvoidOutput);
             if (dest != null) {
                 return dest;
             }
@@ -453,7 +454,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         return dest;
     }
 
     private void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) {
         boolean isExplicit = false;
         VirtualMachine vm = vmProfile.getVirtualMachine();
 
@@ -488,16 +489,16 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         }
 
         // check affinity group of type Explicit dedication exists. If No put
         // dedicated pod/cluster/host in avoid list
         List<AffinityGroupVMMapVO> vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), "ExplicitDedication");
 
         if (vmGroupMappings != null && !vmGroupMappings.isEmpty()){
             isExplicit = true;
         }
 
         if (!isExplicit) {
             //add explicitly dedicated resources in avoidList
 
             List<Long> allPodsInDc = _podDao.listAllPods(dc.getId());
             List<Long> allDedicatedPods = _dedicatedDao.listAllPods();
             allPodsInDc.retainAll(allDedicatedPods);
@@ -511,10 +512,10 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             List<Long> allHostsInDc = _hostDao.listAllHosts(dc.getId());
             List<Long> allDedicatedHosts = _dedicatedDao.listAllHosts();
             allHostsInDc.retainAll(allDedicatedHosts);
             avoids.addHostList(allHostsInDc);
         }
     }
 
     private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) {
         if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) {
             avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid());
@@ -533,9 +534,9 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         }
     }
 
     private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException {
         if (planner != null && planner instanceof DeploymentClusterPlanner) {
             return ((DeploymentClusterPlanner) planner).getResourceUsage(vmProfile, plan, avoids);
         } else {
             return DeploymentPlanner.PlannerResourceUsage.Shared;
         }