mirror of https://github.com/apache/cloudstack.git
CLOUDSTACK-4498: do not reserve memory and CPU for VMware VMs when vmware.reserve.cpu and vmware.reserve.mem are set to false.
Conflicts: plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java Signed-off-by: Jayapal <jayapal@apache.org>
This commit is contained in:
parent
37d500d2a6
commit
f701831713
|
|
@ -29,8 +29,11 @@ import javax.ejb.Local;
|
|||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.server.ConfigurationServer;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.vm.dao.VMInstanceDao;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
|
|
@ -108,12 +111,17 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
|
|||
@Inject SecondaryStorageVmManager _secStorageMgr;
|
||||
@Inject NetworkModel _networkMgr;
|
||||
@Inject ConfigurationDao _configDao;
|
||||
@Inject ConfigurationServer _configServer;
|
||||
@Inject
|
||||
NicDao _nicDao;
|
||||
@Inject
|
||||
PhysicalNetworkDao _physicalNetworkDao;
|
||||
@Inject
|
||||
PhysicalNetworkTrafficTypeDao _physicalNetworkTrafficTypeDao;
|
||||
@Inject
|
||||
VMInstanceDao _vmDao;
|
||||
@Inject
|
||||
ClusterDao _clusterDao;
|
||||
|
||||
protected VMwareGuru() {
|
||||
super();
|
||||
|
|
@ -180,7 +188,9 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru {
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
long clusterId = _hostDao.findById( _vmDao.findById(vm.getId()).getHostId()).getClusterId();
|
||||
details.put(Config.VmwareReserveCpu.key(), _configServer.getConfigValue(Config.VmwareReserveCpu.key(), Config.ConfigurationParameterScope.cluster.toString(), clusterId));
|
||||
details.put(Config.VmwareReserveMem.key(), _configServer.getConfigValue(Config.VmwareReserveMem.key(), Config.ConfigurationParameterScope.cluster.toString(), clusterId));
|
||||
to.setDetails(details);
|
||||
|
||||
if(vm.getVirtualMachine() instanceof DomainRouterVO) {
|
||||
|
|
|
|||
|
|
@ -518,8 +518,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
|
|||
params.put("vm.instancename.flag", _instanceNameFlag);
|
||||
params.put("service.console.name", _serviceConsoleName);
|
||||
params.put("management.portgroup.name", _managemetPortGroupName);
|
||||
params.put("vmware.reserve.cpu", _reserveCpu);
|
||||
params.put("vmware.reserve.mem", _reserveMem);
|
||||
params.put("vmware.root.disk.controller", _rootDiskController);
|
||||
params.put("vmware.recycle.hung.wokervm", _recycleHungWorker);
|
||||
params.put("ports.per.dvportgroup", _portsPerDvPortGroup);
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ import java.util.concurrent.TimeUnit;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.configuration.Config;
|
||||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
|
||||
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
|
||||
|
|
@ -364,9 +365,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
protected boolean _fullCloneFlag = false;
|
||||
protected boolean _instanceNameFlag = false;
|
||||
|
||||
protected boolean _reserveCpu = false;
|
||||
|
||||
protected boolean _reserveMem = false;
|
||||
protected boolean _recycleHungWorker = false;
|
||||
protected DiskControllerType _rootDiskController = DiskControllerType.ide;
|
||||
|
||||
|
|
@ -2689,7 +2688,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
vmMo.tearDownDevices(new Class<?>[] { VirtualDisk.class, VirtualEthernetCard.class });
|
||||
vmMo.ensureScsiDeviceController();
|
||||
} else {
|
||||
int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
Pair<ManagedObjectReference, DatastoreMO> rootDiskDataStoreDetails = null;
|
||||
for (DiskTO vol : disks) {
|
||||
if (vol.getType() == Volume.Type.ROOT) {
|
||||
|
|
@ -2707,7 +2705,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(),
|
||||
vmSpec.getMinSpeed(), vmSpec.getLimitCpuUse(),(int)(vmSpec.getMaxRam()/(1024*1024)), ramMb,
|
||||
getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(),(int)(vmSpec.getMaxRam()/(1024*1024)), getReservedMemoryMb(vmSpec),
|
||||
translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), rootDiskDataStoreDetails.first(), false)) {
|
||||
throw new Exception("Failed to create VM. vmName: " + vmInternalCSName);
|
||||
}
|
||||
|
|
@ -2732,11 +2730,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
|
||||
VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
|
||||
int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
String guestOsId = translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value();
|
||||
|
||||
VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(),
|
||||
vmSpec.getMinSpeed(),(int) (vmSpec.getMaxRam()/(1024*1024)), ramMb,
|
||||
getReservedCpuMHZ(vmSpec),(int) (vmSpec.getMaxRam()/(1024*1024)), getReservedMemoryMb(vmSpec),
|
||||
guestOsId, vmSpec.getLimitCpuUse());
|
||||
|
||||
// Check for hotadd settings
|
||||
|
|
@ -2988,7 +2985,27 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int getReservedMemoryMb(VirtualMachineTO vmSpec) {
|
||||
if (vmSpec.getDetails().get(Config.VmwareReserveMem.key()).equalsIgnoreCase("true")) {
|
||||
return (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
} else if (vmSpec.getMinRam() != vmSpec.getMaxRam()) {
|
||||
s_logger.warn("memory overprovisioning factor is set to "+ (vmSpec.getMaxRam()/vmSpec.getMinRam())+" ignoring the flag vmware.reserve.mem");
|
||||
return (int) (vmSpec.getMinRam() / (1024 * 1024));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int getReservedCpuMHZ(VirtualMachineTO vmSpec) {
|
||||
if (vmSpec.getDetails().get(Config.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
|
||||
return vmSpec.getMinSpeed();
|
||||
}else if (vmSpec.getMinSpeed().intValue() != vmSpec.getMaxSpeed().intValue()) {
|
||||
s_logger.warn("cpu overprovisioning factor is set to "+ (vmSpec.getMaxSpeed().intValue()/vmSpec.getMinSpeed().intValue())+" ignoring the flag vmware.reserve.cpu");
|
||||
return vmSpec.getMinSpeed();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// return the finalized disk chain for startup, from top to bottom
|
||||
private String[] syncDiskChain(DatacenterMO dcMo, VirtualMachineMO vmMo, VirtualMachineTO vmSpec,
|
||||
DiskTO vol, VirtualMachineDiskInfoBuilder diskInfoBuilder,
|
||||
|
|
@ -6623,18 +6640,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
|||
_privateNetworkVSwitchName = (String) params.get("private.network.vswitch.name");
|
||||
}
|
||||
|
||||
String value = (String) params.get("vmware.reserve.cpu");
|
||||
if(value != null && value.equalsIgnoreCase("true"))
|
||||
_reserveCpu = true;
|
||||
|
||||
value = (String) params.get("vmware.recycle.hung.wokervm");
|
||||
String value = (String) params.get("vmware.recycle.hung.wokervm");
|
||||
if(value != null && value.equalsIgnoreCase("true"))
|
||||
_recycleHungWorker = true;
|
||||
|
||||
value = (String) params.get("vmware.reserve.mem");
|
||||
if(value != null && value.equalsIgnoreCase("true"))
|
||||
_reserveMem = true;
|
||||
|
||||
|
||||
value = (String)params.get("vmware.root.disk.controller");
|
||||
if(value != null && value.equalsIgnoreCase("scsi"))
|
||||
_rootDiskController = DiskControllerType.scsi;
|
||||
|
|
|
|||
|
|
@ -293,8 +293,8 @@ public enum Config {
|
|||
VmwareAdditionalVncPortRangeStart("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.start", "50000", "Start port number of additional VNC port range", null),
|
||||
VmwareAdditionalVncPortRangeSize("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.size", "1000", "Start port number of additional VNC port range", null),
|
||||
//VmwareGuestNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.guest.nic.device.type", "E1000", "Ethernet card type used in guest VM, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null),
|
||||
VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "false", "Specify whether or not to reserve CPU based on CPU overprovisioning factor", null),
|
||||
VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "false", "Specify whether or not to reserve memory based on memory overprovisioning factor", null),
|
||||
VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "false", "Specify whether or not to reserve CPU when not overprovisioning, In case of cpu overprovisioning we will always reserve cpu", null, ConfigurationParameterScope.cluster.toString()),
|
||||
VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "false", "Specify whether or not to reserve memory when not overprovisioning, In case of memory overprovisioning we will always reserve memory", null, ConfigurationParameterScope.cluster.toString()),
|
||||
VmwareRootDiskControllerType("Advanced", ManagementServer.class, String.class, "vmware.root.disk.controller", "ide", "Specify the default disk controller for root volumes, valid values are scsi, ide", null),
|
||||
VmwareSystemVmNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.systemvm.nic.device.type", "E1000", "Specify the default network device type for system VMs, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null),
|
||||
VmwareRecycleHungWorker("Advanced", ManagementServer.class, Boolean.class, "vmware.recycle.hung.wokervm", "false", "Specify whether or not to recycle hung worker VMs", null),
|
||||
|
|
|
|||
Loading…
Reference in New Issue