From a29e39365aab0ff4024ff5ff4b04fccb7abc2885 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 16 May 2013 22:07:03 +0100 Subject: [PATCH 01/19] CLOUDSTACK-2545: Change unit of network statistics from 1000 to 1024 for KVM Signed-off-by: Chip Childers --- .../hypervisor/kvm/resource/LibvirtComputingResource.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index f3ae33365c3..c3140d3921a 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -2842,7 +2842,7 @@ ServerResource { Pair nicStats = getNicStats(_publicBridgeName); HostStatsEntry hostStats = new HostStatsEntry(cmd.getHostId(), cpuUtil, - nicStats.first() / 1000, nicStats.second() / 1000, "host", + nicStats.first() / 1024, nicStats.second() / 1024, "host", totMem, freeMem, 0, 0); return new GetHostStatsAnswer(cmd, hostStats); } @@ -4561,10 +4561,10 @@ ServerResource { if (oldStats != null) { long deltarx = rx - oldStats._rx; if (deltarx > 0) - stats.setNetworkReadKBs(deltarx / 1000); + stats.setNetworkReadKBs(deltarx / 1024); long deltatx = tx - oldStats._tx; if (deltatx > 0) - stats.setNetworkWriteKBs(deltatx / 1000); + stats.setNetworkWriteKBs(deltatx / 1024); } vmStats newStat = new vmStats(); From 15be97772e1b41801867beef25ae66dfaf286458 Mon Sep 17 00:00:00 2001 From: Vijayendra Bhamidipati Date: Thu, 16 May 2013 08:15:22 -0700 Subject: [PATCH 02/19] PVLAN : Implementing PVLAN deployment capability for VMware deployments in cloudstack. 
--- .../com/cloud/agent/api/PlugNicCommand.java | 9 +- .../vmware/resource/VmwareResource.java | 121 +++++++++----- .../com/cloud/network/NetworkManagerImpl.java | 2 + ...VpcVirtualNetworkApplianceManagerImpl.java | 45 +++-- .../src/com/cloud/vm/UserVmManagerImpl.java | 5 +- .../vmware/mo/DistributedVirtualSwitchMO.java | 77 +++++++++ .../vmware/mo/HypervisorHostHelper.java | 156 ++++++++++++++++-- 7 files changed, 335 insertions(+), 80 deletions(-) diff --git a/core/src/com/cloud/agent/api/PlugNicCommand.java b/core/src/com/cloud/agent/api/PlugNicCommand.java index b896e4540cb..d10c6808a59 100644 --- a/core/src/com/cloud/agent/api/PlugNicCommand.java +++ b/core/src/com/cloud/agent/api/PlugNicCommand.java @@ -17,11 +17,13 @@ package com.cloud.agent.api; import com.cloud.agent.api.to.NicTO; +import com.cloud.vm.VirtualMachine; public class PlugNicCommand extends Command { NicTO nic; String instanceName; + VirtualMachine.Type vmType; public NicTO getNic() { return nic; @@ -35,12 +37,17 @@ public class PlugNicCommand extends Command { protected PlugNicCommand() { } - public PlugNicCommand(NicTO nic, String instanceName) { + public PlugNicCommand(NicTO nic, String instanceName, VirtualMachine.Type vmtype) { this.nic = nic; this.instanceName = instanceName; + this.vmType = vmtype; } public String getVmName() { return instanceName; } + + public VirtualMachine.Type getVMType() { + return vmType; + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 37ddaa14bd9..be4775452f3 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -16,6 +16,32 @@ // under the License. 
package com.cloud.hypervisor.vmware.resource; +import java.io.File; +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.nio.channels.SocketChannel; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TimeZone; +import java.util.UUID; + +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; +import org.apache.log4j.NDC; + import com.cloud.agent.IAgentControl; import com.cloud.agent.api.Answer; import com.cloud.agent.api.AttachIsoCommand; @@ -47,7 +73,6 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.DeleteVMSnapshotAnswer; import com.cloud.agent.api.DeleteVMSnapshotCommand; -import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.GetDomRVersionAnswer; import com.cloud.agent.api.GetDomRVersionCmd; import com.cloud.agent.api.GetHostStatsAnswer; @@ -78,6 +103,7 @@ import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.PoolEjectCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; +import com.cloud.agent.api.PvlanSetupCommand; import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.RebootAnswer; @@ -85,8 +111,8 @@ import com.cloud.agent.api.RebootCommand; import com.cloud.agent.api.RebootRouterCommand; import com.cloud.agent.api.RevertToVMSnapshotAnswer; import com.cloud.agent.api.RevertToVMSnapshotCommand; -import com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.ScaleVmAnswer; +import 
com.cloud.agent.api.ScaleVmCommand; import com.cloud.agent.api.SetupAnswer; import com.cloud.agent.api.SetupCommand; import com.cloud.agent.api.SetupGuestNetworkAnswer; @@ -101,6 +127,7 @@ import com.cloud.agent.api.StopCommand; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.api.UnPlugNicAnswer; import com.cloud.agent.api.UnPlugNicCommand; +import com.cloud.agent.api.UnregisterVMCommand; import com.cloud.agent.api.UpgradeSnapshotCommand; import com.cloud.agent.api.ValidateSnapshotAnswer; import com.cloud.agent.api.ValidateSnapshotCommand; @@ -135,14 +162,14 @@ import com.cloud.agent.api.routing.VmDataCommand; import com.cloud.agent.api.routing.VpnUsersCfgCommand; import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; -import com.cloud.agent.api.storage.CreateVolumeOVACommand; -import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; -import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; -import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; +import com.cloud.agent.api.storage.CreateVolumeOVACommand; import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.PrepareOVAPackingAnswer; +import com.cloud.agent.api.storage.PrepareOVAPackingCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.storage.ResizeVolumeAnswer; @@ -250,30 +277,6 @@ import com.vmware.vim25.VirtualMachineGuestOsIdentifier; import com.vmware.vim25.VirtualMachinePowerState; import com.vmware.vim25.VirtualMachineRuntimeInfo; import com.vmware.vim25.VirtualSCSISharing; -import org.apache.log4j.Logger; -import org.apache.log4j.NDC; - -import 
javax.naming.ConfigurationException; -import java.io.File; -import java.io.IOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.nio.channels.SocketChannel; -import java.rmi.RemoteException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.TimeZone; -import java.util.UUID; public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService { @@ -495,6 +498,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return execute((UnregisterVMCommand) cmd); } else if (clz == ScaleVmCommand.class) { return execute((ScaleVmCommand) cmd); + } else if (clz == PvlanSetupCommand.class) { + return execute((PvlanSetupCommand) cmd); } else { answer = Answer.createUnsupportedCommandAnswer(cmd); } @@ -1037,7 +1042,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String domrGIP = cmd.getAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP); String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME); String gw = cmd.getAccessDetail(NetworkElementCommand.GUEST_NETWORK_GATEWAY); - String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask()));; + String cidr = Long.toString(NetUtils.getCidrSize(nic.getNetmask())); String domainName = cmd.getNetworkDomain(); String dns = cmd.getDefaultDns1(); if (dns == null || dns.isEmpty()) { @@ -1376,7 +1381,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa NicTO nicTo = cmd.getNic(); VirtualDevice nic; - Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false); + Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, false, 
cmd.getVMType());; if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); @@ -1643,7 +1648,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true); } else { networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public", - vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false); + vmMo.getRunningHost(), vlanId, null, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup, null, false); } int nicIndex = allocPublicNicIndex(vmMo); @@ -2537,7 +2542,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus")); - Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus); + VirtualMachine.Type vmType = cmd.getVirtualMachine().getType(); + Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType); if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); @@ -2719,16 +2725,28 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return poolMors; } + + private String getPvlanInfo(NicTO nicTo) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) { + return NetUtils.getIsolatedPvlanFromUri(nicTo.getBroadcastUri()); + } + return null; + } + private String getVlanInfo(NicTO nicTo, String defaultVlan) { if (nicTo.getBroadcastType() == BroadcastDomainType.Native) { return defaultVlan; } - - if (nicTo.getBroadcastType() == 
BroadcastDomainType.Vlan) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan || nicTo.getBroadcastType() == BroadcastDomainType.Pvlan) { if (nicTo.getBroadcastUri() != null) { + if (nicTo.getBroadcastType() == BroadcastDomainType.Vlan) + // For vlan, the broadcast uri is of the form vlan:// return nicTo.getBroadcastUri().getHost(); + else + // for pvlan, the broacast uri will be of the form pvlan://-i + return NetUtils.getPrimaryPvlanFromUri(nicTo.getBroadcastUri()); } else { - s_logger.warn("BroadcastType is not claimed as VLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); + s_logger.warn("BroadcastType is not claimed as VLAN or PVLAN, but without vlan info in broadcast URI. Use vlan info from labeling: " + defaultVlan); return defaultVlan; } } @@ -2737,7 +2755,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return defaultVlan; } - private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus) throws Exception { + private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception { Pair switchName; TrafficType trafficType; VirtualSwitchType switchType; @@ -2761,12 +2779,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); if (VirtualSwitchType.StandardVirtualSwitch == switchType) { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), - nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, + hostMo, getVlanInfo(nicTo, switchName.second()), nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, 
!namePrefix.startsWith("cloud.private")); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + String vlanId = getVlanInfo(nicTo, switchName.second()); + String svlanId = null; + boolean pvlannetwork = (getPvlanInfo(nicTo) == null)?false:true; + if (vmType != null && vmType.equals(VirtualMachine.Type.DomainRouter) && pvlannetwork) { + // plumb this network to the promiscuous vlan. + svlanId = vlanId; + } else { + // plumb this network to the isolated vlan. + svlanId = getPvlanInfo(nicTo); + } + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, vlanId, svlanId, nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, switchType, _portsPerDvPortGroup, nicTo.getGateway(), configureVServiceInNexus); } @@ -3253,7 +3281,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa NicTO[] nics = vm.getNics(); for (NicTO nic : nics) { // prepare network on the host - prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false); + prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType()); } String secStoreUrl = mgr.getSecondaryStorageStoreUrl(Long.parseLong(_dcId)); @@ -3917,6 +3945,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + protected Answer execute(PvlanSetupCommand cmd) { + // Pvlan related operations are performed in the start/stop command paths + // for vmware. This function is implemented to support mgmt layer code + // that issue this command. Note that pvlan operations are supported only + // in Distributed Virtual Switch environments for vmware deployments. 
+ return new Answer(cmd, true, "success"); + } + protected Answer execute(UnregisterVMCommand cmd){ if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource UnregisterVMCommand: " + _gson.toJson(cmd)); @@ -4134,6 +4170,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + @Override public CreateVolumeOVAAnswer execute(CreateVolumeOVACommand cmd) { if (s_logger.isInfoEnabled()) { s_logger.info("Executing resource CreateVolumeOVACommand: " + _gson.toJson(cmd)); diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 01a0384df5c..c58ef220ea1 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -3004,6 +3004,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Random _rand = new Random(System.currentTimeMillis()); + @Override public List listVmNics(Long vmId, Long nicId) { List result = null; if (nicId == null) { @@ -3014,6 +3015,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L return result; } + @Override public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) throws InsufficientAddressCapacityException { String ipaddr = null; diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index 70073741433..9992b7ca01e 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -27,24 +27,6 @@ import java.util.TreeSet; import javax.ejb.Local; import javax.inject.Inject; -import com.cloud.network.vpc.NetworkACLItem; -import com.cloud.network.vpc.NetworkACLItemDao; -import com.cloud.network.vpc.NetworkACLItemVO; -import 
com.cloud.network.vpc.NetworkACLManager; -import com.cloud.network.vpc.PrivateGateway; -import com.cloud.network.vpc.PrivateIpAddress; -import com.cloud.network.vpc.PrivateIpVO; -import com.cloud.network.vpc.StaticRoute; -import com.cloud.network.vpc.StaticRouteProfile; -import com.cloud.network.vpc.Vpc; -import com.cloud.network.vpc.VpcGateway; -import com.cloud.network.vpc.VpcManager; -import com.cloud.network.vpc.VpcVO; -import com.cloud.network.vpc.dao.PrivateIpDao; -import com.cloud.network.vpc.dao.StaticRouteDao; -import com.cloud.network.vpc.dao.VpcDao; -import com.cloud.network.vpc.dao.VpcGatewayDao; -import com.cloud.network.vpc.dao.VpcOfferingDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -108,6 +90,24 @@ import com.cloud.network.dao.Site2SiteCustomerGatewayVO; import com.cloud.network.dao.Site2SiteVpnConnectionDao; import com.cloud.network.dao.Site2SiteVpnGatewayDao; import com.cloud.network.dao.Site2SiteVpnGatewayVO; +import com.cloud.network.vpc.NetworkACLItem; +import com.cloud.network.vpc.NetworkACLItemDao; +import com.cloud.network.vpc.NetworkACLItemVO; +import com.cloud.network.vpc.NetworkACLManager; +import com.cloud.network.vpc.PrivateGateway; +import com.cloud.network.vpc.PrivateIpAddress; +import com.cloud.network.vpc.PrivateIpVO; +import com.cloud.network.vpc.StaticRoute; +import com.cloud.network.vpc.StaticRouteProfile; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.VpcGateway; +import com.cloud.network.vpc.VpcManager; +import com.cloud.network.vpc.VpcVO; +import com.cloud.network.vpc.dao.PrivateIpDao; +import com.cloud.network.vpc.dao.StaticRouteDao; +import com.cloud.network.vpc.dao.VpcDao; +import com.cloud.network.vpc.dao.VpcGatewayDao; +import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.network.vpn.Site2SiteVpnManager; import com.cloud.offering.NetworkOffering; import com.cloud.user.Account; @@ -127,7 +127,6 @@ import com.cloud.vm.VirtualMachineProfile; import 
com.cloud.vm.VirtualMachineProfile.Param; import com.cloud.vm.dao.VMInstanceDao; - @Component @Local(value = {VpcVirtualNetworkApplianceManager.class, VpcVirtualNetworkApplianceService.class}) public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplianceManagerImpl implements VpcVirtualNetworkApplianceManager{ @@ -339,7 +338,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian DomainRouterVO router = _routerDao.findById(vm.getId()); if (router.getState() == State.Running) { try { - PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(nic, vm.getName(), vm.getType()); Commands cmds = new Commands(OnError.Stop); cmds.addCommand("plugnic", plugNicCmd); @@ -748,7 +747,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian // if (rule.getSourceCidrList() == null && (rule.getPurpose() == Purpose.Firewall || rule.getPurpose() == Purpose.NetworkACL)) { // _firewallDao.loadSourceCidrs((FirewallRuleVO)rule); // } - NetworkACLTO ruleTO = new NetworkACLTO((NetworkACLItemVO)rule, guestVlan, rule.getTrafficType()); + NetworkACLTO ruleTO = new NetworkACLTO(rule, guestVlan, rule.getTrafficType()); rulesTO.add(ruleTO); } } @@ -828,7 +827,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian _routerDao.update(routerVO.getId(), routerVO); } } - PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, publicNic.getNetworkId(), publicNic.getBroadcastUri().toString()), router.getInstanceName(), router.getType()); cmds.addCommand(plugNicCmd); VpcVO vpc = _vpcDao.findById(router.getVpcId()); NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(router.getPrivateIpAddress(), router.getInstanceName(), true, publicNic.getIp4Address(), 
vpc.getCidr()); @@ -851,7 +850,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian for (Pair nicNtwk : guestNics) { Nic guestNic = nicNtwk.first(); //plug guest nic - PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(getNicTO(router, guestNic.getNetworkId(), null), router.getInstanceName(), router.getType()); cmds.addCommand(plugNicCmd); if (!_networkModel.isPrivateGateway(guestNic)) { diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 7416cae61de..aa065294f8c 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -2898,6 +2898,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use originalIp = nic.getIp4Address(); guestNic = nic; guestNetwork = network; + // In vmware, we will be effecting pvlan settings in portgroups in StartCommand. 
+ if (profile.getHypervisorType() != HypervisorType.VMware) { if (nic.getBroadcastUri().getScheme().equals("pvlan")) { if (!setupVmForPvlan(true, hostId, nic)) { return false; @@ -2905,6 +2907,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } } + } boolean ipChanged = false; if (originalIp != null && !originalIp.equalsIgnoreCase(returnedIp)) { if (returnedIp != null && guestNic != null) { @@ -4336,7 +4339,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use UserVmVO vmVO = _vmDao.findById(vm.getId()); if (vmVO.getState() == State.Running) { try { - PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName()); + PlugNicCommand plugNicCmd = new PlugNicCommand(nic,vm.getName(), vm.getType()); Commands cmds = new Commands(OnError.Stop); cmds.addCommand("plugnic",plugNicCmd); _agentMgr.send(dest.getHost().getId(),cmds); diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java index 247be2a5fab..b00b97ca3ae 100644 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/DistributedVirtualSwitchMO.java @@ -17,13 +17,20 @@ package com.cloud.hypervisor.vmware.mo; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.log4j.Logger; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.DVPortgroupConfigSpec; +import com.vmware.vim25.DVSConfigInfo; import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VMwareDVSConfigInfo; +import com.vmware.vim25.VMwareDVSConfigSpec; +import com.vmware.vim25.VMwareDVSPvlanMapEntry; public class DistributedVirtualSwitchMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(DistributedVirtualSwitchMO.class); @@ 
-46,4 +53,74 @@ public class DistributedVirtualSwitchMO extends BaseMO { // TODO(sateesh): Update numPorts _context.getService().reconfigureDVPortgroupTask(dvPortGroupMor, dvPortGroupSpec); } + + public void updateVMWareDVSwitch(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception { + _context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec); + } + + public TaskInfo updateVMWareDVSwitchGetTask(ManagedObjectReference dvSwitchMor, VMwareDVSConfigSpec dvsSpec) throws Exception { + ManagedObjectReference task = _context.getService().reconfigureDvsTask(dvSwitchMor, dvsSpec); + TaskInfo info = (TaskInfo) (_context.getVimClient().getDynamicProperty(task, "info")); + boolean waitvalue = _context.getVimClient().waitForTask(task); + return info; + } + + public String getDVSConfigVersion(ManagedObjectReference dvSwitchMor) throws Exception { + assert (dvSwitchMor != null); + DVSConfigInfo dvsConfigInfo = (DVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config"); + return dvsConfigInfo.getConfigVersion(); + } + + public Map retrieveVlanPvlan(int vlanid, int secondaryvlanid, ManagedObjectReference dvSwitchMor) throws Exception { + assert (dvSwitchMor != null); + + Map result = new HashMap(); + + VMwareDVSConfigInfo configinfo = (VMwareDVSConfigInfo)_context.getVimClient().getDynamicProperty(dvSwitchMor, "config"); + List pvlanconfig = null; + pvlanconfig = configinfo.getPvlanConfig(); + + if (null == pvlanconfig || 0 == pvlanconfig.size()) { + return result; + } + // Iterate through the pvlanMapList and check if the specified vlan id and pvlan id exist. If they do, set the fields in result accordingly. + + for (VMwareDVSPvlanMapEntry mapEntry : pvlanconfig) { + int entryVlanid = mapEntry.getPrimaryVlanId(); + int entryPvlanid = mapEntry.getSecondaryVlanId(); + if (entryVlanid == entryPvlanid) { + // promiscuous + if (vlanid == entryVlanid) { + // pvlan type will always be promiscuous in this case. 
+ result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } else if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) { + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + } else { + if (vlanid == entryVlanid) { + // vlan id in entry is promiscuous + result.put(vlanid, HypervisorHostHelper.PvlanType.promiscuous); + } else if (vlanid == entryPvlanid) { + result.put(vlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + if ((vlanid != secondaryvlanid) && secondaryvlanid == entryVlanid) { + //promiscuous + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.promiscuous); + } else if (secondaryvlanid == entryPvlanid) { + result.put(secondaryvlanid, HypervisorHostHelper.PvlanType.valueOf(mapEntry.getPvlanType())); + } + + } + // If we already know that the vlanid is being used as a non primary vlan, it's futile to + // go over the entire list. Return. + if (result.containsKey(vlanid) && result.get(vlanid) != HypervisorHostHelper.PvlanType.promiscuous) + return result; + + // If we've already found both vlanid and pvlanid, we have enough info to make a decision. Return. 
+ if (result.containsKey(vlanid) && result.containsKey(secondaryvlanid)) + return result; + } + return result; + } + } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 7f323c5e400..20f84784157 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -39,6 +39,7 @@ import com.cloud.utils.cisco.n1kv.vsm.VsmCommand.SwitchPortMode; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.BoolPolicy; import com.vmware.vim25.DVPortSetting; import com.vmware.vim25.DVPortgroupConfigInfo; @@ -59,7 +60,11 @@ import com.vmware.vim25.ObjectContent; import com.vmware.vim25.OvfCreateImportSpecParams; import com.vmware.vim25.OvfCreateImportSpecResult; import com.vmware.vim25.OvfFileItem; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VMwareDVSConfigSpec; import com.vmware.vim25.VMwareDVSPortSetting; +import com.vmware.vim25.VMwareDVSPvlanConfigSpec; +import com.vmware.vim25.VMwareDVSPvlanMapEntry; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualDeviceConfigSpecOperation; import com.vmware.vim25.VirtualLsiLogicController; @@ -67,6 +72,7 @@ import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineVideoCard; import com.vmware.vim25.VirtualSCSISharing; +import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec; @@ -124,12 +130,17 @@ public class HypervisorHostHelper { } } - public static String composeCloudNetworkName(String prefix, String vlanId, Integer 
networkRateMbps, String vSwitchName) { + public static String composeCloudNetworkName(String prefix, String vlanId, String svlanId, Integer networkRateMbps, String vSwitchName) { StringBuffer sb = new StringBuffer(prefix); - if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) + if(vlanId == null || UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { sb.append(".untagged"); - else + } else { sb.append(".").append(vlanId); + if (svlanId != null) { + sb.append(".").append("s" + svlanId); + } + + } if(networkRateMbps != null && networkRateMbps.intValue() > 0) sb.append(".").append(String.valueOf(networkRateMbps)); @@ -412,7 +423,7 @@ public class HypervisorHostHelper { */ public static Pair prepareNetwork(String physicalNetwork, String namePrefix, - HostMO hostMo, String vlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, + HostMO hostMo, String vlanId, String secondaryvlanId, Integer networkRateMbps, Integer networkRateMulticastMbps, long timeOutMs, VirtualSwitchType vSwitchType, int numPorts, String gateway, boolean configureVServiceInNexus) throws Exception { ManagedObjectReference morNetwork = null; VmwareContext context = hostMo.getContext(); @@ -428,20 +439,28 @@ public class HypervisorHostHelper { boolean createGCTag = false; String networkName; Integer vid = null; + Integer spvlanid = null; // secondary pvlan id if(vlanId != null && !UNTAGGED_VLAN_NAME.equalsIgnoreCase(vlanId)) { createGCTag = true; vid = Integer.parseInt(vlanId); } - networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, physicalNetwork); + if (secondaryvlanId != null) { + spvlanid = Integer.parseInt(secondaryvlanId); + } + networkName = composeCloudNetworkName(namePrefix, vlanId, secondaryvlanId, networkRateMbps, physicalNetwork); if (vSwitchType == VirtualSwitchType.VMwareDistributedVirtualSwitch) { + VMwareDVSConfigSpec dvsSpec = null; DVSTrafficShapingPolicy shapingPolicy; - VmwareDistributedVirtualSwitchVlanSpec vlanSpec; + 
VmwareDistributedVirtualSwitchVlanSpec vlanSpec = null; + VmwareDistributedVirtualSwitchPvlanSpec pvlanSpec = null; + //VMwareDVSPvlanConfigSpec pvlanSpec = null; DVSSecurityPolicy secPolicy; VMwareDVSPortSetting dvsPortSetting; DVPortgroupConfigSpec dvPortGroupSpec; DVPortgroupConfigInfo dvPortgroupInfo; + //DVSConfigInfo dvsInfo; dvSwitchName = physicalNetwork; // TODO(sateesh): Remove this after ensuring proper default value for vSwitchName throughout traffic types @@ -462,13 +481,95 @@ public class HypervisorHostHelper { dvSwitchMo = new DistributedVirtualSwitchMO(context, morDvSwitch); shapingPolicy = getDVSShapingPolicy(networkRateMbps); - if (vid != null) { - vlanSpec = createDVPortVlanIdSpec(vid); - } else { - vlanSpec = createDVPortVlanSpec(); - } secPolicy = createDVSSecurityPolicy(); + + // First, if both vlan id and pvlan id are provided, we need to + // reconfigure the DVSwitch to have a tuple of + // type isolated. + if (vid != null && spvlanid != null) { + // First check if the vlan/pvlan pair already exists on this dvswitch. + + Map vlanmap = dvSwitchMo.retrieveVlanPvlan(vid, spvlanid, morDvSwitch); + if (vlanmap.size() != 0) { + // Then either vid or pvlanid or both are already being used. + if (vlanmap.containsKey(vid) && vlanmap.get(vid) != HypervisorHostHelper.PvlanType.promiscuous) { + // This VLAN ID is already setup as a non-promiscuous vlan id on the DVS. Throw an exception. + String msg = "VLAN ID " + vid + " is already in use as a " + vlanmap.get(vid).toString() + " VLAN on the DVSwitch"; + s_logger.error(msg); + throw new Exception(msg); + } + if ((vid != spvlanid) && vlanmap.containsKey(spvlanid) && vlanmap.get(spvlanid) != HypervisorHostHelper.PvlanType.isolated) { + // This PVLAN ID is already setup as a non-isolated vlan id on the DVS. Throw an exception. 
+ String msg = "PVLAN ID " + spvlanid + " is already in use as a " + vlanmap.get(spvlanid).toString() + " VLAN in the DVSwitch"; + s_logger.error(msg); + throw new Exception(msg); + } + } + + // First create a DVSconfig spec. + dvsSpec = new VMwareDVSConfigSpec(); + // Next, add the required primary and secondary vlan config specs to the dvs config spec. + if (!vlanmap.containsKey(vid)) { + VMwareDVSPvlanConfigSpec ppvlanConfigSpec = createDVPortPvlanConfigSpec(vid, vid, PvlanType.promiscuous, PvlanOperation.add); + dvsSpec.getPvlanConfigSpec().add(ppvlanConfigSpec); + } + if ( !vid.equals(spvlanid) && !vlanmap.containsKey(spvlanid)) { + VMwareDVSPvlanConfigSpec spvlanConfigSpec = createDVPortPvlanConfigSpec(vid, spvlanid, PvlanType.isolated, PvlanOperation.add); + dvsSpec.getPvlanConfigSpec().add(spvlanConfigSpec); + } + + if (dvsSpec.getPvlanConfigSpec().size() > 0) { + // We have something to configure on the DVS... so send it the command. + // When reconfiguring a vmware DVSwitch, we need to send in the configVersion in the spec. + // Let's retrieve this switch's configVersion first. + String dvsConfigVersion = dvSwitchMo.getDVSConfigVersion(morDvSwitch); + dvsSpec.setConfigVersion(dvsConfigVersion); + // Reconfigure the dvs using this spec. + + try { + TaskInfo reconfigTask = dvSwitchMo.updateVMWareDVSwitchGetTask(morDvSwitch, dvsSpec); + } catch (Exception e) { + if(e instanceof AlreadyExistsFaultMsg) { + s_logger.info("Specified vlan id (" + vid + ") private vlan id (" + spvlanid + ") tuple already configured on VMWare DVSwitch"); + // Do nothing, good if the tuple's already configured on the dvswitch. + } else { + // Rethrow the exception + s_logger.error("Failed to configure vlan/pvlan tuple on VMware DVSwitch: " + vid + "/" + spvlanid + ", failure message: " + e.getMessage()); + e.printStackTrace(); + throw e; + } + } + } + // Else the vlan/pvlan pair already exists on the DVSwitch, and we needn't configure it again. 
+ } + + // Next, create the port group. For this, we need to create a VLAN spec. + if (vid == null) { + vlanSpec = createDVPortVlanSpec(); + } else { + if (spvlanid == null) { + // Create vlan spec. + vlanSpec = createDVPortVlanIdSpec(vid); + } else { + // Create a pvlan spec. The pvlan spec is different from the pvlan config spec + // that we created earlier. The pvlan config spec is used to configure the switch + // with a tuple. The pvlan spec is used + // to configure a port group (i.e., a network) with a secondary vlan id. We don't + // need to mention more than the secondary vlan id because one secondary vlan id + // can be associated with only one primary vlan id. Give vCenter the secondary vlan id, + // and it will find out the associated primary vlan id and do the rest of the + // port group configuration. + pvlanSpec = createDVPortPvlanIdSpec(spvlanid); + } + } + + // NOTE - VmwareDistributedVirtualSwitchPvlanSpec extends VmwareDistributedVirtualSwitchVlanSpec. + if (pvlanSpec != null) { + dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, pvlanSpec); + } else { dvsPortSetting = createVmwareDVPortSettingSpec(shapingPolicy, secPolicy, vlanSpec); + } + dvPortGroupSpec = createDvPortGroupSpec(networkName, dvsPortSetting, numPorts); if (!dataCenterMo.hasDvPortGroup(networkName)) { @@ -627,7 +728,6 @@ public class HypervisorHostHelper { dvsPortSetting.setSecurityPolicy(secPolicy); dvsPortSetting.setInShapingPolicy(shapingPolicy); dvsPortSetting.setOutShapingPolicy(shapingPolicy); - return dvsPortSetting; } @@ -658,6 +758,35 @@ public class HypervisorHostHelper { return shapingPolicy; } + public static VmwareDistributedVirtualSwitchPvlanSpec createDVPortPvlanIdSpec(int pvlanId) { + VmwareDistributedVirtualSwitchPvlanSpec pvlanIdSpec = new VmwareDistributedVirtualSwitchPvlanSpec(); + pvlanIdSpec.setPvlanId(pvlanId); + return pvlanIdSpec; + } + + public enum PvlanOperation { + add, + edit, + remove + } + + public enum PvlanType { + 
promiscuous, + isolated, + community, // We don't use Community + } + + public static VMwareDVSPvlanConfigSpec createDVPortPvlanConfigSpec(int vlanId, int secondaryVlanId, PvlanType pvlantype, PvlanOperation operation) { + VMwareDVSPvlanConfigSpec pvlanConfigSpec = new VMwareDVSPvlanConfigSpec(); + VMwareDVSPvlanMapEntry map = new VMwareDVSPvlanMapEntry(); + map.setPvlanType(pvlantype.toString()); + map.setPrimaryVlanId(vlanId); + map.setSecondaryVlanId(secondaryVlanId); + pvlanConfigSpec.setPvlanEntry(map); + + pvlanConfigSpec.setOperation(operation.toString()); + return pvlanConfigSpec; + } public static VmwareDistributedVirtualSwitchVlanIdSpec createDVPortVlanIdSpec(int vlanId) { VmwareDistributedVirtualSwitchVlanIdSpec vlanIdSpec = new VmwareDistributedVirtualSwitchVlanIdSpec(); vlanIdSpec.setVlanId(vlanId); @@ -706,7 +835,7 @@ public class HypervisorHostHelper { vid = Integer.parseInt(vlanId); } - networkName = composeCloudNetworkName(namePrefix, vlanId, networkRateMbps, vSwitchName); + networkName = composeCloudNetworkName(namePrefix, vlanId, null, networkRateMbps, vSwitchName); HostNetworkSecurityPolicy secPolicy = null; if (namePrefix.equalsIgnoreCase("cloud.private")) { secPolicy = new HostNetworkSecurityPolicy(); @@ -1036,6 +1165,7 @@ public class HypervisorHostHelper { context.uploadVmdkFile(ovfFileItem.isCreate() ? 
"PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate () { + @Override public void action(Long param) { progressReporter.reportProgress((int)(param * 100 / totalBytes)); } From a2eb7bab1ef67de6589053ccd45515cf410d187c Mon Sep 17 00:00:00 2001 From: Prachi Damle Date: Thu, 16 May 2013 14:53:46 -0700 Subject: [PATCH 03/19] CLOUDSTACK-2056: DeploymentPlanner choice via ServiceOffering - Changes merged from planner_reserve branch - Exposing deploymentplanner as an optional parameter while creating a service offering - changes to DeploymentPlanningManagerImpl to make sure host reserve-release happens between conflicting planner usages. --- .../deploy/DeploymentClusterPlanner.java | 45 + .../com/cloud/deploy/DeploymentPlanner.java | 26 +- api/src/com/cloud/event/EventTypes.java | 2 +- .../com/cloud/offering/ServiceOffering.java | 2 + .../com/cloud/resource/ResourceService.java | 4 +- .../com/cloud/server/ManagementService.java | 2 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../config/ListDeploymentPlannersCmd.java | 71 ++ .../admin/host/ReleaseHostReservationCmd.java | 105 ++ .../offering/CreateServiceOfferingCmd.java | 6 + .../api/response/ServiceOfferingResponse.java | 12 + client/tomcatconf/applicationContext.xml.in | 28 +- client/tomcatconf/commands.properties.in | 2 + .../cloud/migration/ServiceOffering21VO.java | 5 + .../com/cloud/service/ServiceOfferingVO.java | 17 + .../cloud/upgrade/dao/Upgrade410to420.java | 60 +- .../src/com/cloud/vm/dao/VMInstanceDao.java | 28 +- .../com/cloud/vm/dao/VMInstanceDaoImpl.java | 147 ++- .../ClusterScopeStoragePoolAllocator.java | 18 +- .../allocator/LocalStoragePoolAllocator.java | 21 +- .../ZoneWideStoragePoolAllocator.java | 25 +- .../deploy/UserConcentratedPodPlanner.java | 34 +- .../cloud/deploy/UserDispersingPlanner.java | 14 +- .../manager/BaremetalPlannerSelector.java | 39 - server/pom.xml | 5 + .../allocator/impl/FirstFitAllocator.java | 110 +- 
.../query/dao/ServiceOfferingJoinDaoImpl.java | 1 + .../api/query/vo/ServiceOfferingJoinVO.java | 11 + .../cloud/capacity/CapacityManagerImpl.java | 38 + .../src/com/cloud/configuration/Config.java | 3 + .../configuration/ConfigurationManager.java | 3 +- .../ConfigurationManagerImpl.java | 162 +-- .../deploy/AbstractDeployPlannerSelector.java | 84 -- .../deploy/DeploymentPlanningManagerImpl.java | 1032 ++++++++++++++++- .../src/com/cloud/deploy/FirstFitPlanner.java | 617 ++-------- .../deploy/HypervisorVmPlannerSelector.java | 54 - .../deploy/PlannerHostReservationVO.java | 117 ++ .../PlannerHostReservationDao.java} | 54 +- .../dao/PlannerHostReservationDaoImpl.java | 63 + .../cloud/resource/ResourceManagerImpl.java | 43 + .../cloud/server/ManagementServerImpl.java | 125 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 26 +- .../resource/MockResourceManagerImpl.java | 6 + .../vm/DeploymentPlanningManagerImplTest.java | 359 ++++++ .../vpc/MockConfigurationManagerImpl.java | 2 +- .../ChildTestConfiguration.java | 72 +- server/test/resources/affinityContext.xml | 42 + setup/db/db/schema-410to420.sql | 61 +- ...ploy_vms_with_varied_deploymentplanners.py | 164 +++ tools/apidoc/gen_toc.py | 1 + 50 files changed, 2878 insertions(+), 1091 deletions(-) create mode 100644 api/src/com/cloud/deploy/DeploymentClusterPlanner.java create mode 100644 api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java create mode 100644 api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java delete mode 100755 plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java delete mode 100755 server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java delete mode 100755 server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java create mode 100644 server/src/com/cloud/deploy/PlannerHostReservationVO.java rename server/src/com/cloud/deploy/{DeployPlannerSelector.java => dao/PlannerHostReservationDao.java} 
(67%) mode change 100755 => 100644 create mode 100644 server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java create mode 100644 server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java create mode 100644 server/test/resources/affinityContext.xml create mode 100644 test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py diff --git a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java new file mode 100644 index 00000000000..1a19c71dbfa --- /dev/null +++ b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import java.util.List; + +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +/** + */ +public interface DeploymentClusterPlanner extends DeploymentPlanner { + /** + * This is called to determine list of possible clusters where a virtual + * machine can be deployed. + * + * @param vm + * virtual machine. + * @param plan + * deployment plan that tells you where it's being deployed to. 
+ * @param avoid + * avoid these data centers, pods, clusters, or hosts. + * @return DeployDestination for that virtual machine. + */ + List orderClusters(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) + throws InsufficientServerCapacityException; + + PlannerResourceUsage getResourceUsage(); + +} diff --git a/api/src/com/cloud/deploy/DeploymentPlanner.java b/api/src/com/cloud/deploy/DeploymentPlanner.java index 537dd314733..eb56a591f6b 100644 --- a/api/src/com/cloud/deploy/DeploymentPlanner.java +++ b/api/src/com/cloud/deploy/DeploymentPlanner.java @@ -35,6 +35,7 @@ import com.cloud.vm.VirtualMachineProfile; /** */ public interface DeploymentPlanner extends Adapter { + /** * plan is called to determine where a virtual machine should be running. * @@ -46,6 +47,7 @@ public interface DeploymentPlanner extends Adapter { * avoid these data centers, pods, clusters, or hosts. * @return DeployDestination for that virtual machine. */ + @Deprecated DeployDestination plan(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException; /** @@ -88,6 +90,10 @@ public interface DeploymentPlanner extends Adapter { userconcentratedpod_firstfit; } + public enum PlannerResourceUsage { + Shared, Dedicated; + } + public static class ExcludeList { private Set _dcIds; private Set _podIds; @@ -99,10 +105,22 @@ public interface DeploymentPlanner extends Adapter { } public ExcludeList(Set _dcIds, Set _podIds, Set _clusterIds, Set _hostIds, Set _poolIds) { - this._dcIds = _dcIds; - this._podIds = _podIds; - this._clusterIds = _clusterIds; - this._poolIds = _poolIds; + if (_dcIds != null) { + this._dcIds = new HashSet(_dcIds); + } + if (_podIds != null) { + this._podIds = new HashSet(_podIds); + } + if (_clusterIds != null) { + this._clusterIds = new HashSet(_clusterIds); + } + + if (_hostIds != null) { + this._hostIds = new HashSet(_hostIds); + } + if (_poolIds != null) { + this._poolIds = new HashSet(_poolIds); + } } public 
boolean add(InsufficientCapacityException e) { diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index e317a8fbf1c..ee7f5b7d89f 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -423,6 +423,7 @@ public class EventTypes { public static final String EVENT_INTERNAL_LB_VM_START = "INTERNALLBVM.START"; public static final String EVENT_INTERNAL_LB_VM_STOP = "INTERNALLBVM.STOP"; + public static final String EVENT_HOST_RESERVATION_RELEASE = "HOST.RESERVATION.RELEASE"; // Dedicated guest vlan range public static final String EVENT_GUEST_VLAN_RANGE_DEDICATE = "GUESTVLANRANGE.DEDICATE"; public static final String EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE = "GUESTVLANRANGE.RELEASE"; @@ -728,7 +729,6 @@ public class EventTypes { entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_UPDATE, AutoScaleVmGroup.class.getName()); entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_ENABLE, AutoScaleVmGroup.class.getName()); entityEventDetails.put(EVENT_AUTOSCALEVMGROUP_DISABLE, AutoScaleVmGroup.class.getName()); - entityEventDetails.put(EVENT_GUEST_VLAN_RANGE_DEDICATE, GuestVlan.class.getName()); entityEventDetails.put(EVENT_DEDICATED_GUEST_VLAN_RANGE_RELEASE, GuestVlan.class.getName()); } diff --git a/api/src/com/cloud/offering/ServiceOffering.java b/api/src/com/cloud/offering/ServiceOffering.java index 165369c5e9b..45d5f38952b 100755 --- a/api/src/com/cloud/offering/ServiceOffering.java +++ b/api/src/com/cloud/offering/ServiceOffering.java @@ -108,4 +108,6 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity, boolean getDefaultUse(); String getSystemVmType(); + + String getDeploymentPlanner(); } diff --git a/api/src/com/cloud/resource/ResourceService.java b/api/src/com/cloud/resource/ResourceService.java index 08e2585d1a7..ce0df635bfe 100755 --- a/api/src/com/cloud/resource/ResourceService.java +++ b/api/src/com/cloud/resource/ResourceService.java @@ -100,11 +100,13 @@ 
public interface ResourceService { Swift discoverSwift(AddSwiftCmd addSwiftCmd) throws DiscoveryException; S3 discoverS3(AddS3Cmd cmd) throws DiscoveryException; - + List getSupportedHypervisorTypes(long zoneId, boolean forVirtualRouter, Long podId); Pair, Integer> listSwifts(ListSwiftsCmd cmd); List listS3s(ListS3sCmd cmd); + boolean releaseHostReservation(Long hostId); + } diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index 22494072648..59b83c9bbce 100755 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -419,5 +419,7 @@ public interface ManagementService { * @return List of capacities */ List listTopConsumedResources(ListCapacityCmd cmd); + + List listDeploymentPlanners(); } diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index f09a00b98f8..8d7739c13e1 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -496,6 +496,7 @@ public class ApiConstants { public static final String AFFINITY_GROUP_NAMES = "affinitygroupnames"; public static final String ASA_INSIDE_PORT_PROFILE = "insideportprofile"; public static final String AFFINITY_GROUP_ID = "affinitygroupid"; + public static final String DEPLOYMENT_PLANNER = "deploymentplanner"; public static final String ACL_ID = "aclid"; public static final String NUMBER = "number"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java new file mode 100644 index 00000000000..598b620c301 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/config/ListDeploymentPlannersCmd.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.config; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.response.DeploymentPlannersResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +@APICommand(name = "listDeploymentPlanners", description = "Lists all DeploymentPlanners available.", responseObject = DeploymentPlannersResponse.class) +public class ListDeploymentPlannersCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListDeploymentPlannersCmd.class.getName()); + + private static final String s_name = "listdeploymentplannersresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + 
public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + List planners = _mgr.listDeploymentPlanners(); + ListResponse response = new ListResponse(); + List plannerResponses = new ArrayList(); + + for (String planner : planners) { + DeploymentPlannersResponse plannerResponse = new DeploymentPlannersResponse(); + plannerResponse.setName(planner); + plannerResponse.setObjectName("deploymentPlanner"); + plannerResponses.add(plannerResponse); + } + + response.setResponses(plannerResponses); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java new file mode 100644 index 00000000000..d09cf38cc50 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/host/ReleaseHostReservationCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.host; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.async.AsyncJob; +import com.cloud.event.EventTypes; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "releaseHostReservation", description = "Releases host reservation.", responseObject = SuccessResponse.class) +public class ReleaseHostReservationCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ReleaseHostReservationCmd.class.getName()); + + private static final String s_name = "releasehostreservationresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=HostResponse.class, + required=true, description="the host ID") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override 
+ public String getEventType() { + return EventTypes.EVENT_HOST_RESERVATION_RELEASE; + } + + @Override + public String getEventDescription() { + return "releasing reservation for host: " + getId(); + } + + @Override + public AsyncJob.Type getInstanceType() { + return AsyncJob.Type.Host; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute(){ + boolean result = _resourceService.releaseHostReservation(getId()); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to release host reservation"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index 0e35276d914..c155b706fc0 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -84,6 +84,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name=ApiConstants.NETWORKRATE, type=CommandType.INTEGER, description="data transfer rate in megabits per second allowed. Supported only for non-System offering and system offerings having \"domainrouter\" systemvmtype") private Integer networkRate; + @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "The deployment planner heuristics used to deploy a VM of this offering. 
If null, value of global config vm.deployment.planner is used") + private String deploymentPlanner; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -148,6 +151,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { return networkRate; } + public String getDeploymentPlanner() { + return deploymentPlanner; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index f35e87e3b0f..08ebbb05887 100644 --- a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -18,6 +18,8 @@ package org.apache.cloudstack.api.response; import java.util.Date; +import javax.persistence.Column; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; @@ -82,6 +84,8 @@ public class ServiceOfferingResponse extends BaseResponse { @SerializedName(ApiConstants.NETWORKRATE) @Param(description="data transfer rate in megabits per second allowed.") private Integer networkRate; + @SerializedName(ApiConstants.DEPLOYMENT_PLANNER) @Param(description="deployment strategy used to deploy VM.") + private String deploymentPlanner; public String getId() { return id; @@ -225,4 +229,12 @@ public class ServiceOfferingResponse extends BaseResponse { public void setNetworkRate(Integer networkRate) { this.networkRate = networkRate; } + + public String getDeploymentPlanner() { + return deploymentPlanner; + } + + public void setDeploymentPlanner(String deploymentPlanner) { + this.deploymentPlanner = deploymentPlanner; + } } diff --git a/client/tomcatconf/applicationContext.xml.in 
b/client/tomcatconf/applicationContext.xml.in index 6406660c814..1d1eca4c191 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ b/client/tomcatconf/applicationContext.xml.in @@ -540,15 +540,11 @@ Deployment planners --> - - - + - - - + @@ -605,10 +601,6 @@ - - - - @@ -623,6 +615,7 @@ + @@ -630,9 +623,7 @@ - - - + @@ -833,17 +824,13 @@ - + - - - - @@ -859,6 +846,8 @@ + + @@ -868,4 +857,7 @@ + + + diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index f57cc3c1815..4cd9065b641 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -212,6 +212,7 @@ listConfigurations=1 ldapConfig=1 ldapRemove=1 listCapabilities=15 +listDeploymentPlanners=1 #### pod commands createPod=1 @@ -261,6 +262,7 @@ listHosts=3 findHostsForMigration=1 addSecondaryStorage=1 updateHostPassword=1 +releaseHostReservation=1 #### volume commands attachVolume=15 diff --git a/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java b/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java index d07be6462f1..7a49e63e5b3 100644 --- a/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java +++ b/engine/schema/src/com/cloud/migration/ServiceOffering21VO.java @@ -174,5 +174,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe return false; } + @Override + public String getDeploymentPlanner() { + // TODO Auto-generated method stub + return null; + } } diff --git a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java index 94a73515e6a..fd31d301bc3 100755 --- a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java +++ b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java @@ -68,6 +68,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering @Column(name="sort_key") int sortKey; + @Column(name = "deployment_planner") + private String 
deploymentPlanner = null; + protected ServiceOfferingVO() { super(); } @@ -104,6 +107,15 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.hostTag = hostTag; } + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage, + boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, + String hostTag, String deploymentPlanner) { + this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm, + displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId, hostTag); + this.deploymentPlanner = deploymentPlanner; + } + @Override public boolean getOfferHA() { return offerHA; @@ -208,4 +220,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering return volatileVm; } + @Override + public String getDeploymentPlanner() { + return deploymentPlanner; + } + } diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java index 1d35c8981fa..c03d377cbe0 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -17,6 +17,10 @@ package com.cloud.upgrade.dao; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import org.apache.log4j.Logger; import java.io.File; import java.sql.Connection; import java.sql.Date; @@ -25,12 +29,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.UUID; - import com.cloud.network.vpc.NetworkACL; -import org.apache.log4j.Logger; - -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; public class 
Upgrade410to420 implements DbUpgrade { final static Logger s_logger = Logger.getLogger(Upgrade410to420.class); @@ -70,6 +69,7 @@ public class Upgrade410to420 implements DbUpgrade { updatePrimaryStore(conn); addEgressFwRulesForSRXGuestNw(conn); upgradeEIPNetworkOfferings(conn); + updateGlobalDeploymentPlanner(conn); upgradeDefaultVpcOffering(conn); upgradePhysicalNtwksWithInternalLbProvider(conn); updateNetworkACLs(conn); @@ -563,6 +563,53 @@ public class Upgrade410to420 implements DbUpgrade { } } + private void updateGlobalDeploymentPlanner(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + + try { + pstmt = conn + .prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'"); + rs = pstmt.executeQuery(); + while (rs.next()) { + String globalValue = rs.getString(1); + String plannerName = "FirstFitPlanner"; + + if (globalValue != null) { + if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.random.toString())) { + plannerName = "FirstFitPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) { + plannerName = "FirstFitPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit + .toString())) { + plannerName = "UserConcentratedPodPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random + .toString())) { + plannerName = "UserConcentratedPodPlanner"; + } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) { + plannerName = "UserDispersingPlanner"; + } + } + // update vm.deployment.planner global config + pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? 
where name = 'vm.deployment.planner'"); + pstmt.setString(1, plannerName); + pstmt.executeUpdate(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to set vm.deployment.planner global config", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + private void upgradeDefaultVpcOffering(Connection conn) { PreparedStatement pstmt = null; @@ -596,8 +643,6 @@ public class Upgrade410to420 implements DbUpgrade { } } - - private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) { PreparedStatement pstmt = null; @@ -644,7 +689,6 @@ public class Upgrade410to420 implements DbUpgrade { } catch (SQLException e) { } } - } private void addHostDetailsIndex(Connection conn) { diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java index c604027abde..830e4643251 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDao.java @@ -38,14 +38,14 @@ public interface VMInstanceDao extends GenericDao, StateDao< * @return list of VMInstanceVO running on that host. */ List listByHostId(long hostId); - + /** * List VMs by zone ID * @param zoneId * @return list of VMInstanceVO in the specified zone */ List listByZoneId(long zoneId); - + /** * List VMs by pod ID * @param podId @@ -59,32 +59,32 @@ public interface VMInstanceDao extends GenericDao, StateDao< * @return list of VMInstanceVO in the specified zone, deployed from the specified template, that are not expunged */ public List listNonExpungedByZoneAndTemplate(long zoneId, long templateId); - + /** * Find vm instance with names like. - * + * * @param name name that fits SQL like. * @return list of VMInstanceVO */ List findVMInstancesLike(String name); - + List findVMInTransition(Date time, State... states); List listByTypes(VirtualMachine.Type... 
types); - + VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); - + VMInstanceVO findVMByInstanceName(String name); void updateProxyId(long id, Long proxyId, Date time); List listByHostIdTypes(long hostid, VirtualMachine.Type... types); - + List listUpByHostIdTypes(long hostid, VirtualMachine.Type... types); List listByZoneIdAndType(long zoneId, VirtualMachine.Type type); List listUpByHostId(Long hostId); List listByLastHostId(Long hostId); - + List listByTypeAndState(VirtualMachine.Type type, State state); List listByAccountId(long accountId); @@ -92,9 +92,9 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByClusterId(long clusterId); // this does not pull up VMs which are starting List listLHByClusterId(long clusterId); // get all the VMs even starting one on this cluster - + List listVmsMigratingFromHost(Long hostId); - + public Long countRunningByHostId(long hostId); Pair, Map> listClusterIdsInZoneByVmCount(long zoneId, long accountId); @@ -106,7 +106,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId); Long countRunningByAccount(long accountId); - + List listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... types); /** @@ -116,4 +116,8 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... types); + List findByHostInStates(Long hostId, State... states); + + List listStartingWithNoHostId(); + } diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 7198b7c24e0..ffb1a0b8b3d 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -5,7 +5,7 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, @@ -83,30 +83,32 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected GenericSearchBuilder CountRunningByAccount; protected SearchBuilder NetworkTypeSearch; protected GenericSearchBuilder DistinctHostNameSearch; - + protected SearchBuilder HostAndStateSearch; + protected SearchBuilder StartingWithNoHostSearch; + @Inject ResourceTagDao _tagsDao; @Inject NicDao _nicDao; - + protected Attribute _updateTimeAttr; - - private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = + + private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART1 = "SELECT host.cluster_id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE "; private static final String ORDER_CLUSTERS_NUMBER_OF_VMS_FOR_ACCOUNT_PART2 = " AND host.type = 'Routing' GROUP BY host.cluster_id ORDER BY 2 ASC "; - + private static final String ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT pod.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host_pod_ref` pod LEFT JOIN `cloud`.`vm_instance` vm ON pod.id = vm.pod_id WHERE pod.data_center_id = ? " + " GROUP BY pod.id ORDER BY 2 ASC "; - + private static final String ORDER_HOSTS_NUMBER_OF_VMS_FOR_ACCOUNT = "SELECT host.id, SUM(IF(vm.state='Running' AND vm.account_id = ?, 1, 0)) FROM `cloud`.`host` host LEFT JOIN `cloud`.`vm_instance` vm ON host.id = vm.host_id WHERE host.data_center_id = ? " + " AND host.pod_id = ? AND host.cluster_id = ? 
AND host.type = 'Routing' " + " GROUP BY host.id ORDER BY 2 ASC "; @Inject protected HostDao _hostDao; - + public VMInstanceDaoImpl() { } - + @PostConstruct protected void init() { @@ -114,14 +116,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdStatesSearch.and("id", IdStatesSearch.entity().getId(), Op.EQ); IdStatesSearch.and("states", IdStatesSearch.entity().getState(), Op.IN); IdStatesSearch.done(); - + VMClusterSearch = createSearchBuilder(); SearchBuilder hostSearch = _hostDao.createSearchBuilder(); VMClusterSearch.join("hostSearch", hostSearch, hostSearch.entity().getId(), VMClusterSearch.entity().getHostId(), JoinType.INNER); hostSearch.and("clusterId", hostSearch.entity().getClusterId(), SearchCriteria.Op.EQ); VMClusterSearch.done(); - + LHVMClusterSearch = createSearchBuilder(); SearchBuilder hostSearch1 = _hostDao.createSearchBuilder(); LHVMClusterSearch.join("hostSearch1", hostSearch1, hostSearch1.entity().getId(), LHVMClusterSearch.entity().getLastHostId(), JoinType.INNER); @@ -129,7 +131,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem hostSearch1.and("clusterId", hostSearch1.entity().getClusterId(), SearchCriteria.Op.EQ); LHVMClusterSearch.done(); - + AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("host", AllFieldsSearch.entity().getHostId(), Op.EQ); AllFieldsSearch.and("lastHost", AllFieldsSearch.entity().getLastHostId(), Op.EQ); @@ -169,23 +171,23 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem IdTypesSearch.and("id", IdTypesSearch.entity().getId(), Op.EQ); IdTypesSearch.and("types", IdTypesSearch.entity().getType(), Op.IN); IdTypesSearch.done(); - + HostIdTypesSearch = createSearchBuilder(); HostIdTypesSearch.and("hostid", HostIdTypesSearch.entity().getHostId(), Op.EQ); HostIdTypesSearch.and("types", HostIdTypesSearch.entity().getType(), Op.IN); HostIdTypesSearch.done(); - + HostIdUpTypesSearch = createSearchBuilder(); HostIdUpTypesSearch.and("hostid", 
HostIdUpTypesSearch.entity().getHostId(), Op.EQ); HostIdUpTypesSearch.and("types", HostIdUpTypesSearch.entity().getType(), Op.IN); HostIdUpTypesSearch.and("states", HostIdUpTypesSearch.entity().getState(), Op.NIN); HostIdUpTypesSearch.done(); - + HostUpSearch = createSearchBuilder(); HostUpSearch.and("host", HostUpSearch.entity().getHostId(), Op.EQ); HostUpSearch.and("states", HostUpSearch.entity().getState(), Op.IN); HostUpSearch.done(); - + InstanceNameSearch = createSearchBuilder(); InstanceNameSearch.and("instanceName", InstanceNameSearch.entity().getInstanceName(), Op.EQ); InstanceNameSearch.done(); @@ -194,21 +196,31 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem CountVirtualRoutersByAccount.select(null, Func.COUNT, null); CountVirtualRoutersByAccount.and("account", CountVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountVirtualRoutersByAccount.and("type", CountVirtualRoutersByAccount.entity().getType(), SearchCriteria.Op.EQ); - CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); + CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); CountVirtualRoutersByAccount.done(); - + CountRunningByHost = createSearchBuilder(Long.class); CountRunningByHost.select(null, Func.COUNT, null); CountRunningByHost.and("host", CountRunningByHost.entity().getHostId(), SearchCriteria.Op.EQ); CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ); - CountRunningByHost.done(); + CountRunningByHost.done(); CountRunningByAccount = createSearchBuilder(Long.class); CountRunningByAccount.select(null, Func.COUNT, null); CountRunningByAccount.and("account", CountRunningByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountRunningByAccount.and("state", CountRunningByAccount.entity().getState(), SearchCriteria.Op.EQ); - CountRunningByAccount.done(); - + 
CountRunningByAccount.done(); + + HostAndStateSearch = createSearchBuilder(); + HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); + HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.done(); + + StartingWithNoHostSearch = createSearchBuilder(); + StartingWithNoHostSearch.and("state", StartingWithNoHostSearch.entity().getState(), Op.EQ); + StartingWithNoHostSearch.and("host", StartingWithNoHostSearch.entity().getHostId(), Op.NULL); + StartingWithNoHostSearch.done(); + _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; } @@ -219,7 +231,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("account", accountId); return listBy(sc); } - + @Override public List findVMInstancesLike(String name) { SearchCriteria sc = NameLikeSearch.create(); @@ -234,7 +246,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - + @Override public List listByZoneId(long zoneId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -242,7 +254,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return listBy(sc); } - + @Override public List listByPodId(long podId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -263,7 +275,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setJoinParameters("hostSearch1", "clusterId", clusterId); return listBy(sc); } - + @Override public List listByZoneIdAndType(long zoneId, VirtualMachine.Type type) { SearchCriteria sc = AllFieldsSearch.create(); @@ -271,8 +283,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("type", type.toString()); return listBy(sc); } - - + + @Override public List listNonExpungedByZoneAndTemplate(long zoneId, long templateId) { SearchCriteria sc = ZoneTemplateNonExpungedSearch.create(); @@ -310,7 +322,7 @@ public class VMInstanceDaoImpl extends 
GenericDaoBase implem sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging}); return listBy(sc); } - + @Override public List listUpByHostId(Long hostId) { SearchCriteria sc = HostUpSearch.create(); @@ -318,14 +330,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("states", new Object[] {State.Starting, State.Running}); return listBy(sc); } - + @Override public List listByTypes(Type... types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[]) types); return listBy(sc); } - + @Override public List listByTypeAndState(VirtualMachine.Type type, State state) { SearchCriteria sc = AllFieldsSearch.create(); @@ -348,7 +360,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("instanceName", name); return findOneBy(sc); } - + @Override public void updateProxyId(long id, Long proxyId, Date time) { VMInstanceVO vo = createForUpdate(); @@ -369,12 +381,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @SuppressWarnings("unchecked") Pair hosts = (Pair)opaque; Long newHostId = hosts.second(); - + VMInstanceVO vmi = (VMInstanceVO)vm; Long oldHostId = vmi.getHostId(); Long oldUpdated = vmi.getUpdated(); Date oldUpdateDate = vmi.getUpdateTime(); - + SearchCriteria sc = StateChangeSearch.create(); sc.setParameters("id", vmi.getId()); sc.setParameters("states", oldState); @@ -383,7 +395,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem vmi.incrUpdated(); UpdateBuilder ub = getUpdateBuilder(vmi); - + ub.set(vmi, "state", newState); ub.set(vmi, "hostId", newHostId); ub.set(vmi, "podIdToDeployIn", vmi.getPodIdToDeployIn()); @@ -393,7 +405,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem if (result == 0 && s_logger.isDebugEnabled()) { VMInstanceVO vo = findByIdIncludingRemoved(vm.getId()); - + if (vo != null) { StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); str.append(": DB 
Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()).append("; time=").append(vo.getUpdateTime()); @@ -407,7 +419,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem } return result > 0; } - + @Override public List listByLastHostId(Long hostId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -415,7 +427,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Stopped); return listBy(sc); } - + @Override public Long countAllocatedVirtualRoutersForAccount(long accountId) { SearchCriteria sc = CountVirtualRoutersByAccount.create(); @@ -424,7 +436,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", new Object[] {State.Destroyed, State.Error, State.Expunging}); return customSearch(sc, null).get(0); } - + @Override public List listVmsMigratingFromHost(Long hostId) { SearchCriteria sc = AllFieldsSearch.create(); @@ -432,7 +444,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Migrating); return listBy(sc); } - + @Override public Long countRunningByHostId(long hostId){ SearchCriteria sc = CountRunningByHost.create(); @@ -455,7 +467,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, accountId); pstmt.setLong(2, zoneId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); @@ -484,11 +496,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, accountId); pstmt.setLong(2, podId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); - result.add(clusterId); + result.add(clusterId); clusterVmCountMap.put(clusterId, rs.getDouble(2)); } return new Pair, Map>(result, clusterVmCountMap); @@ -511,11 +523,11 @@ 
public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt = txn.prepareAutoCloseStatement(sql); pstmt.setLong(1, accountId); pstmt.setLong(2, dataCenterId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long podId = rs.getLong(1); - result.add(podId); + result.add(podId); podVmCountMap.put(podId, rs.getDouble(2)); } return new Pair, Map>(result, podVmCountMap); @@ -523,7 +535,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); } catch (Throwable e) { throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); - } + } } @Override @@ -538,7 +550,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem pstmt.setLong(2, dcId); pstmt.setLong(3, podId); pstmt.setLong(4, clusterId); - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { result.add(rs.getLong(1)); @@ -548,9 +560,9 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem throw new CloudRuntimeException("DB Exception on: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); } catch (Throwable e) { throw new CloudRuntimeException("Caught: " + ORDER_PODS_NUMBER_OF_VMS_FOR_ACCOUNT, e); - } + } } - + @Override public Long countRunningByAccount(long accountId){ SearchCriteria sc = CountRunningByAccount.create(); @@ -558,18 +570,18 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Running); return customSearch(sc, null).get(0); } - + @Override public List listNonRemovedVmsByTypeAndNetwork(long networkId, VirtualMachine.Type... 
types) { if (NetworkTypeSearch == null) { - + SearchBuilder nicSearch = _nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); NetworkTypeSearch = createSearchBuilder(); NetworkTypeSearch.and("types", NetworkTypeSearch.entity().getType(), SearchCriteria.Op.IN); NetworkTypeSearch.and("removed", NetworkTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(), + NetworkTypeSearch.join("nicSearch", nicSearch, NetworkTypeSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); NetworkTypeSearch.done(); } @@ -577,27 +589,27 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem SearchCriteria sc = NetworkTypeSearch.create(); if (types != null && types.length != 0) { sc.setParameters("types", (Object[]) types); - } + } sc.setJoinParameters("nicSearch", "networkId", networkId); return listBy(sc); } - - - + + + @Override public List listDistinctHostNames(long networkId, VirtualMachine.Type... 
types) { if (DistinctHostNameSearch == null) { - + SearchBuilder nicSearch = _nicDao.createSearchBuilder(); nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); DistinctHostNameSearch = createSearchBuilder(String.class); DistinctHostNameSearch.selectField(DistinctHostNameSearch.entity().getHostName()); - + DistinctHostNameSearch.and("types", DistinctHostNameSearch.entity().getType(), SearchCriteria.Op.IN); DistinctHostNameSearch.and("removed", DistinctHostNameSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(), + DistinctHostNameSearch.join("nicSearch", nicSearch, DistinctHostNameSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); DistinctHostNameSearch.done(); } @@ -605,12 +617,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem SearchCriteria sc = DistinctHostNameSearch.create(); if (types != null && types.length != 0) { sc.setParameters("types", (Object[]) types); - } + } sc.setJoinParameters("nicSearch", "networkId", networkId); return customSearch(sc, null); } - + @Override @DB public boolean remove(Long id) { @@ -625,4 +637,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result; } + @Override + public List findByHostInStates(Long hostId, State... 
states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", (Object[]) states); + return listBy(sc); + } + + @Override + public List listStartingWithNoHostId() { + SearchCriteria sc = StartingWithNoHostSearch.create(); + sc.setParameters("state", State.Starting); + return listBy(sc); + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index 0dd55d1d325..5b1f8cd699a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -50,7 +50,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - + s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool"); List suitablePools = new ArrayList(); @@ -65,6 +65,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags()); + + // add remaining pools in cluster, that did not match tags, to avoid set + List allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null); + allPools.removeAll(pools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } + if (pools.size() == 0) { if (s_logger.isDebugEnabled()) { String storageType = dskCh.useLocalStorage() ? 
ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); @@ -72,7 +80,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat } return suitablePools; } - + for (StoragePoolVO pool: pools) { if(suitablePools.size() == returnUpTo){ break; @@ -80,13 +88,15 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); + } else { + avoid.addPool(pool.getId()); } } - + if (s_logger.isDebugEnabled()) { s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools"); } - + return suitablePools; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index 7447d988a58..632ba439cb0 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -74,7 +74,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (!dskCh.useLocalStorage()) { return suitablePools; } - + // data disk and host identified from deploying vm (attach volume case) if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) { List hostPools = _poolHostDao.listByHostId(plan.getHostId()); @@ -85,7 +85,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (filter(avoid, pol, dskCh, plan)) { s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); suitablePools.add(pol); - } + } else { + avoid.addPool(pool.getId()); + } } if (suitablePools.size() == returnUpTo) { @@ -101,8 +103,19 @@ public class LocalStoragePoolAllocator extends 
AbstractStoragePoolAllocator { StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); - } + } else { + avoid.addPool(pool.getId()); + } } + + // add remaining pools in cluster, that did not match tags, to avoid + // set + List allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), + plan.getPodId(), plan.getClusterId(), null); + allPools.removeAll(availablePools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } } if (s_logger.isDebugEnabled()) { @@ -111,7 +124,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } - + @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 1d3cd819d70..e9769802a37 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -39,18 +39,18 @@ import com.cloud.vm.VirtualMachineProfile; @Component public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class); - @Inject PrimaryDataStoreDao _storagePoolDao; - @Inject DataStoreManager dataStoreMgr; - + @Inject PrimaryDataStoreDao _storagePoolDao; + @Inject DataStoreManager dataStoreMgr; + @Override - protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, + protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) { Volume volume = _volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new 
ArrayList(); requestVolumes.add(volume); return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); } - + @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, @@ -64,9 +64,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } } - + List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags()); - + + // add remaining pools in zone, that did not match tags, to avoid set + List allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null); + allPools.removeAll(storagePools); + for (StoragePoolVO pool : allPools) { + avoid.addPool(pool.getId()); + } + for (StoragePoolVO storage : storagePools) { if (suitablePools.size() == returnUpTo) { break; @@ -74,7 +81,9 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); if (filter(avoid, pol, dskCh, plan)) { suitablePools.add(pol); - } + } else { + avoid.addPool(pol.getId()); + } } return suitablePools; } diff --git a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java index 2ab98566e0a..d917893719e 100644 --- a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java +++ b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.deploy; @@ -24,18 +24,17 @@ import javax.ejb.Local; import org.apache.log4j.Logger; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=DeploymentPlanner.class) -public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner { +public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class); - + /** - * This method should reorder the given list of Cluster Ids by applying any necessary heuristic + * This method should reorder the given list of Cluster Ids by applying any necessary heuristic * for this planner * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, by considering those pods first which have more number of VMs for this account * This reordering is not done incase the clusters within single pod are passed when the allocation is applied at pod-level. @@ -49,7 +48,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo } return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId()); } - + private List applyUserConcentrationPodHeuristicToClusters(long zoneId, List prioritizedClusterIds, long accountId){ //user has VMs in certain pods. 
- prioritize those pods first //UserConcentratedPod strategy @@ -61,8 +60,8 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo clusterList = prioritizedClusterIds; } return clusterList; - } - + } + private List reorderClustersByPods(List clusterIds, List podIds) { if (s_logger.isDebugEnabled()) { @@ -111,11 +110,11 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo return prioritizedPods; } - + /** - * This method should reorder the given list of Pod Ids by applying any necessary heuristic + * This method should reorder the given list of Pod Ids by applying any necessary heuristic * for this planner - * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account + * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account * @return List ordered list of Pod Ids */ @Override @@ -124,7 +123,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo if(vmProfile.getOwner() == null){ return podIdsByCapacity; } - long accountId = vmProfile.getOwner().getAccountId(); + long accountId = vmProfile.getOwner().getAccountId(); //user has VMs in certain pods. 
- prioritize those pods first //UserConcentratedPod strategy @@ -138,18 +137,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo }else{ return podIdsByCapacity; } - - } - @Override - public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString()))){ - return true; - } - } - return false; } } diff --git a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java index 2db2051389d..2b0b1588802 100755 --- a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java +++ b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java @@ -29,14 +29,13 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import com.cloud.configuration.Config; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=DeploymentPlanner.class) -public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner { +public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class); @@ -191,17 +190,6 @@ public class UserDispersingPlanner extends FirstFitPlanner implements Deployment } - @Override - public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != 
HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && _allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) { - return true; - } - } - return false; - } - float _userDispersionWeight; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java deleted file mode 100755 index 45fbeb782ab..00000000000 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal.manager; - -import java.util.Map; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - -import com.cloud.deploy.AbstractDeployPlannerSelector; -import com.cloud.deploy.DeployPlannerSelector; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.vm.UserVmVO; -@Local(value = {DeployPlannerSelector.class}) -public class BaremetalPlannerSelector extends AbstractDeployPlannerSelector{ - - @Override - public String selectPlanner(UserVmVO vm) { - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - return "BareMetalPlanner"; - } - return null; - } - -} diff --git a/server/pom.xml b/server/pom.xml index 004d9c8e068..6385bf2f233 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -90,6 +90,11 @@ cloud-api ${project.version} + + org.apache.cloudstack + cloud-framework-ipc + ${project.version} + org.apache.cloudstack cloud-framework-events diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index b54b1c1f527..b6286aab8da 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -78,7 +78,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { @Inject ConsoleProxyDao _consoleProxyDao = null; @Inject SecondaryStorageVmDao _secStorgaeVmDao = null; @Inject ConfigurationDao _configDao = null; - @Inject GuestOSDao _guestOSDao = null; + @Inject GuestOSDao _guestOSDao = null; @Inject GuestOSCategoryDao _guestOSCategoryDao = null; @Inject VMInstanceDao _vmInstanceDao = null; @Inject ResourceManager _resourceMgr; @@ -88,17 +88,17 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean _checkHvm = true; protected String _allocationAlgorithm = "random"; @Inject CapacityManager _capacityMgr; - - + + @Override public List 
allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo) { return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true); } - + @Override public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) { - + long dcId = plan.getDataCenterId(); Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); @@ -110,19 +110,19 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not return new ArrayList(); } - + if(s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId ); } - + String hostTagOnOffering = offering.getHostTag(); String hostTagOnTemplate = template.getTemplateTag(); - + boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false; boolean hasTemplateTag = hostTagOnTemplate != null ? 
true : false; - + List clusterHosts = new ArrayList(); - + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (haVmTag != null) { clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, haVmTag); @@ -133,31 +133,31 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { List hostsMatchingOfferingTag = new ArrayList(); List hostsMatchingTemplateTag = new ArrayList(); if (hasSvcOfferingTag){ - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering); } hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering); - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag); - } + } } if (hasTemplateTag){ - if (s_logger.isDebugEnabled()){ + if (s_logger.isDebugEnabled()){ s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate); } - hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()){ + hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); + if (s_logger.isDebugEnabled()){ s_logger.debug("Hosts with tag '" + hostTagOnTemplate+"' are:" + hostsMatchingTemplateTag); - } + } } - + if (hasSvcOfferingTag && hasTemplateTag){ hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag); - clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); - if (s_logger.isDebugEnabled()){ + clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate); + if (s_logger.isDebugEnabled()){ s_logger.debug("Found "+ hostsMatchingOfferingTag.size() +" Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag); } - + clusterHosts = hostsMatchingOfferingTag; } else 
{ if (hasSvcOfferingTag){ @@ -168,7 +168,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } } } - + + // add all hosts that we are not considering to the avoid list + List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null); + allhostsInCluster.removeAll(clusterHosts); + for (HostVO host : allhostsInCluster) { + avoid.addHost(host.getId()); + } + return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account); } @@ -226,11 +233,11 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { }else if(_allocationAlgorithm.equals("userdispersing")){ hosts = reorderHostsByNumberOfVms(plan, hosts, account); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: "+hosts); } - + // We will try to reorder the host lists such that we give priority to hosts that have // the minimums to support a VM's requirements hosts = prioritizeHosts(template, hosts); @@ -242,7 +249,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (s_logger.isDebugEnabled()) { s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize()); } - + List suitableHosts = new ArrayList(); for (HostVO host : hosts) { @@ -255,7 +262,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } continue; } - + //find number of guest VMs occupying capacity on this host. if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)){ if (s_logger.isDebugEnabled()) { @@ -285,13 +292,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (s_logger.isDebugEnabled()) { s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" 
+ hostHasCapacity); } + avoid.addHost(host.getId()); } } - + if (s_logger.isDebugEnabled()) { s_logger.debug("Host Allocator returning "+suitableHosts.size() +" suitable hosts"); } - + return suitableHosts; } @@ -302,26 +310,26 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { long dcId = plan.getDataCenterId(); Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); - + List hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId()); if (s_logger.isDebugEnabled()) { s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount); } - + //now filter the given list of Hosts by this ordered list - Map hostMap = new HashMap(); + Map hostMap = new HashMap(); for (HostVO host : hosts) { hostMap.put(host.getId(), host); } List matchingHostIds = new ArrayList(hostMap.keySet()); - + hostIdsByVmCount.retainAll(matchingHostIds); - + List reorderedHosts = new ArrayList(); for(Long id: hostIdsByVmCount){ reorderedHosts.add(hostMap.get(id)); } - + return reorderedHosts; } @@ -336,13 +344,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { if (template == null) { return hosts; } - + // Determine the guest OS category of the template String templateGuestOSCategory = getTemplateGuestOSCategory(template); - + List prioritizedHosts = new ArrayList(); List noHvmHosts = new ArrayList(); - + // If a template requires HVM and a host doesn't support HVM, remove it from consideration List hostsToCheck = new ArrayList(); if (template.isRequiresHvm()) { @@ -356,7 +364,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } else { hostsToCheck.addAll(hosts); } - + if (s_logger.isDebugEnabled()) { if (noHvmHosts.size() > 0) { s_logger.debug("Not considering hosts: " + noHvmHosts + " to deploy template: " + template +" as they are not HVM enabled"); @@ -376,10 +384,10 @@ public class FirstFitAllocator extends AdapterBase 
implements HostAllocator { lowPriorityHosts.add(host); } } - + hostsToCheck.removeAll(highPriorityHosts); hostsToCheck.removeAll(lowPriorityHosts); - + // Prioritize the remaining hosts by HVM capability for (HostVO host : hostsToCheck) { if (!template.isRequiresHvm() && !hostSupportsHVM(host)) { @@ -390,21 +398,21 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { prioritizedHosts.add(host); } } - + // Merge the lists prioritizedHosts.addAll(0, highPriorityHosts); prioritizedHosts.addAll(lowPriorityHosts); - + return prioritizedHosts; } - + protected boolean hostSupportsHVM(HostVO host) { if ( !_checkHvm ) { return true; } // Determine host capabilities String caps = host.getCapabilities(); - + if (caps != null) { String[] tokens = caps.split(","); for (String token : tokens) { @@ -413,24 +421,24 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { } } } - + return false; } - + protected String getHostGuestOSCategory(HostVO host) { DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), "guest.os.category.id"); if (hostDetail != null) { String guestOSCategoryIdString = hostDetail.getValue(); long guestOSCategoryId; - + try { guestOSCategoryId = Long.parseLong(guestOSCategoryIdString); } catch (Exception e) { return null; } - + GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId); - + if (guestOSCategory != null) { return guestOSCategory.getName(); } else { @@ -440,7 +448,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { return null; } } - + protected String getTemplateGuestOSCategory(VMTemplateVO template) { long guestOSId = template.getGuestOSId(); GuestOSVO guestOS = _guestOSDao.findById(guestOSId); @@ -455,7 +463,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { Map configs = _configDao.getConfiguration(params); String opFactor = configs.get("cpu.overprovisioning.factor"); _factor = 
NumbersUtil.parseFloat(opFactor, 1); - + String allocationAlgorithm = configs.get("vm.allocation.algorithm"); if (allocationAlgorithm != null) { _allocationAlgorithm = allocationAlgorithm; diff --git a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index 9795fef66fd..ce20562d5f7 100644 --- a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -73,6 +73,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); @@ -552,6 +564,20 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); reservedMemory += so.getRamSize() * 1024L * 1024L; reservedCpu += so.getCpu() * so.getSpeed(); + } else { + // signal if not done already, that the VM has been stopped for skip.counting.hours, + // hence capacity will not be reserved anymore. 
+ UserVmDetailVO messageSentFlag = _userVmDetailsDao.findDetail(vm.getId(), MESSAGE_RESERVED_CAPACITY_FREED_FLAG); + if (messageSentFlag == null || !Boolean.valueOf(messageSentFlag.getValue())) { + _messageBus.publish(_name, "VM_ReservedCapacity_Free", PublishScope.LOCAL, vm); + + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVM = _userVMDao.findById(vm.getId()); + _userVMDao.loadDetails(userVM); + userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "true"); + _userVMDao.saveDetails(userVM); + } + } } } @@ -688,6 +714,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, allocateVmCapacity(vm, fromLastHost); } + if (newState == State.Stopped) { + if (vm.getType() == VirtualMachine.Type.User) { + + UserVmVO userVM = _userVMDao.findById(vm.getId()); + _userVMDao.loadDetails(userVM); + // free the message sent flag if it exists + userVM.setDetail(MESSAGE_RESERVED_CAPACITY_FREED_FLAG, "false"); + _userVMDao.saveDetails(userVM); + + } + } + return true; } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 77ca2de1923..e1d3751f290 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -214,6 +214,8 @@ public enum Config { SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null), AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null), AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. 
Set this value to 0 to never delete alerts", null), + HostReservationReleasePeriod("Advanced", ManagementServer.class, Integer.class, "host.reservation.release.period", "300000", "The interval in milliseconds between host reservation release checks", null), + // LB HealthCheck Interval. LBHealthCheck("Advanced", ManagementServer.class, String.class, "healthcheck.update.interval", "600", @@ -235,6 +237,7 @@ public enum Config { ApplyAllocationAlgorithmToPods("Advanced", ManagementServer.class, Boolean.class, "apply.allocation.algorithm.to.pods", "false", "If true, deployment planner applies the allocation heuristics at pods first in the given datacenter during VM resource allocation", "true,false"), VmUserDispersionWeight("Advanced", ManagementServer.class, Float.class, "vm.user.dispersion.weight", "1", "Weight for user dispersion heuristic (as a value between 0 and 1) applied to resource allocation during vm deployment. Weight for capacity heuristic will be (1 - weight of user dispersion)", null), VmAllocationAlgorithm("Advanced", ManagementServer.class, String.class, "vm.allocation.algorithm", "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null), + VmDeploymentPlanner("Advanced", ManagementServer.class, String.class, "vm.deployment.planner", "FirstFitPlanner", "'FirstFitPlanner', 'UserDispersingPlanner', 'UserConcentratedPodPlanner': DeploymentPlanner heuristic that will be used for VM deployment.", null), EndpointeUrl("Advanced", ManagementServer.class, String.class, "endpointe.url", "http://localhost:8080/client/api", "Endpointe Url", null), ElasticLoadBalancerEnabled("Advanced", ManagementServer.class, String.class, "network.loadbalancer.basiczone.elb.enabled", "false", "Whether the load balancing service is enabled for basic zones", "true,false"), ElasticLoadBalancerNetwork("Advanced", ManagementServer.class, 
String.class, "network.loadbalancer.basiczone.elb.network", "guest", "Whether the elastic load balancing service public ips are taken from the public or guest network", "guest,public"), diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java index 84ffc3e1aad..d0ae914c20f 100755 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -79,10 +79,11 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * TODO * @param id * @param useVirtualNetwork + * @param deploymentPlanner * @return ID */ ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, - boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate); + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner); /** * Creates a new disk offering diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 887878254b3..9e0c847ed57 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -5,7 +5,7 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, @@ -162,6 +162,7 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; import com.cloud.server.ConfigurationServer; +import com.cloud.server.ManagementService; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; @@ -302,7 +303,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject AlertManager _alertMgr; // @com.cloud.utils.component.Inject(adapter = SecurityChecker.class) - @Inject + @Inject List _secChecker; @Inject @@ -346,6 +347,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject NicIpAliasDao _nicIpAliasDao; + @Inject + public ManagementService _mgr; + // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? 
@Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao; @@ -356,11 +360,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override public boolean configure(final String name, final Map params) throws ConfigurationException { String maxVolumeSizeInGbString = _configDao.getValue(Config.MaxVolumeSize.key()); - _maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString, + _maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString, Integer.parseInt(Config.MaxVolumeSize.getDefaultValue())); String defaultPageSizeString = _configDao.getValue(Config.DefaultPageSize.key()); - _defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString, + _defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString, Long.parseLong(Config.DefaultPageSize.getDefaultValue())); populateConfigValuesForValidationSet(); @@ -920,7 +924,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati checkPodCidrSubnets(zoneId, podId, cidr); /* * Commenting out due to Bug 11593 - CIDR conflicts with zone when extending pod but not when creating it - * + * * checkCidrVlanOverlap(zoneId, cidr); */ } @@ -1713,7 +1717,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (internalDns2 == null) { internalDns2 = zone.getInternalDns2(); } - + if (guestCidr == null) { guestCidr = zone.getGuestNetworkCidr(); } @@ -2034,17 +2038,29 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Network rate can be specified only for non-System offering and system offerings having \"domainrouter\" systemvmtype"); } + if (cmd.getDeploymentPlanner() != null) { + List planners = _mgr.listDeploymentPlanners(); + if (planners != null && !planners.isEmpty()) { + if (!planners.contains(cmd.getDeploymentPlanner())) { + throw new InvalidParameterValueException( + "Invalid name for Deployment Planner specified, please use 
listDeploymentPlanners to get the valid set"); + } + } else { + throw new InvalidParameterValueException("No deployment planners found"); + } + } + return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), - localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); + localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner()); } @Override @ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering") public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText, - boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { tags = cleanupTags(tags); ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type, - domainId, hostTag); + domainId, hostTag, deploymentPlanner); if ((offering = _serviceOfferingDao.persist(offering)) != null) { UserContext.current().setEventDetails("Service offering id=" + offering.getId()); @@ -2328,9 +2344,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String endIPv6 = cmd.getEndIpv6(); String ip6Gateway = cmd.getIp6Gateway(); String ip6Cidr = cmd.getIp6Cidr(); - + Account vlanOwner = null; - + boolean ipv4 = (startIP != null); boolean 
ipv6 = (startIPv6 != null); @@ -2387,7 +2403,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } else if (ipv6) { throw new InvalidParameterValueException("Only support IPv6 on extending existed network"); } - + // Verify that zone exists DataCenterVO zone = _zoneDao.findById(zoneId); if (zone == null) { @@ -2434,18 +2450,18 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } } - - + + // Check if zone is enabled Account caller = UserContext.current().getCaller(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); - } + } if (zone.isSecurityGroupEnabled() && zone.getNetworkType() != DataCenter.NetworkType.Basic && forVirtualNetwork) { throw new InvalidParameterValueException("Can't add virtual ip range into a zone with security group enabled"); } - + // If networkId is not specified, and vlan is Virtual or Direct Untagged, try to locate default networks if (forVirtualNetwork) { if (network == null) { @@ -2604,35 +2620,35 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String startIP, String endIP, String vlanGateway, String vlanNetmask, String vlanId, Account vlanOwner, String startIPv6, String endIPv6, String vlanIp6Gateway, String vlanIp6Cidr) { Network network = _networkModel.getNetwork(networkId); - + boolean ipv4 = false, ipv6 = false; - + if (startIP != null) { ipv4 = true; } - + if (startIPv6 != null) { ipv6 = true; } - + if (!ipv4 && !ipv6) { throw new InvalidParameterValueException("Please specify IPv4 or IPv6 address."); } - + //Validate the zone DataCenterVO zone = _zoneDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("Please specify a valid zone."); } - + // ACL check checkZoneAccess(UserContext.current().getCaller(), zone); - + //Validate the 
physical network if (_physicalNetworkDao.findById(physicalNetworkId) == null) { throw new InvalidParameterValueException("Please specify a valid physical network id"); } - + //Validate the pod if (podId != null) { Pod pod = _podDao.findById(podId); @@ -2644,11 +2660,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } //pod vlans can be created in basic zone only if (zone.getNetworkType() != NetworkType.Basic || network.getTrafficType() != TrafficType.Guest) { - throw new InvalidParameterValueException("Pod id can be specified only for the networks of type " - + TrafficType.Guest + " in zone of type " + NetworkType.Basic); + throw new InvalidParameterValueException("Pod id can be specified only for the networks of type " + + TrafficType.Guest + " in zone of type " + NetworkType.Basic); } } - + //1) if vlan is specified for the guest network range, it should be the same as network's vlan //2) if vlan is missing, default it to the guest network's vlan if (network.getTrafficType() == TrafficType.Guest) { @@ -2660,7 +2676,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati //For pvlan networkVlanId = networkVlanId.split("-")[0]; } - + if (vlanId != null) { // if vlan is specified, throw an error if it's not equal to network's vlanId if (networkVlanId != null && !networkVlanId.equalsIgnoreCase(vlanId)) { @@ -2673,14 +2689,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati //vlan id is required for public network throw new InvalidParameterValueException("Vlan id is required when add ip range to the public network"); } - + if (vlanId == null) { vlanId = Vlan.UNTAGGED; } VlanType vlanType = forVirtualNetwork ? 
VlanType.VirtualNetwork : VlanType.DirectAttached; - - + + if (vlanOwner != null && zone.getNetworkType() != NetworkType.Advanced) { throw new InvalidParameterValueException("Vlan owner can be defined only in the zone of type " + NetworkType.Advanced); } @@ -2696,7 +2712,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please specify a valid netmask"); } } - + if (ipv6) { if (!NetUtils.isValidIpv6(vlanIp6Gateway)) { throw new InvalidParameterValueException("Please specify a valid IPv6 gateway"); @@ -2751,7 +2767,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati List vlans = _vlanDao.listByZone(zone.getId()); for (VlanVO vlan : vlans) { String otherVlanGateway = vlan.getVlanGateway(); - // Continue if it's not IPv4 + // Continue if it's not IPv4 if (otherVlanGateway == null) { continue; } @@ -2787,14 +2803,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } } - + String ipv6Range = null; if (ipv6) { ipv6Range = startIPv6; if (endIPv6 != null) { ipv6Range += "-" + endIPv6; } - + List vlans = _vlanDao.listByZone(zone.getId()); for (VlanVO vlan : vlans) { if (vlan.getIp6Gateway() == null) { @@ -2820,14 +2836,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } String ipRange = null; - + if (ipv4) { ipRange = startIP; if (endIP != null) { ipRange += "-" + endIP; } } - + // Everything was fine, so persist the VLAN Transaction txn = Transaction.currentTxn(); txn.start(); @@ -2839,7 +2855,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // IPv6 use a used ip map, is different from ipv4, no need to save public ip range if (ipv4) { if (!savePublicIPRange(startIP, endIP, zoneId, vlan.getId(), networkId, physicalNetworkId)) { - throw new CloudRuntimeException("Failed to save IPv4 range. 
Please contact Cloud Support."); + throw new CloudRuntimeException("Failed to save IPv4 range. Please contact Cloud Support."); } } @@ -2875,7 +2891,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (vlanRange == null) { throw new InvalidParameterValueException("Please specify a valid IP range id."); } - + boolean isAccountSpecific = false; List acctVln = _accountVlanMapDao.listAccountVlanMapsByVlan(vlanRange.getId()); // Check for account wide pool. It will have an entry for account_vlan_map. @@ -2888,7 +2904,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati List ips = _publicIpAddressDao.listByVlanId(vlanDbId); boolean success = true; if (allocIpCount > 0) { - if (isAccountSpecific) { + if (isAccountSpecific) { try { vlanRange = _vlanDao.acquireInLockTable(vlanDbId, 30); if (vlanRange == null) { @@ -2901,7 +2917,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati for (IPAddressVO ip : ips) { if (ip.isOneToOneNat()) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + + throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip + " belonging to the range is used for static nat purposes. Cleanup the rules first"); } @@ -2910,9 +2926,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati " as ip " + ip + " belonging to the range is a source nat ip for the network id=" + ip.getSourceNetworkId() + ". IP range with the source nat ip address can be removed either as a part of Network, or account removal"); } - + if (_firewallDao.countRulesByIpId(ip.getId()) > 0) { - throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + + throw new InvalidParameterValueException("Can't delete account specific vlan " + vlanDbId + " as ip " + ip + " belonging to the range has firewall rules applied. 
Cleanup the rules first"); } //release public ip address here @@ -3268,7 +3284,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return true; } - + @DB protected boolean savePublicIPRange(String startIP, String endIP, long zoneId, long vlanDbId, long sourceNetworkid, long physicalNetworkId) { @@ -3471,7 +3487,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - + private boolean validPod(long podId) { return (_podDao.findById(podId) != null); } @@ -3690,7 +3706,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (provider == Provider.JuniperSRX || provider == Provider.CiscoVnmc) { firewallProvider = provider; } - + if ((service == Service.PortForwarding || service == Service.StaticNat) && provider == Provider.VirtualRouter){ firewallProvider = Provider.VirtualRouter; } @@ -3890,7 +3906,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (!specifyVlan && type == GuestType.Shared) { throw new InvalidParameterValueException("SpecifyVlan should be true if network offering's type is " + type); } - + //specifyIpRanges should always be true for Shared networks //specifyIpRanges can only be true for Isolated networks with no Source Nat service if (specifyIpRanges) { @@ -3914,7 +3930,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (availability == NetworkOffering.Availability.Required) { boolean canOffBeRequired = (type == GuestType.Isolated && serviceProviderMap.containsKey(Service.SourceNat)); if (!canOffBeRequired) { - throw new InvalidParameterValueException("Availability can be " + NetworkOffering.Availability.Required + throw new InvalidParameterValueException("Availability can be " + NetworkOffering.Availability.Required + " only for networkOfferings of type " + GuestType.Isolated + " and with " + Service.SourceNat.getName() + " enabled"); } @@ -3922,11 +3938,11 @@ public class 
ConfigurationManagerImpl extends ManagerBase implements Configurati // only one network offering in the system can be Required List offerings = _networkOfferingDao.listByAvailability(Availability.Required, false); if (!offerings.isEmpty()) { - throw new InvalidParameterValueException("System already has network offering id=" + offerings.get(0).getId() + throw new InvalidParameterValueException("System already has network offering id=" + offerings.get(0).getId() + " with availability " + Availability.Required); } } - + boolean dedicatedLb = false; boolean elasticLb = false; boolean sharedSourceNat = false; @@ -3938,7 +3954,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati boolean internalLb = false; if (serviceCapabilityMap != null && !serviceCapabilityMap.isEmpty()) { Map lbServiceCapabilityMap = serviceCapabilityMap.get(Service.Lb); - + if ((lbServiceCapabilityMap != null) && (!lbServiceCapabilityMap.isEmpty())) { String isolationCapability = lbServiceCapabilityMap.get(Capability.SupportedLBIsolation); if (isolationCapability != null) { @@ -3952,7 +3968,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (param != null) { elasticLb = param.contains("true"); } - + String inlineMode = lbServiceCapabilityMap.get(Capability.InlineMode); if (inlineMode != null) { _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.Lb), Service.Lb, Capability.InlineMode, inlineMode); @@ -3983,14 +3999,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if ((sourceNatServiceCapabilityMap != null) && (!sourceNatServiceCapabilityMap.isEmpty())) { String sourceNatType = sourceNatServiceCapabilityMap.get(Capability.SupportedSourceNatTypes); if (sourceNatType != null) { - _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat, + _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), 
Service.SourceNat, Capability.SupportedSourceNatTypes, sourceNatType); sharedSourceNat = sourceNatType.contains("perzone"); } String param = sourceNatServiceCapabilityMap.get(Capability.RedundantRouter); if (param != null) { - _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat, + _networkModel.checkCapabilityForProvider(serviceProviderMap.get(Service.SourceNat), Service.SourceNat, Capability.RedundantRouter, param); redundantRouter = param.contains("true"); } @@ -4009,7 +4025,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan, + NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan, networkRate, multicastRate, isDefault, availability, tags, type, conserveMode, dedicatedLb, sharedSourceNat, redundantRouter, elasticIp, elasticLb, specifyIpRanges, inline, isPersistent, associatePublicIp, publicLb, internalLb); @@ -4041,7 +4057,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati _ntwkOffServiceMapDao.persist(offService); s_logger.trace("Added service for the network offering: " + offService + " with provider " + provider.getName()); } - + if (vpcOff) { List supportedSvcs = new ArrayList(); supportedSvcs.addAll(serviceProviderMap.keySet()); @@ -4251,7 +4267,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // filter by supported services boolean listBySupportedServices = (supportedServicesStr != null && !supportedServicesStr.isEmpty() && !offerings.isEmpty()); boolean checkIfProvidersAreEnabled = (zoneId != null); - boolean parseOfferings = (listBySupportedServices || sourceNatSupported != null || checkIfProvidersAreEnabled + boolean parseOfferings = (listBySupportedServices || sourceNatSupported != null || checkIfProvidersAreEnabled || forVpc != null || 
network != null); if (parseOfferings) { @@ -4299,7 +4315,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (sourceNatSupported != null) { addOffering = addOffering && (_networkModel.areServicesSupportedByNetworkOffering(offering.getId(), Network.Service.SourceNat) == sourceNatSupported); } - + if (forVpc != null) { addOffering = addOffering && (isOfferingForVpc(offering) == forVpc.booleanValue()); } else if (network != null){ @@ -4418,14 +4434,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } if (availability == null) { - throw new InvalidParameterValueException("Invalid value for Availability. Supported types: " + throw new InvalidParameterValueException("Invalid value for Availability. Supported types: " + Availability.Required + ", " + Availability.Optional); } else { if (availability == NetworkOffering.Availability.Required) { - boolean canOffBeRequired = (offeringToUpdate.getGuestType() == GuestType.Isolated + boolean canOffBeRequired = (offeringToUpdate.getGuestType() == GuestType.Isolated && _networkModel.areServicesSupportedByNetworkOffering(offeringToUpdate.getId(), Service.SourceNat)); if (!canOffBeRequired) { - throw new InvalidParameterValueException("Availability can be " + + throw new InvalidParameterValueException("Availability can be " + NetworkOffering.Availability.Required + " only for networkOfferings of type " + GuestType.Isolated + " and with " + Service.SourceNat.getName() + " enabled"); } @@ -4433,7 +4449,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // only one network offering in the system can be Required List offerings = _networkOfferingDao.listByAvailability(Availability.Required, false); if (!offerings.isEmpty() && offerings.get(0).getId() != offeringToUpdate.getId()) { - throw new InvalidParameterValueException("System already has network offering id=" + + throw new InvalidParameterValueException("System already has 
network offering id=" + offerings.get(0).getId() + " with availability " + Availability.Required); } } @@ -4452,7 +4468,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_MARK_DEFAULT_ZONE, eventDescription = "Marking account with the " + "default zone", async=true) public AccountVO markDefaultZone(String accountName, long domainId, long defaultZoneId) { - + // Check if the account exists Account account = _accountDao.findEnabledAccount(accountName, domainId); if (account == null) { @@ -4466,9 +4482,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } AccountVO acctForUpdate = _accountDao.findById(account.getId()); - + acctForUpdate.setDefaultZoneId(defaultZoneId); - + if (_accountDao.update(account.getId(), acctForUpdate)) { UserContext.current().setEventDetails("Default zone id= " + defaultZoneId); return _accountDao.findById(account.getId()); @@ -4476,7 +4492,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return null; } } - + // Note: This method will be used for entity name validations in the coming // releases (place holder for now) private void validateEntityName(String str) { @@ -4604,10 +4620,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public ClusterVO getCluster(long id) { return _clusterDao.findById(id); } - + @Override public AllocationState findClusterAllocationState(ClusterVO cluster){ - + if(cluster.getAllocationState() == AllocationState.Disabled){ return AllocationState.Disabled; }else if(ApiDBUtils.findPodById(cluster.getPodId()).getAllocationState() == AllocationState.Disabled){ @@ -4615,20 +4631,20 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati }else { DataCenterVO zone = ApiDBUtils.findZoneById(cluster.getDataCenterId()); return zone.getAllocationState(); - } - } + } + } @Override public AllocationState 
findPodAllocationState(HostPodVO pod){ - + if(pod.getAllocationState() == AllocationState.Disabled){ return AllocationState.Disabled; }else { DataCenterVO zone = ApiDBUtils.findZoneById(pod.getDataCenterId()); return zone.getAllocationState(); - } + } } - + private boolean allowIpRangeOverlap(VlanVO vlan, boolean forVirtualNetwork, long networkId) { // FIXME - delete restriction for virtual network in the future if (vlan.getVlanType() == VlanType.DirectAttached && !forVirtualNetwork) { diff --git a/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java b/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java deleted file mode 100755 index 7665687be60..00000000000 --- a/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java +++ /dev/null @@ -1,84 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.deploy; - -import java.util.Map; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import com.cloud.configuration.Config; -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.utils.component.AdapterBase; -import com.cloud.vm.UserVmVO; - -public abstract class AbstractDeployPlannerSelector extends AdapterBase implements DeployPlannerSelector { - protected Map params; - protected String name; - protected int runLevel; - - @Inject - protected ConfigurationDao _configDao; - protected String _allocationAlgorithm = "random"; - - @Override - public String getName() { - return name; - } - - @Override - public void setName(String name) { - this.name = name; - } - - @Override - public void setConfigParams(Map params) { - this.params = params; - } - - @Override - public Map getConfigParams() { - return params; - } - - @Override - public int getRunLevel() { - return runLevel; - } - - @Override - public void setRunLevel(int level) { - this.runLevel = level; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } -} diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java index c7162a2003f..c86d5e1a1b2 100644 --- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -16,32 +16,107 @@ // under the License. 
package com.cloud.deploy; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.TreeSet; import javax.ejb.Local; import javax.inject.Inject; +import javax.naming.ConfigurationException; import org.apache.cloudstack.affinity.AffinityGroupProcessor; -import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; + import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.MessageSubscriber; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; + +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.cluster.ManagementServerNode; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.Pod; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.dao.PlannerHostReservationDao; import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.Host; +import 
com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; +import com.cloud.org.Grouping; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.AccountManager; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.agent.AgentManager; +import com.cloud.agent.Listener; +import com.cloud.agent.api.AgentControlAnswer; +import com.cloud.agent.api.AgentControlCommand; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.agent.manager.allocator.HostAllocator; + @Local(value = { DeploymentPlanningManager.class }) -public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager { +public class DeploymentPlanningManagerImpl extends ManagerBase 
implements DeploymentPlanningManager, Manager, Listener { private static final Logger s_logger = Logger.getLogger(DeploymentPlanningManagerImpl.class); @Inject + AgentManager _agentMgr; + @Inject protected UserVmDao _vmDao; @Inject protected VMInstanceDao _vmInstanceDao; @@ -49,6 +124,53 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy protected AffinityGroupDao _affinityGroupDao; @Inject protected AffinityGroupVMMapDao _affinityGroupVMMapDao; + @Inject + DataCenterDao _dcDao; + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + private int _vmCapacityReleaseInterval; + @Inject + MessageBus _messageBus; + private Timer _timer = null; + private long _hostReservationReleasePeriod = 60L * 60L * 1000L; // one hour by default + + private static final long INITIAL_RESERVATION_RELEASE_CHECKER_DELAY = 30L * 1000L; // thirty seconds expressed in milliseconds + protected long _nodeId = -1; + + protected List _storagePoolAllocators; + public List getStoragePoolAllocators() { + return _storagePoolAllocators; + } + public void setStoragePoolAllocators( + List _storagePoolAllocators) { + this._storagePoolAllocators = _storagePoolAllocators; + } + + protected List _hostAllocators; + public List getHostAllocators() { + return _hostAllocators; + } + public void setHostAllocators(List _hostAllocators) { + this._hostAllocators = _hostAllocators; + } + + @Inject protected HostDao _hostDao; + @Inject protected HostPodDao _podDao; + @Inject protected ClusterDao _clusterDao; + @Inject protected GuestOSDao _guestOSDao = null; + @Inject protected GuestOSCategoryDao _guestOSCategoryDao = null; + @Inject protected DiskOfferingDao _diskOfferingDao; + @Inject protected StoragePoolHostDao _poolHostDao; + + @Inject protected VolumeDao _volsDao; + @Inject protected CapacityManager _capacityMgr; + @Inject protected ConfigurationDao _configDao; + @Inject protected PrimaryDataStoreDao _storagePoolDao; + @Inject protected CapacityDao _capacityDao; + 
@Inject protected AccountManager _accountMgr; + @Inject protected StorageManager _storageMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; protected List _planners; public List getPlanners() { @@ -87,20 +209,908 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy } // call planners - DeployDestination dest = null; - for (DeploymentPlanner planner : _planners) { - if (planner.canHandle(vmProfile, plan, avoids)) { - dest = planner.plan(vmProfile, plan, avoids); - } else { - continue; + DataCenter dc = _dcDao.findById(vm.getDataCenterId()); + // check if datacenter is in avoid set + if (avoids.shouldAvoid(dc)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("DataCenter id = '" + dc.getId() + + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning."); } - if (dest != null) { - avoids.addHost(dest.getHost().getId()); + return null; + } + + + ServiceOffering offering = vmProfile.getServiceOffering(); + String plannerName = offering.getDeploymentPlanner(); + if (plannerName == null) { + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + plannerName = "BareMetalPlanner"; + } else { + plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); + } + } + DeploymentPlanner planner = null; + for (DeploymentPlanner plannerInList : _planners) { + if (plannerName.equals(plannerInList.getName())) { + planner = plannerInList; break; } - } + + int cpu_requested = offering.getCpu() * offering.getSpeed(); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeploymentPlanner allocation algorithm: " + planner); + + s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested + + ", requested ram: " + ram_requested); + + s_logger.debug("Is ROOT volume 
READY (pool already allocated)?: " + + (plan.getPoolId() != null ? "Yes" : "No")); + } + + String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); + + if (plan.getHostId() != null && haVmTag == null) { + Long hostIdSpecified = plan.getHostId(); + if (s_logger.isDebugEnabled()) { + s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " + + hostIdSpecified); + } + HostVO host = _hostDao.findById(hostIdSpecified); + if (host == null) { + s_logger.debug("The specified host cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The specified host is in avoid set"); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); + } + + // search for storage under the zone, pod, cluster of the host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), + host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext()); + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, + lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. 
+ for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } + s_logger.debug("Cannnot deploy to specified host, returning."); + return null; + } + + if (vm.getLastHostId() != null && haVmTag == null) { + s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId()); + + HostVO host = _hostDao.findById(vm.getLastHostId()); + if (host == null) { + s_logger.debug("The last host of this VM cannot be found"); + } else if (avoids.shouldAvoid(host)) { + s_logger.debug("The last host of this VM is in avoid set"); + } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + s_logger.debug("The last Host, hostId: " + + host.getId() + + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); + } else { + if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, + "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, + "memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, + cpuOvercommitRatio, memoryOvercommitRatio, true)) { + s_logger.debug("The last host of this VM is UP and has enough capacity"); + s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); + // search for storage under the zone, pod, cluster of + // the last host. 
+ DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), + host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes( + vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + // choose the potential pool for this VM for this host + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, getPlannerUsage(planner)); + if (potentialResources != null) { + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to prepare + // this volume. + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } + } else { + s_logger.debug("The last host of this VM does not have enough capacity"); + } + } else { + s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + + host.getStatus().name() + ", host resource state is: " + host.getResourceState()); + } + } + s_logger.debug("Cannot choose the last host to deploy this VM "); + } + + DeployDestination dest = null; + List clusterList = null; + + if (planner != null && planner.canHandle(vmProfile, plan, avoids)) { + while (true) { + + if (planner instanceof DeploymentClusterPlanner) { + + ExcludeList PlannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(), + avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + 
avoids.getPoolsToAvoid()); + + clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids); + + if (clusterList != null && !clusterList.isEmpty()) { + // planner refactoring. call allocators to list hosts + ExcludeList PlannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), + avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), + avoids.getPoolsToAvoid()); + + resetAvoidSet(PlannerAvoidOutput, PlannerAvoidInput); + + dest = checkClustersforDestination(clusterList, vmProfile, plan, avoids, dc, + getPlannerUsage(planner), PlannerAvoidOutput); + if (dest != null) { + return dest; + } + // reset the avoid input to the planners + resetAvoidSet(avoids, PlannerAvoidOutput); + + } else { + return null; + } + } else { + dest = planner.plan(vmProfile, plan, avoids); + if (dest != null) { + long hostId = dest.getHost().getId(); + avoids.addHost(dest.getHost().getId()); + + if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) { + // found destination + return dest; + } else { + // find another host - seems some concurrent + // deployment picked it up for dedicated access + continue; + } + } else { + return null; + } + } + } + } + + return dest; } + private void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { + if (avoidSet.getDataCentersToAvoid() != null && removeSet.getDataCentersToAvoid() != null) { + avoidSet.getDataCentersToAvoid().removeAll(removeSet.getDataCentersToAvoid()); + } + if (avoidSet.getPodsToAvoid() != null && removeSet.getPodsToAvoid() != null) { + avoidSet.getPodsToAvoid().removeAll(removeSet.getPodsToAvoid()); + } + if (avoidSet.getClustersToAvoid() != null && removeSet.getClustersToAvoid() != null) { + avoidSet.getClustersToAvoid().removeAll(removeSet.getClustersToAvoid()); + } + if (avoidSet.getHostsToAvoid() != null && removeSet.getHostsToAvoid() != null) { + avoidSet.getHostsToAvoid().removeAll(removeSet.getHostsToAvoid()); + } + if 
(avoidSet.getPoolsToAvoid() != null && removeSet.getPoolsToAvoid() != null) { + avoidSet.getPoolsToAvoid().removeAll(removeSet.getPoolsToAvoid()); + } + } + + private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner) { + if (planner != null && planner instanceof DeploymentClusterPlanner) { + return ((DeploymentClusterPlanner) planner).getResourceUsage(); + } else { + return DeploymentPlanner.PlannerResourceUsage.Shared; + } + + } + + @DB + private boolean checkIfHostFitsPlannerUsage(long hostId, PlannerResourceUsage resourceUsageRequired) { + // TODO Auto-generated method stub + // check if this host has been picked up by some other planner + // exclusively + // if planner can work with shared host, check if this host has + // been marked as 'shared' + // else if planner needs dedicated host, + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null) { + long id = reservationEntry.getId(); + PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage(); + + if (hostResourceType != null) { + if (hostResourceType == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + + ", since this host has been reserved for planner usage : " + hostResourceType); + return false; + } + } else { + // reserve the host for required resourceType + // let us lock the reservation entry before updating. 
+ final Transaction txn = Transaction.currentTxn(); + + try { + txn.start(); + + final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() == null) { + lockedEntry.setResourceUsage(resourceUsageRequired); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } else { + // someone updated it earlier. check if we can still use it + if (lockedEntry.getResourceUsage() == resourceUsageRequired) { + return true; + } else { + s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired + + ", since this host has been reserved for planner usage : " + hostResourceType); + return false; + } + } + } finally { + txn.commit(); + } + } + + } + + return false; + } + + @DB + public boolean checkHostReservationRelease(Long hostId) { + + if (hostId != null) { + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null && reservationEntry.getResourceUsage() != null) { + + // check if any VMs are starting or running on this host + List vms = _vmInstanceDao.listUpByHostId(hostId); + if (vms.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + " VMs Running on host " + + hostId); + } + return false; + } + + List vmsByLastHostId = _vmInstanceDao.listByLastHostId(hostId); + if (vmsByLastHostId.size() > 0) { + // check if any VMs are within skip.counting.hours, if yes + // we + // cannot release the host + for (VMInstanceVO stoppedVM : vmsByLastHostId) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime() + .getTime()) / 1000; + if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found VM: 
" + stoppedVM + + " Stopped but reserved on host " + hostId); + } + return false; + } + } + } + + // check if any VMs are stopping on or migrating to this host + List vmsStoppingMigratingByHostId = _vmInstanceDao.findByHostInStates(hostId, + State.Stopping, State.Migrating, State.Starting); + if (vmsStoppingMigratingByHostId.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + + " VMs stopping/migrating on host " + hostId); + } + return false; + } + + // check if any VMs are in starting state with no hostId set yet + // - + // just ignore host release to avoid race condition + List vmsStartingNoHost = _vmInstanceDao.listStartingWithNoHostId(); + + if (vmsStartingNoHost.size() > 0) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot release reservation, Found " + vms.size() + + " VMs starting as of now and no hostId yet stored"); + } + return false; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); + } + + long id = reservationEntry.getId(); + final Transaction txn = Transaction.currentTxn(); + + try { + txn.start(); + + final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); + if (lockedEntry == null) { + s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); + return false; + } + // check before updating + if (lockedEntry.getResourceUsage() != null) { + lockedEntry.setResourceUsage(null); + _plannerHostReserveDao.persist(lockedEntry); + return true; + } + } finally { + txn.commit(); + } + } + + } + return false; + } + + class HostReservationReleaseChecker extends TimerTask { + @Override + public void run() { + try { + s_logger.debug("Checking if any host reservation can be released ... "); + checkHostReservations(); + s_logger.debug("Done running HostReservationReleaseChecker ... 
"); + } catch (Throwable t) { + s_logger.error("Exception in HostReservationReleaseChecker", t); + } + } + } + + private void checkHostReservations() { + List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); + + for (PlannerHostReservationVO hostReservation : reservedHosts) { + HostVO host = _hostDao.findById(hostReservation.getHostId()); + if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { + checkHostReservationRelease(hostReservation.getHostId()); + } + } + + } + + @Override + public boolean processAnswers(long agentId, long seq, Answer[] answers) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean processCommands(long agentId, long seq, Command[] commands) { + // TODO Auto-generated method stub + return false; + } + + @Override + public AgentControlAnswer processControlCommand(long agentId, AgentControlCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public void processConnect(HostVO host, StartupCommand cmd, boolean forRebalance) throws ConnectionException { + if (!(cmd instanceof StartupRoutingCommand)) { + return; + } + + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(host.getId()); + if (reservationEntry == null) { + // record the host in this table + PlannerHostReservationVO newHost = new PlannerHostReservationVO(host.getId(), host.getDataCenterId(), + host.getPodId(), host.getClusterId()); + _plannerHostReserveDao.persist(newHost); + } + + } + + @Override + public boolean processDisconnect(long agentId, Status state) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean isRecurring() { + // TODO Auto-generated method stub + return false; + } + + @Override + public int getTimeout() { + // TODO Auto-generated method stub + return 0; + } + + @Override + public boolean processTimeout(long agentId, long seq) { + // TODO Auto-generated method stub + 
return false; + } + + @Override + public boolean configure(final String name, final Map params) throws ConfigurationException { + _agentMgr.registerForHostEvents(this, true, false, true); + _messageBus.subscribe("VM_ReservedCapacity_Free", new MessageSubscriber() { + @Override + public void onPublishMessage(String senderAddress, String subject, Object obj) { + VMInstanceVO vm = ((VMInstanceVO) obj); + s_logger.debug("MessageBus message: host reserved capacity released for VM: " + vm.getLastHostId() + + ", checking if host reservation can be released for host:" + vm.getLastHostId()); + Long hostId = vm.getLastHostId(); + checkHostReservationRelease(hostId); + } + }); + + _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), + 3600); + + String hostReservationReleasePeriod = _configDao.getValue(Config.HostReservationReleasePeriod.key()); + if (hostReservationReleasePeriod != null) { + _hostReservationReleasePeriod = Long.parseLong(hostReservationReleasePeriod); + if (_hostReservationReleasePeriod <= 0) + _hostReservationReleasePeriod = Long.parseLong(Config.HostReservationReleasePeriod.getDefaultValue()); + } + + _timer = new Timer("HostReservationReleaseChecker"); + + _nodeId = ManagementServerNode.getManagementServerId(); + + return super.configure(name, params); + } + + @Override + public boolean start() { + _timer.schedule(new HostReservationReleaseChecker(), INITIAL_RESERVATION_RELEASE_CHECKER_DELAY, + _hostReservationReleasePeriod); + return true; + } + + @Override + public boolean stop() { + _timer.cancel(); + return true; + } + + // /refactoring planner methods + private DeployDestination checkClustersforDestination(List clusterList, + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, + DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList PlannerAvoidOutput) { + + if (s_logger.isTraceEnabled()) { + s_logger.trace("ClusterId List to consider: " + 
clusterList); + } + + for (Long clusterId : clusterList) { + ClusterVO clusterVO = _clusterDao.findById(clusterId); + + if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { + s_logger.debug("Cluster: " + clusterId + + " has HyperVisorType that does not match the VM, skipping this cluster"); + avoid.addCluster(clusterVO.getId()); + continue; + } + + s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId()); + // search for resources(hosts and storage) under this zone, pod, + // cluster. + DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), + clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); + + // find suitable hosts under this cluster, need as many hosts as we + // get. + List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); + // if found suitable hosts in this cluster, find suitable storage + // pools for each volume of the VM + if (suitableHosts != null && !suitableHosts.isEmpty()) { + if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); + return dest; + } + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, + potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential host and pool for the VM + if (!suitableVolumeStoragePools.isEmpty()) { + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired); + + if (potentialResources != null) { + Pod pod = _podDao.findById(clusterVO.getPodId()); + Host host = _hostDao.findById(potentialResources.first().getId()); + Map storageVolMap = 
potentialResources.second(); + // remove the reused vol<->pool from destination, since + // we don't have to prepare this volume. + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap); + s_logger.debug("Returning Deployment Destination: " + dest); + return dest; + } + } else { + s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId); + } + } else { + s_logger.debug("No suitable hosts found under this Cluster: " + clusterId); + } + + if (canAvoidCluster(clusterVO, avoid, PlannerAvoidOutput)) { + avoid.addCluster(clusterVO.getId()); + } + } + s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); + return null; + } + + private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput) { + + ExcludeList allocatorAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(), + avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid()); + + // remove any hosts/pools that the planners might have added + // to get the list of hosts/pools that Allocators flagged as 'avoid' + if (allocatorAvoidOutput.getHostsToAvoid() != null && plannerAvoidOutput.getHostsToAvoid() != null) { + allocatorAvoidOutput.getHostsToAvoid().removeAll(plannerAvoidOutput.getHostsToAvoid()); + } + if (allocatorAvoidOutput.getPoolsToAvoid() != null && plannerAvoidOutput.getPoolsToAvoid() != null) { + allocatorAvoidOutput.getPoolsToAvoid().removeAll(plannerAvoidOutput.getPoolsToAvoid()); + } + + // if all hosts or all pools in the cluster are in avoid set after this + // pass, then put the cluster in avoid set. 
+ boolean avoidAllHosts = true, avoidAllPools = true; + + List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(), + clusterVO.getPodId(), clusterVO.getDataCenterId(), null); + for (HostVO host : allhostsInCluster) { + if (allocatorAvoidOutput.getHostsToAvoid() == null + || !allocatorAvoidOutput.getHostsToAvoid().contains(host.getId())) { + // there's some host in the cluster that is not yet in avoid set + avoidAllHosts = false; + } + } + + List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), + clusterVO.getPodId(), clusterVO.getId(), null); + for (StoragePoolVO pool : allPoolsInCluster) { + if (allocatorAvoidOutput.getPoolsToAvoid() == null + || !allocatorAvoidOutput.getPoolsToAvoid().contains(pool.getId())) { + // there's some pool in the cluster that is not yet in avoid set + avoidAllPools = false; + } + } + + if (avoidAllHosts || avoidAllPools) { + return true; + } + return false; + } + + protected Pair> findPotentialDeploymentResources(List suitableHosts, + Map> suitableVolumeStoragePools, ExcludeList avoid, + DeploymentPlanner.PlannerResourceUsage resourceUsageRequired) { + s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); + + boolean hostCanAccessPool = false; + boolean haveEnoughSpace = false; + Map storage = new HashMap(); + TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { + @Override + public int compare(Volume v1, Volume v2) { + if (v1.getSize() < v2.getSize()) + return 1; + else + return -1; + } + }); + volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); + boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; + for (Host potentialHost : suitableHosts) { + Map> volumeAllocationMap = new HashMap>(); + for (Volume vol : volumesOrderBySizeDesc) { + haveEnoughSpace = false; + s_logger.debug("Checking if host: " + potentialHost.getId() + + " can access any suitable storage 
pool for volume: " + vol.getVolumeType()); + List volumePoolList = suitableVolumeStoragePools.get(vol); + hostCanAccessPool = false; + for (StoragePool potentialSPool : volumePoolList) { + if (hostCanAccessSPool(potentialHost, potentialSPool)) { + hostCanAccessPool = true; + if (multipleVolume) { + List requestVolumes = null; + if (volumeAllocationMap.containsKey(potentialSPool)) + requestVolumes = volumeAllocationMap.get(potentialSPool); + else + requestVolumes = new ArrayList(); + requestVolumes.add(vol); + + if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) + continue; + volumeAllocationMap.put(potentialSPool, requestVolumes); + } + storage.put(vol, potentialSPool); + haveEnoughSpace = true; + break; + } + } + if (!hostCanAccessPool) { + break; + } + if (!haveEnoughSpace) { + s_logger.warn("insufficient capacity to allocate all volumes"); + break; + } + } + if (hostCanAccessPool && haveEnoughSpace + && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) { + s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + + potentialHost.getName() + " and associated storage pools for this VM"); + return new Pair>(potentialHost, storage); + } else { + avoid.addHost(potentialHost.getId()); + } + } + s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); + return null; + } + + protected boolean hostCanAccessSPool(Host host, StoragePool pool) { + boolean hostCanAccessSPool = false; + + StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); + if (hostPoolLinkage != null) { + hostCanAccessSPool = true; + } + + s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? 
" can" : " cannot") + " access pool: " + + pool.getId()); + return hostCanAccessSPool; + } + + protected List findSuitableHosts(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List suitableHosts = new ArrayList(); + for (HostAllocator allocator : _hostAllocators) { + suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); + if (suitableHosts != null && !suitableHosts.isEmpty()) { + break; + } + } + + if (suitableHosts.isEmpty()) { + s_logger.debug("No suitable hosts found"); + } + return suitableHosts; + } + + protected Pair>, List> findSuitablePoolsForVolumes( + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, + int returnUpTo) { + List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); + Map> suitableVolumeStoragePools = new HashMap>(); + List readyAndReusedVolumes = new ArrayList(); + + // for each volume find list of suitable storage pools by calling the + // allocators + for (VolumeVO toBeCreated : volumesTobeCreated) { + s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + + toBeCreated.getVolumeType().name() + ")"); + + // If the plan specifies a poolId, it means that this VM's ROOT + // volume is ready and the pool should be reused. + // In this case, also check if rest of the volumes are ready and can + // be reused. 
+ if (plan.getPoolId() != null) { + s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + + toBeCreated.getPoolId()); + List suitablePools = new ArrayList(); + StoragePool pool = null; + if (toBeCreated.getPoolId() != null) { + pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); + } else { + pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); + } + + if (!pool.isInMaintenance()) { + if (!avoid.shouldAvoid(pool)) { + long exstPoolDcId = pool.getDataCenterId(); + + long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; + long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1; + if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId + && plan.getClusterId() == exstPoolClusterId) { + s_logger.debug("Planner need not allocate a pool for this volume since its READY"); + suitablePools.add(pool); + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { + readyAndReusedVolumes.add(toBeCreated); + } + continue; + } else { + s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); + } + } else { + s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); + } + } else { + s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("We need to allocate new storagepool for this volume"); + } + if (!isRootAdmin(plan.getReservationContext())) { + if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); + s_logger.debug("Cannot 
deploy to this specified plan, allocation state is disabled, returning."); + } + // Cannot find suitable storage pools under this cluster for + // this volume since allocation_state is disabled. + // - remove any suitable pools found for other volumes. + // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. + suitableVolumeStoragePools.clear(); + break; + } + } + + s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); + + DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); + + boolean useLocalStorage = false; + if (vmProfile.getType() != VirtualMachine.Type.User) { + String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); + if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { + useLocalStorage = true; + } + } else { + useLocalStorage = diskOffering.getUseLocalStorage(); + + // TODO: this is a hacking fix for the problem of deploy + // ISO-based VM on local storage + // when deploying VM based on ISO, we have a service offering + // and an additional disk offering, use-local storage flag is + // actually + // saved in service offering, overrde the flag from service + // offering when it is a ROOT disk + if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { + if (toBeCreated.getVolumeType() == Volume.Type.ROOT) + useLocalStorage = true; + } + } + diskProfile.setUseLocalStorage(useLocalStorage); + + boolean foundPotentialPools = false; + for (StoragePoolAllocator allocator : _storagePoolAllocators) { + final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, + returnUpTo); + if (suitablePools != null && !suitablePools.isEmpty()) { + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + foundPotentialPools = true; + break; + } + } + + if (!foundPotentialPools) { + 
s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + + plan.getClusterId()); + // No suitable storage pools found under this cluster for this + // volume. - remove any suitable pools found for other volumes. + // All volumes should get suitable pools under this cluster; + // else we cant use this cluster. + suitableVolumeStoragePools.clear(); + break; + } + } + + if (suitableVolumeStoragePools.isEmpty()) { + s_logger.debug("No suitable pools found"); + } + + return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); + } + + private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) { + // Check if the zone exists in the system + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) { + s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId); + return false; + } + + Pod pod = _podDao.findById(podId); + if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) { + s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId); + return false; + } + + Cluster cluster = _clusterDao.findById(clusterId); + if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) { + s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId); + return false; + } + + return true; + } + + private boolean isRootAdmin(ReservationContext reservationContext) { + if (reservationContext != null) { + if (reservationContext.getAccount() != null) { + return _accountMgr.isRootAdmin(reservationContext.getAccount().getType()); + } else { + return false; + } + } + return false; + } } diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index e8504a991c1..caf8c6e92db 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ 
b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -49,6 +49,7 @@ import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @Local(value=DeploymentPlanner.class) -public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { +public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner { private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class); @Inject protected HostDao _hostDao; @Inject protected DataCenterDao _dcDao; @@ -103,28 +104,13 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Inject DataStoreManager dataStoreMgr; @Inject protected ClusterDetailsDao _clusterDetailsDao; - protected List _storagePoolAllocators; - public List getStoragePoolAllocators() { - return _storagePoolAllocators; - } - public void setStoragePoolAllocators( - List _storagePoolAllocators) { - this._storagePoolAllocators = _storagePoolAllocators; - } - - protected List _hostAllocators; - public List getHostAllocators() { - return _hostAllocators; - } - public void setHostAllocators(List _hostAllocators) { - this._hostAllocators = _hostAllocators; - } protected String _allocationAlgorithm = "random"; + protected String _globalDeploymentPlanner = "FirstFitPlanner"; @Override - public DeployDestination plan(VirtualMachineProfile vmProfile, + public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { VirtualMachine vm = vmProfile.getVirtualMachine(); @@ -138,136 +124,19 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return null; } - 
ServiceOffering offering = vmProfile.getServiceOffering(); - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - - - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm); - - s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() + - ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested); - - s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No")); - } - - String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); - - if(plan.getHostId() != null && haVmTag == null){ - Long hostIdSpecified = plan.getHostId(); - if (s_logger.isDebugEnabled()){ - s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: " - + hostIdSpecified); - } - HostVO host = _hostDao.findById(hostIdSpecified); - if (host == null) { - s_logger.debug("The specified host cannot be found"); - } else if (avoid.shouldAvoid(host)) { - s_logger.debug("The specified host is in avoid set"); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); - } - - // search for storage under the zone, pod, cluster of the host. 
- DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), - host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext()); - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, - lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential pool for this VM for this host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools); - if (potentialResources != null) { - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since - // we don't have to prepare this volume. - for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } - s_logger.debug("Cannnot deploy to specified host, returning."); - return null; - } - - if (vm.getLastHostId() != null && haVmTag == null) { - s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId()); - - HostVO host = _hostDao.findById(vm.getLastHostId()); - if(host == null){ - s_logger.debug("The last host of this VM cannot be found"); - }else if(avoid.shouldAvoid(host)){ - s_logger.debug("The last host of this VM is in avoid set"); - }else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){ - s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - }else{ - if (host.getStatus() 
== Status.Up && host.getResourceState() == ResourceState.Enabled) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){ - s_logger.debug("The last host of this VM is UP and has enough capacity"); - s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); - //search for storage under the zone, pod, cluster of the last host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - //choose the potential pool for this VM for this host - if(!suitableVolumeStoragePools.isEmpty()){ - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools); - if(potentialResources != null){ - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since we don't have to prepare this volume. 
- for(Volume vol : readyAndReusedVolumes){ - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap); - s_logger.debug("Returning Deployment Destination: "+ dest); - return dest; - } - } - }else{ - s_logger.debug("The last host of this VM does not have enough capacity"); - } - }else{ - s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState()); - } - } - s_logger.debug("Cannot choose the last host to deploy this VM "); - } - - List clusterList = new ArrayList(); if (plan.getClusterId() != null) { Long clusterIdSpecified = plan.getClusterId(); s_logger.debug("Searching resources only under specified Cluster: "+ clusterIdSpecified); ClusterVO cluster = _clusterDao.findById(plan.getClusterId()); if (cluster != null ){ - clusterList.add(clusterIdSpecified); - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); + if (avoid.shouldAvoid(cluster)) { + s_logger.debug("The specified cluster is in avoid set, returning."); + } else { + clusterList.add(clusterIdSpecified); + removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); + } + return clusterList; }else{ s_logger.debug("The specified cluster cannot be found, returning."); avoid.addCluster(plan.getClusterId()); @@ -280,11 +149,15 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { HostPodVO pod = _podDao.findById(podIdSpecified); if (pod != null) { - DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); - if(dest == null){ - avoid.addPod(plan.getPodId()); + if (avoid.shouldAvoid(pod)) { + s_logger.debug("The specified pod is in avoid set, returning."); + } else { + clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid); + if (clusterList == null) { + avoid.addPod(plan.getPodId()); + } 
} - return dest; + return clusterList; } else { s_logger.debug("The specified Pod cannot be found, returning."); avoid.addPod(plan.getPodId()); @@ -305,7 +178,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - private DeployDestination scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ + private List scanPodsForDestination(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid){ ServiceOffering offering = vmProfile.getServiceOffering(); int requiredCpu = offering.getCpu() * offering.getSpeed(); @@ -341,20 +214,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if(!podsWithCapacity.isEmpty()){ prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan); + if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("No Pods found for destination, returning."); + } + return null; + } + List clusterList = new ArrayList(); //loop over pods for(Long podId : prioritizedPodIds){ s_logger.debug("Checking resources under Pod: "+podId); - DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid); - if(dest != null){ - return dest; + List clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, + avoid); + if (clustersUnderPod != null) { + clusterList.addAll(clustersUnderPod); } - avoid.addPod(podId); } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No Pods found for destination, returning."); - } - return null; + return clusterList; }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning."); @@ -363,7 +240,69 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } } - private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile vmProfile, 
DeploymentPlan plan, ExcludeList avoid){ + private Map getCapacityThresholdMap() { + // Lets build this real time so that the admin wont have to restart MS + // if he changes these values + Map disableThresholdMap = new HashMap(); + + String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key()); + float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F); + disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold); + + String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key()); + float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F); + disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold); + + return disableThresholdMap; + } + + private List getCapacitiesForCheckingThreshold() { + List capacityList = new ArrayList(); + capacityList.add(Capacity.CAPACITY_TYPE_CPU); + capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); + return capacityList; + } + + private void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, + VirtualMachineProfile vmProfile, DeploymentPlan plan) { + + List capacityList = getCapacitiesForCheckingThreshold(); + List clustersCrossingThreshold = new ArrayList(); + + ServiceOffering offering = vmProfile.getServiceOffering(); + int cpu_requested = offering.getCpu() * offering.getSpeed(); + long ram_requested = offering.getRamSize() * 1024L * 1024L; + + // For each capacity get the cluster list crossing the threshold and + // remove it from the clusterList that will be used for vm allocation. 
+ for (short capacity : capacityList) { + + if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) { + return; + } + if (capacity == Capacity.CAPACITY_TYPE_CPU) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, + plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested); + } else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, + plan.getDataCenterId(), Config.MemoryCapacityDisableThreshold.key(), ram_requested); + } + + if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0) { + // addToAvoid Set + avoid.addClusterList(clustersCrossingThreshold); + // Remove clusters crossing disabled threshold + clusterListForVmAllocation.removeAll(clustersCrossingThreshold); + + s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters"); + } + + } + } + + private List scanClustersForDestinationInZoneOrPod(long id, boolean isZone, + VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) { VirtualMachine vm = vmProfile.getVirtualMachine(); ServiceOffering offering = vmProfile.getServiceOffering(); @@ -396,6 +335,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { prioritizedClusterIds.removeAll(disabledClusters); } } + + removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan); + }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No clusters found having a host with enough capacity, returning."); @@ -404,7 +346,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } if(!prioritizedClusterIds.isEmpty()){ List clusterList = reorderClusters(id, isZone, 
clusterCapacityInfo, vmProfile, plan); - return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); + return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc); }else{ if (s_logger.isDebugEnabled()) { s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning."); @@ -452,114 +394,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { return disabledPods; } - private List getCapacitiesForCheckingThreshold(){ - List capacityList = new ArrayList(); - capacityList.add(Capacity.CAPACITY_TYPE_CPU); - capacityList.add(Capacity.CAPACITY_TYPE_MEMORY); - return capacityList; - } - - private void removeClustersCrossingThreshold(List clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile vmProfile, DeploymentPlan plan){ - - List capacityList = getCapacitiesForCheckingThreshold(); - List clustersCrossingThreshold = new ArrayList(); - - ServiceOffering offering = vmProfile.getServiceOffering(); - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - - // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation. 
- for(short capacity : capacityList){ - - if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ - return; - } - if (capacity == Capacity.CAPACITY_TYPE_CPU) { - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), Config.CPUCapacityDisableThreshold.key(), cpu_requested); - } - else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) { - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), - Config.MemoryCapacityDisableThreshold.key(), ram_requested ); - } - - - if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ - // addToAvoid Set - avoid.addClusterList(clustersCrossingThreshold); - // Remove clusters crossing disabled threshold - clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - - s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + - " crosses the disable capacity threshold defined at each cluster/ at global value for capacity Type : " + capacity + ", skipping these clusters"); - } - - } - } - - private DeployDestination checkClustersforDestination(List clusterList, VirtualMachineProfile vmProfile, - DeploymentPlan plan, ExcludeList avoid, DataCenter dc){ - - if (s_logger.isTraceEnabled()) { - s_logger.trace("ClusterId List to consider: " + clusterList); - } - - removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan); - - for(Long clusterId : clusterList){ - Cluster clusterVO = _clusterDao.findById(clusterId); - - if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); - avoid.addCluster(clusterVO.getId()); - continue; - } - - s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId()); - //search for resources(hosts and 
storage) under this zone, pod, cluster. - DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext()); - - //find suitable hosts under this cluster, need as many hosts as we get. - List suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL); - //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM - if(suitableHosts != null && !suitableHosts.isEmpty()){ - if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) { - Pod pod = _podDao.findById(clusterVO.getPodId()); - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0)); - return dest; - } - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - //choose the potential host and pool for the VM - if(!suitableVolumeStoragePools.isEmpty()){ - Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools); - - if(potentialResources != null){ - Pod pod = _podDao.findById(clusterVO.getPodId()); - Host host = _hostDao.findById(potentialResources.first().getId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since we don't have to prepare this volume. 
- for(Volume vol : readyAndReusedVolumes){ - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap ); - s_logger.debug("Returning Deployment Destination: "+ dest); - return dest; - } - }else{ - s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId); - } - }else{ - s_logger.debug("No suitable hosts found under this Cluster: "+clusterId); - } - avoid.addCluster(clusterVO.getId()); - } - s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. "); - return null; - } - protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){ //look at the aggregate available cpu and ram per cluster @@ -630,215 +464,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - - protected Pair> findPotentialDeploymentResources(List suitableHosts, Map> suitableVolumeStoragePools){ - s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM"); - - boolean hostCanAccessPool = false; - boolean haveEnoughSpace = false; - Map storage = new HashMap(); - TreeSet volumesOrderBySizeDesc = new TreeSet(new Comparator() { - @Override - public int compare(Volume v1, Volume v2) { - if(v1.getSize() < v2.getSize()) - return 1; - else - return -1; - } - }); - volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet()); - boolean multipleVolume = volumesOrderBySizeDesc.size() > 1; - for(Host potentialHost : suitableHosts){ - Map> volumeAllocationMap = new HashMap>(); - for(Volume vol : volumesOrderBySizeDesc){ - haveEnoughSpace = false; - s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType()); - List volumePoolList = suitableVolumeStoragePools.get(vol); - hostCanAccessPool = false; - for(StoragePool potentialSPool : 
volumePoolList){ - if(hostCanAccessSPool(potentialHost, potentialSPool)){ - hostCanAccessPool = true; - if(multipleVolume){ - List requestVolumes = null; - if(volumeAllocationMap.containsKey(potentialSPool)) - requestVolumes = volumeAllocationMap.get(potentialSPool); - else - requestVolumes = new ArrayList(); - requestVolumes.add(vol); - - if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool)) - continue; - volumeAllocationMap.put(potentialSPool,requestVolumes); - } - storage.put(vol, potentialSPool); - haveEnoughSpace = true; - break; - } - } - if(!hostCanAccessPool){ - break; - } - if(!haveEnoughSpace) { - s_logger.warn("insufficient capacity to allocate all volumes"); - break; - } - } - if(hostCanAccessPool && haveEnoughSpace){ - s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM"); - return new Pair>(potentialHost, storage); - } - } - s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM"); - return null; - } - - protected boolean hostCanAccessSPool(Host host, StoragePool pool){ - boolean hostCanAccessSPool = false; - - StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); - if(hostPoolLinkage != null){ - hostCanAccessSPool = true; - } - - s_logger.debug("Host: "+ host.getId() + (hostCanAccessSPool ?" 
can" : " cannot") + " access pool: "+ pool.getId()); - return hostCanAccessSPool; - } - - protected List findSuitableHosts(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){ - List suitableHosts = new ArrayList(); - for(HostAllocator allocator : _hostAllocators) { - suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo); - if (suitableHosts != null && !suitableHosts.isEmpty()) { - break; - } - } - - if(suitableHosts.isEmpty()){ - s_logger.debug("No suitable hosts found"); - } - return suitableHosts; - } - - protected Pair>, List> findSuitablePoolsForVolumes(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){ - List volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId()); - Map> suitableVolumeStoragePools = new HashMap>(); - List readyAndReusedVolumes = new ArrayList(); - - //for each volume find list of suitable storage pools by calling the allocators - for (VolumeVO toBeCreated : volumesTobeCreated) { - s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")"); - - //If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused. - //In this case, also check if rest of the volumes are ready and can be reused. 
- if(plan.getPoolId() != null){ - s_logger.debug("Volume has pool(" + plan.getPoolId() + ") already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); - StoragePool pool = null; - if(toBeCreated.getPoolId() != null){ - s_logger.debug("finding pool by id '" + toBeCreated.getPoolId() + "'"); - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); - }else{ - s_logger.debug("finding pool by id '" + plan.getPoolId() + "'"); - pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); - } - - if(pool != null){ - if(!pool.isInMaintenance()){ - if(!avoid.shouldAvoid(pool)){ - long exstPoolDcId = pool.getDataCenterId(); - - long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; - long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1; - if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){ - s_logger.debug("Planner need not allocate a pool for this volume since its READY"); - suitablePools.add(pool); - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { - readyAndReusedVolumes.add(toBeCreated); - } - continue; - }else{ - s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); - } - }else{ - s_logger.debug("Unable to find pool by provided id"); - } - } - - if(s_logger.isDebugEnabled()){ - s_logger.debug("We need to allocate new storagepool for this volume"); - } - if(!isRootAdmin(plan.getReservationContext())){ - 
if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){ - if(s_logger.isDebugEnabled()){ - s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); - } - //Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled. - //- remove any suitable pools found for other volumes. - //All volumes should get suitable pools under this cluster; else we cant use this cluster. - suitableVolumeStoragePools.clear(); - break; - } - } - - s_logger.debug("Calling StoragePoolAllocators to find suitable pools"); - - DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); - DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); - - boolean useLocalStorage = false; - if (vmProfile.getType() != VirtualMachine.Type.User) { - String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key()); - if (ssvmUseLocalStorage.equalsIgnoreCase("true")) { - useLocalStorage = true; - } - } else { - useLocalStorage = diskOffering.getUseLocalStorage(); - - // TODO: this is a hacking fix for the problem of deploy ISO-based VM on local storage - // when deploying VM based on ISO, we have a service offering and an additional disk offering, use-local storage flag is actually - // saved in service offering, overrde the flag from service offering when it is a ROOT disk - if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) { - if(toBeCreated.getVolumeType() == Volume.Type.ROOT) - useLocalStorage = true; - } - } - diskProfile.setUseLocalStorage(useLocalStorage); - - boolean foundPotentialPools = false; - for(StoragePoolAllocator allocator : _storagePoolAllocators) { - final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, 
returnUpTo); - if (suitablePools != null && !suitablePools.isEmpty()) { - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - foundPotentialPools = true; - break; - } - } - - if(!foundPotentialPools){ - s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId()); - //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes. - //All volumes should get suitable pools under this cluster; else we cant use this cluster. - suitableVolumeStoragePools.clear(); - break; - } - } - - if(suitableVolumeStoragePools.isEmpty()){ - s_logger.debug("No suitable pools found"); - } - - return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); - } - - private boolean isRootAdmin(ReservationContext reservationContext) { if(reservationContext != null){ if(reservationContext.getAccount() != null){ @@ -859,10 +484,17 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Override public boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid) { - if(vm.getHypervisorType() != HypervisorType.BareMetal){ - //check the allocation strategy - if (_allocationAlgorithm != null && (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString()))) { - return true; + // check what the ServiceOffering says. 
If null, check the global config + ServiceOffering offering = vm.getServiceOffering(); + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + if (offering != null && offering.getDeploymentPlanner() != null) { + if (offering.getDeploymentPlanner().equals(this.getName())) { + return true; + } + } else { + if (_globalDeploymentPlanner != null && _globalDeploymentPlanner.equals(this._name)) { + return true; + } } } return false; @@ -872,29 +504,20 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); _allocationAlgorithm = _configDao.getValue(Config.VmAllocationAlgorithm.key()); + _globalDeploymentPlanner = _configDao.getValue(Config.VmDeploymentPlanner.key()); return true; } - private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){ - // Check if the zone exists in the system - DataCenterVO zone = _dcDao.findById(zoneId); - if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){ - s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId); - return false; - } - Pod pod = _podDao.findById(podId); - if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){ - s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId); - return false; - } + @Override + public DeployDestination plan(VirtualMachineProfile vm, DeploymentPlan plan, + ExcludeList avoid) throws InsufficientServerCapacityException { + // TODO Auto-generated method stub + return null; + } - Cluster cluster = _clusterDao.findById(clusterId); - if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){ - s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId); - return false; - } - - return true; + @Override + public PlannerResourceUsage getResourceUsage() { + return 
PlannerResourceUsage.Shared; } } diff --git a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java deleted file mode 100755 index ce494051376..00000000000 --- a/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java +++ /dev/null @@ -1,54 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.deploy; - -import javax.ejb.Local; - -import org.apache.log4j.Logger; - -import com.cloud.deploy.DeploymentPlanner.AllocationAlgorithm; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.vm.UserVmVO; - -@Local(value = {DeployPlannerSelector.class}) -public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { - private static final Logger s_logger = Logger.getLogger(HypervisorVmPlannerSelector.class); - - @Override - public String selectPlanner(UserVmVO vm) { - if (vm.getHypervisorType() != HypervisorType.BareMetal) { - //check the allocation strategy - if (_allocationAlgorithm != null) { - if (_allocationAlgorithm.equals(AllocationAlgorithm.random.toString()) - || _allocationAlgorithm.equals(AllocationAlgorithm.firstfit.toString())) { - return "FirstFitPlanner"; - } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userdispersing.toString())) { - return "UserDispersingPlanner"; - } else if (_allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_random.toString()) - || _allocationAlgorithm.equals(AllocationAlgorithm.userconcentratedpod_firstfit.toString())) { - return "UserConcentratedPodPlanner"; - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("The allocation algorithm is null, cannot select the planner"); - } - } - } - - return null; - } -} diff --git a/server/src/com/cloud/deploy/PlannerHostReservationVO.java b/server/src/com/cloud/deploy/PlannerHostReservationVO.java new file mode 100644 index 00000000000..cf5f03177f7 --- /dev/null +++ b/server/src/com/cloud/deploy/PlannerHostReservationVO.java @@ -0,0 +1,117 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import org.apache.cloudstack.api.InternalIdentity; + +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; + +@Entity +@Table(name = "op_host_planner_reservation") +public class PlannerHostReservationVO implements InternalIdentity { + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="host_id") + private Long hostId; + + @Column(name="data_center_id") + private Long dataCenterId; + + @Column(name="pod_id") + private Long podId; + + @Column(name="cluster_id") + private Long clusterId; + + @Column(name = "resource_usage") + @Enumerated(EnumType.STRING) + private PlannerResourceUsage resourceUsage; + + public PlannerHostReservationVO() { + } + + public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId) { + this.hostId = hostId; + this.dataCenterId = dataCenterId; + this.podId = podId; + this.clusterId = clusterId; + } + + public PlannerHostReservationVO(Long hostId, Long dataCenterId, Long podId, Long clusterId, + PlannerResourceUsage resourceUsage) { + this.hostId = 
hostId; + this.dataCenterId = dataCenterId; + this.podId = podId; + this.clusterId = clusterId; + this.resourceUsage = resourceUsage; + } + + @Override + public long getId() { + return id; + } + + public Long getHostId() { + return hostId; + } + + public void setHostId(Long hostId) { + this.hostId = hostId; + } + + public Long getDataCenterId() { + return dataCenterId; + } + public void setDataCenterId(Long dataCenterId) { + this.dataCenterId = dataCenterId; + } + + public Long getPodId() { + return podId; + } + public void setPodId(long podId) { + this.podId = new Long(podId); + } + + public Long getClusterId() { + return clusterId; + } + public void setClusterId(long clusterId) { + this.clusterId = new Long(clusterId); + } + + public PlannerResourceUsage getResourceUsage() { + return resourceUsage; + } + + public void setResourceUsage(PlannerResourceUsage resourceType) { + this.resourceUsage = resourceType; + } + +} diff --git a/server/src/com/cloud/deploy/DeployPlannerSelector.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java old mode 100755 new mode 100644 similarity index 67% rename from server/src/com/cloud/deploy/DeployPlannerSelector.java rename to server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java index 062b492d8fc..69118f13896 --- a/server/src/com/cloud/deploy/DeployPlannerSelector.java +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDao.java @@ -1,24 +1,30 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.deploy; - -import com.cloud.utils.component.Adapter; -import com.cloud.vm.UserVmVO; - -public interface DeployPlannerSelector extends Adapter { - String selectPlanner(UserVmVO vm); -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.deploy.dao; + +import java.util.List; + +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDao; + +public interface PlannerHostReservationDao extends GenericDao { + + PlannerHostReservationVO findByHostId(long hostId); + + List listAllReservedHosts(); + +} diff --git a/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java new file mode 100644 index 00000000000..41e09647d7e --- /dev/null +++ b/server/src/com/cloud/deploy/dao/PlannerHostReservationDaoImpl.java @@ -0,0 +1,63 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.deploy.dao; + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.ejb.Local; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Local(value = { PlannerHostReservationDao.class }) +public class PlannerHostReservationDaoImpl extends GenericDaoBase implements + PlannerHostReservationDao { + + private SearchBuilder _hostIdSearch; + private SearchBuilder _reservedHostSearch; + + public PlannerHostReservationDaoImpl() { + + } + + @PostConstruct + protected void init() { + _hostIdSearch = createSearchBuilder(); + _hostIdSearch.and("hostId", _hostIdSearch.entity().getHostId(), SearchCriteria.Op.EQ); + _hostIdSearch.done(); + + _reservedHostSearch = createSearchBuilder(); + _reservedHostSearch.and("usage", _reservedHostSearch.entity().getResourceUsage(), SearchCriteria.Op.NNULL); + _reservedHostSearch.done(); + } + + @Override + public PlannerHostReservationVO findByHostId(long hostId) { + SearchCriteria sc = _hostIdSearch.create(); + sc.setParameters("hostId", hostId); + return findOneBy(sc); + } + + @Override + public List listAllReservedHosts() { + SearchCriteria sc = _reservedHostSearch.create(); + return listBy(sc); + } + +} diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 0ab35dd00a2..c60f0953a78 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -85,6 +85,10 @@ import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.deploy.dao.PlannerHostReservationDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; import 
com.cloud.exception.AgentUnavailableException; import com.cloud.exception.DiscoveryException; import com.cloud.exception.InvalidParameterValueException; @@ -212,6 +216,8 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, protected HighAvailabilityManager _haMgr; @Inject protected StorageService _storageSvr; + @Inject + PlannerHostReservationDao _plannerHostReserveDao; protected List _discoverers; public List getDiscoverers() { @@ -2851,4 +2857,41 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, ResourceState.Enabled); return sc.list(); } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true) + public boolean releaseHostReservation(Long hostId) { + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); + if (reservationEntry != null) { + long id = reservationEntry.getId(); + PlannerHostReservationVO hostReservation = _plannerHostReserveDao.lockRow(id, true); + if (hostReservation == null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); + } + txn.rollback(); + return false; + } + hostReservation.setResourceUsage(null); + _plannerHostReserveDao.persist(hostReservation); + txn.commit(); + return true; + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host reservation for host: " + hostId + + " does not even exist. 
Release reservartion call is ignored."); + } + return false; + } catch (CloudRuntimeException e) { + throw e; + } catch (Throwable t) { + s_logger.error("Unable to release host reservation for host: " + hostId, t); + txn.rollback(); + return false; + } + } } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index bc3728203bc..f74b7ad964c 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -78,6 +78,7 @@ import org.apache.cloudstack.api.command.admin.host.FindHostsForMigrationCmd; import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.host.PrepareForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.host.ReconnectHostCmd; +import org.apache.cloudstack.api.command.admin.host.ReleaseHostReservationCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; import org.apache.cloudstack.api.command.admin.internallb.ConfigureInternalLoadBalancerElementCmd; @@ -462,6 +463,7 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -589,6 +591,7 @@ import com.cloud.vm.dao.VMInstanceDao; import edu.emory.mathcs.backport.java.util.Arrays; import edu.emory.mathcs.backport.java.util.Collections; +import org.apache.cloudstack.api.command.admin.config.ListDeploymentPlannersCmd; public class ManagementServerImpl extends ManagerBase implements ManagementServer { @@ -726,11 +729,21 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe private List _userAuthenticators; private List 
_userPasswordEncoders; + protected List _planners; + + public List getPlanners() { + return _planners; + } + + public void setPlanners(List _planners) { + this._planners = _planners; + } + @Inject ClusterManager _clusterMgr; private String _hashKey = null; private String _encryptionKey = null; private String _encryptionIV = null; - + @Inject protected AffinityGroupVMMapDao _affinityGroupVMMapDao; @@ -976,29 +989,29 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String zoneType = cmd.getZoneType(); String keyword = cmd.getKeyword(); zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId); - - + + Filter searchFilter = new Filter(ClusterVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); - - SearchBuilder sb = _clusterDao.createSearchBuilder(); - sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); - sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); - sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + + SearchBuilder sb = _clusterDao.createSearchBuilder(); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); + sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); sb.and("clusterType", sb.entity().getClusterType(), SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); - + if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), 
zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); } - - - SearchCriteria sc = sb.create(); + + + SearchCriteria sc = sb.create(); if (id != null) { - sc.setParameters("id", id); + sc.setParameters("id", id); } if (name != null) { @@ -1026,9 +1039,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + if (keyword != null) { SearchCriteria ssc = _clusterDao.createSearchCriteria(); ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); @@ -1441,26 +1454,26 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public Pair, Integer> searchForPods(ListPodsByCmd cmd) { String podName = cmd.getPodName(); Long id = cmd.getId(); - Long zoneId = cmd.getZoneId(); + Long zoneId = cmd.getZoneId(); Object keyword = cmd.getKeyword(); Object allocationState = cmd.getAllocationState(); String zoneType = cmd.getZoneType(); zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), zoneId); - + Filter searchFilter = new Filter(HostPodVO.class, "dataCenterId", true, cmd.getStartIndex(), cmd.getPageSizeVal()); - SearchBuilder sb = _hostPodDao.createSearchBuilder(); + SearchBuilder sb = _hostPodDao.createSearchBuilder(); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); - sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("allocationState", sb.entity().getAllocationState(), SearchCriteria.Op.EQ); - + if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + 
zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); } - + SearchCriteria sc = sb.create(); if (keyword != null) { SearchCriteria ssc = _hostPodDao.createSearchCriteria(); @@ -1473,23 +1486,23 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (id != null) { sc.setParameters("id", id); } - + if (podName != null) { sc.setParameters("name", "%" + podName + "%"); } - + if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); } - + if (allocationState != null) { sc.setParameters("allocationState", allocationState); - } - - if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + + if(zoneType != null) { + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + } + Pair, Integer> result = _hostPodDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } @@ -2903,7 +2916,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(ListAffinityGroupsCmd.class); cmdList.add(UpdateVMAffinityGroupCmd.class); cmdList.add(ListAffinityGroupTypesCmd.class); - + cmdList.add(ListDeploymentPlannersCmd.class); + cmdList.add(ReleaseHostReservationCmd.class); cmdList.add(AddResourceDetailCmd.class); cmdList.add(RemoveResourceDetailCmd.class); cmdList.add(ListResourceDetailsCmd.class); @@ -3105,10 +3119,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if(zoneType != null) { SearchBuilder zoneSb = _dcDao.createSearchBuilder(); - zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); + zoneSb.and("zoneNetworkType", zoneSb.entity().getNetworkType(), SearchCriteria.Op.EQ); sb.join("zoneSb", zoneSb, sb.entity().getDataCenterId(), zoneSb.entity().getId(), JoinBuilder.JoinType.INNER); - } - + } + SearchCriteria sc = 
sb.create(); if (keyword != null) { @@ -3150,9 +3164,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } if(zoneType != null) { - sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); + sc.setJoinParameters("zoneSb", "zoneNetworkType", zoneType); } - + Pair, Integer> result = _vmInstanceDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } @@ -3677,7 +3691,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe // although we may have race conditioning here, database transaction serialization should // give us the same key if (_hashKey == null) { - _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), + _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), getBase64EncodedRandomKey(128)); } return _hashKey; @@ -3686,41 +3700,41 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override public String getEncryptionKey() { if (_encryptionKey == null) { - _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), - Config.EncryptionKey.getCategory(), + _encryptionKey = _configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), + Config.EncryptionKey.getCategory(), getBase64EncodedRandomKey(128)); } return _encryptionKey; } - + @Override public String getEncryptionIV() { if (_encryptionIV == null) { - _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), - Config.EncryptionIV.getCategory(), + _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), + Config.EncryptionIV.getCategory(), getBase64EncodedRandomKey(128)); } return _encryptionIV; } - + @Override @DB public void resetEncryptionKeyIV() { - + SearchBuilder sb = _configDao.createSearchBuilder(); sb.and("name1", sb.entity().getName(), SearchCriteria.Op.EQ); sb.or("name2", sb.entity().getName(), 
SearchCriteria.Op.EQ); sb.done(); - + SearchCriteria sc = sb.create(); sc.setParameters("name1", Config.EncryptionKey.key()); sc.setParameters("name2", Config.EncryptionIV.key()); - + _configDao.expunge(sc); _encryptionKey = null; _encryptionIV = null; } - + private static String getBase64EncodedRandomKey(int nBits) { SecureRandom random; try { @@ -4056,4 +4070,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } + + @Override + public List listDeploymentPlanners() { + List plannersAvailable = new ArrayList(); + for (DeploymentPlanner planner : _planners) { + plannersAvailable.add(planner.getName()); + } + + return plannersAvailable; + } + } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index aa065294f8c..a3b731ab2a5 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -98,7 +98,6 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; -import com.cloud.deploy.DeployPlannerSelector; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -402,9 +401,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject AffinityGroupDao _affinityGroupDao; - @Inject - List plannerSelectors; - protected ScheduledExecutorService _executor = null; protected int _expungeInterval; protected int _expungeDelay; @@ -2836,7 +2832,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } return result; } - + @Override public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, DeployDestination dest, @@ -3036,7 +3032,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + " stop due to exception ", ex); } } - + VMInstanceVO vm = 
profile.getVirtualMachine(); List nics = _nicDao.listByVmId(vm.getId()); for (NicVO nic : nics) { @@ -3174,15 +3170,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - String plannerName = null; - for (DeployPlannerSelector dps : plannerSelectors) { - plannerName = dps.selectPlanner(vm); - if (plannerName != null) { - break; - } - } + // Get serviceOffering for Virtual Machine + ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); + String plannerName = offering.getDeploymentPlanner(); if (plannerName == null) { - throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType())); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + plannerName = "BareMetalPlanner"; + } else { + plannerName = _configDao.getValue(Config.VmDeploymentPlanner.key()); + } } String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString()); @@ -3826,7 +3822,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + cmd.getAccountName() + " is disabled."); } - //check caller has access to both the old and new account + //check caller has access to both the old and new account _accountMgr.checkAccess(caller, null, true, oldAccount); _accountMgr.checkAccess(caller, null, true, newAccount); diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java index 5202c317e56..1fff3a63b1d 100644 --- a/server/test/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java @@ -608,4 +608,10 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return null; } + @Override + public boolean 
releaseHostReservation(Long hostId) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java new file mode 100644 index 00000000000..e3b7d311ba7 --- /dev/null +++ b/server/test/com/cloud/vm/DeploymentPlanningManagerImplTest.java @@ -0,0 +1,359 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm; + +import static org.junit.Assert.*; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import com.cloud.service.ServiceOfferingVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.agent.AgentManager; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentClusterPlanner; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage; +import com.cloud.deploy.DeploymentPlanningManagerImpl; +import com.cloud.deploy.FirstFitPlanner; +import com.cloud.deploy.PlannerHostReservationVO; +import com.cloud.deploy.dao.PlannerHostReservationDao; +import org.apache.cloudstack.affinity.AffinityGroupProcessor; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import 
org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.exception.AffinityConflictException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.user.AccountManager; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class DeploymentPlanningManagerImplTest { + + @Inject + DeploymentPlanningManagerImpl _dpm; + + @Inject + PlannerHostReservationDao _plannerHostReserveDao; + + @Inject VirtualMachineProfileImpl vmProfile; + + @Inject + AffinityGroupVMMapDao _affinityGroupVMMapDao; + + @Inject + ExcludeList avoids; + + @Inject + DataCenterVO dc; + + @Inject + DataCenterDao _dcDao; + + @Inject + FirstFitPlanner _planner; + + @Inject + ClusterDao _clusterDao; + + private static long domainId = 5L; + + private static long dataCenterId = 1L; + + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + + PlannerHostReservationVO 
reservationVO = new PlannerHostReservationVO(200L, 1L, 2L, 3L, PlannerResourceUsage.Shared); + Mockito.when(_plannerHostReserveDao.persist(Mockito.any(PlannerHostReservationVO.class))).thenReturn(reservationVO); + Mockito.when(_plannerHostReserveDao.findById(Mockito.anyLong())).thenReturn(reservationVO); + Mockito.when(_affinityGroupVMMapDao.countAffinityGroupsForVm(Mockito.anyLong())).thenReturn(0L); + + VMInstanceVO vm = new VMInstanceVO(); + Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); + + Mockito.when(_dcDao.findById(Mockito.anyLong())).thenReturn(dc); + Mockito.when(dc.getId()).thenReturn(dataCenterId); + + ClusterVO clusterVO = new ClusterVO(); + clusterVO.setHypervisorType(HypervisorType.XenServer.toString()); + Mockito.when(_clusterDao.findById(Mockito.anyLong())).thenReturn(clusterVO); + + Mockito.when(_planner.getName()).thenReturn("FirstFitPlanner"); + List planners = new ArrayList(); + planners.add(_planner); + _dpm.setPlanners(planners); + + } + + @Test + public void dataCenterAvoidTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(true); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("DataCenter is in avoid set, destination should be null! 
", dest); + } + + @Test + public void plannerCannotHandleTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, + "UserDispersingPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false); + + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(false); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! ", dest); + } + + @Test + public void emptyClusterListTest() throws InsufficientServerCapacityException, AffinityConflictException { + ServiceOfferingVO svcOffering = new ServiceOfferingVO("testOffering", 1, 512, 500, 1, 1, false, false, false, + "test dpm", false, false, null, false, VirtualMachine.Type.User, domainId, null, "FirstFitPlanner"); + Mockito.when(vmProfile.getServiceOffering()).thenReturn(svcOffering); + + DataCenterDeployment plan = new DataCenterDeployment(dataCenterId); + Mockito.when(avoids.shouldAvoid((DataCenterVO) Mockito.anyObject())).thenReturn(false); + Mockito.when(_planner.canHandle(vmProfile, plan, avoids)).thenReturn(true); + + Mockito.when(((DeploymentClusterPlanner) _planner).orderClusters(vmProfile, plan, avoids)).thenReturn(null); + DeployDestination dest = _dpm.planDeployment(vmProfile, plan, avoids); + assertNull("Planner cannot handle, destination should be null! 
", dest); + } + + + @Configuration + @ComponentScan(basePackageClasses = { DeploymentPlanningManagerImpl.class }, includeFilters = { @Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public FirstFitPlanner firstFitPlanner() { + return Mockito.mock(FirstFitPlanner.class); + } + + @Bean + public DeploymentPlanner deploymentPlanner() { + return Mockito.mock(DeploymentPlanner.class); + } + + @Bean + public DataCenterVO dataCenter() { + return Mockito.mock(DataCenterVO.class); + } + + @Bean + public ExcludeList excludeList() { + return Mockito.mock(ExcludeList.class); + } + + @Bean + public VirtualMachineProfileImpl virtualMachineProfileImpl() { + return Mockito.mock(VirtualMachineProfileImpl.class); + } + + @Bean + public ClusterDetailsDao clusterDetailsDao() { + return Mockito.mock(ClusterDetailsDao.class); + } + + @Bean + public DataStoreManager cataStoreManager() { + return Mockito.mock(DataStoreManager.class); + } + + @Bean + public StorageManager storageManager() { + return Mockito.mock(StorageManager.class); + } + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public GuestOSDao guestOSDao() { + return Mockito.mock(GuestOSDao.class); + } + + @Bean + public GuestOSCategoryDao guestOSCategoryDao() { + return Mockito.mock(GuestOSCategoryDao.class); + } + + @Bean + public CapacityManager capacityManager() { + return Mockito.mock(CapacityManager.class); + } + + @Bean + public StoragePoolHostDao storagePoolHostDao() { + return Mockito.mock(StoragePoolHostDao.class); + } + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public ConfigurationDao 
configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public DiskOfferingDao diskOfferingDao() { + return Mockito.mock(DiskOfferingDao.class); + } + + @Bean + public PrimaryDataStoreDao primaryDataStoreDao() { + return Mockito.mock(PrimaryDataStoreDao.class); + } + + @Bean + public CapacityDao capacityDao() { + return Mockito.mock(CapacityDao.class); + } + + @Bean + public PlannerHostReservationDao plannerHostReservationDao() { + return Mockito.mock(PlannerHostReservationDao.class); + } + + @Bean + public AffinityGroupProcessor affinityGroupProcessor() { + return Mockito.mock(AffinityGroupProcessor.class); + } + + @Bean + public AffinityGroupDao affinityGroupDao() { + return Mockito.mock(AffinityGroupDao.class); + } + + @Bean + public AffinityGroupVMMapDao affinityGroupVMMapDao() { + return Mockito.mock(AffinityGroupVMMapDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public MessageBus messageBus() { + return Mockito.mock(MessageBus.class); + } + + + @Bean + public UserVmDao userVMDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public VMInstanceDao vmInstanceDao() { + return Mockito.mock(VMInstanceDao.class); + } + + @Bean + public DataCenterDao dataCenterDao() { + return Mockito.mock(DataCenterDao.class); + } + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index b64278c9709..ba18fa1c11d 100755 --- 
a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA, - boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { // TODO Auto-generated method stub return null; } diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index 6f52397251b..7ffbe32d8bd 100644 --- a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -82,6 +82,7 @@ import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; import com.cloud.projects.ProjectManager; +import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDaoImpl; import com.cloud.storage.dao.DiskOfferingDaoImpl; import com.cloud.storage.dao.S3DaoImpl; @@ -155,162 +156,167 @@ useDefaultFilters=false ) public class ChildTestConfiguration { - + + @Bean + public ManagementService managementService() { + return Mockito.mock(ManagementService.class); + } + @Bean public AccountManager acctMgr() { return Mockito.mock(AccountManager.class); } - + @Bean public NetworkService ntwkSvc() { return Mockito.mock(NetworkService.class); } - + @Bean public NetworkModel ntwkMdl() { return 
Mockito.mock(NetworkModel.class); } - + @Bean public AlertManager alertMgr() { return Mockito.mock(AlertManager.class); } - + @Bean public SecurityChecker securityChkr() { return Mockito.mock(SecurityChecker.class); } - + @Bean public ResourceLimitService resourceSvc() { return Mockito.mock(ResourceLimitService.class); } - + @Bean public ProjectManager projectMgr() { return Mockito.mock(ProjectManager.class); } - + @Bean public SecondaryStorageVmManager ssvmMgr() { return Mockito.mock(SecondaryStorageVmManager.class); } - + @Bean public SwiftManager swiftMgr() { return Mockito.mock(SwiftManager.class); } - + @Bean public S3Manager s3Mgr() { return Mockito.mock(S3Manager.class); } - + @Bean public VpcManager vpcMgr() { return Mockito.mock(VpcManager.class); } - + @Bean public UserVmDao userVMDao() { return Mockito.mock(UserVmDao.class); } - + @Bean public RulesManager rulesMgr() { return Mockito.mock(RulesManager.class); } - + @Bean public LoadBalancingRulesManager lbRulesMgr() { return Mockito.mock(LoadBalancingRulesManager.class); } - + @Bean public RemoteAccessVpnService vpnMgr() { return Mockito.mock(RemoteAccessVpnService.class); } - + @Bean public NetworkGuru ntwkGuru() { return Mockito.mock(NetworkGuru.class); } - + @Bean public NetworkElement ntwkElement() { return Mockito.mock(NetworkElement.class); } - + @Bean public IpDeployer ipDeployer() { return Mockito.mock(IpDeployer.class); } - + @Bean public DhcpServiceProvider dhcpProvider() { return Mockito.mock(DhcpServiceProvider.class); } - + @Bean public FirewallManager firewallMgr() { return Mockito.mock(FirewallManager.class); } - + @Bean public AgentManager agentMgr() { return Mockito.mock(AgentManager.class); } - + @Bean public StorageNetworkManager storageNtwkMgr() { return Mockito.mock(StorageNetworkManager.class); } - + @Bean public NetworkACLManager ntwkAclMgr() { return Mockito.mock(NetworkACLManager.class); } - + @Bean public Ipv6AddressManager ipv6Mgr() { return 
Mockito.mock(Ipv6AddressManager.class); } - + @Bean public ConfigurationDao configDao() { return Mockito.mock(ConfigurationDao.class); } - + @Bean public UserContext userContext() { return Mockito.mock(UserContext.class); } - + @Bean public UserContextInitializer userContextInitializer() { return Mockito.mock(UserContextInitializer.class); } - + @Bean public NetworkManager networkManager() { return Mockito.mock(NetworkManager.class); } - + @Bean public NetworkOfferingDao networkOfferingDao() { return Mockito.mock(NetworkOfferingDao.class); } - + @Bean public NetworkDao networkDao() { return Mockito.mock(NetworkDao.class); } - + @Bean public NetworkOfferingServiceMapDao networkOfferingServiceMapDao() { return Mockito.mock(NetworkOfferingServiceMapDao.class); } - + @Bean public DataCenterLinkLocalIpAddressDao datacenterLinkLocalIpAddressDao() { return Mockito.mock(DataCenterLinkLocalIpAddressDao.class); @@ -342,5 +348,5 @@ public class ChildTestConfiguration { } } - + } diff --git a/server/test/resources/affinityContext.xml b/server/test/resources/affinityContext.xml new file mode 100644 index 00000000000..ed880dd5648 --- /dev/null +++ b/server/test/resources/affinityContext.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 334aae76f33..442a5446be5 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -973,9 +973,61 @@ CREATE TABLE `cloud`.`network_asa1000v_map` ( ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `eip_associate_public_ip` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if public IP is associated with user VM creation by default when EIP service is enabled.' 
AFTER `elastic_ip_service`; --- Re-enable foreign key checking, at the end of the upgrade path -SET foreign_key_checks = 1; +CREATE TABLE `cloud`.`op_host_planner_reservation` ( + `id` bigint unsigned NOT NULL auto_increment, + `data_center_id` bigint unsigned NOT NULL, + `pod_id` bigint unsigned, + `cluster_id` bigint unsigned, + `host_id` bigint unsigned, + `resource_usage` varchar(255) COMMENT 'Shared(between planners) Vs Dedicated (exclusive usage to a planner)', + PRIMARY KEY (`id`), + INDEX `i_op_host_planner_reservation__host_resource_usage`(`host_id`, `resource_usage`), + CONSTRAINT `fk_planner_reservation__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `cloud`.`data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__pod_id` FOREIGN KEY (`pod_id`) REFERENCES `cloud`.`host_pod_ref`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_planner_reservation__cluster_id` FOREIGN KEY (`cluster_id`) REFERENCES `cloud`.`cluster`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`service_offering` ADD COLUMN `deployment_planner` varchar(255) COMMENT 'Planner heuristics used to deploy a VM of this offering; if null global config vm.deployment.planner is used'; + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.deployment.planner', 'FirstFitPlanner', '[''FirstFitPlanner'', ''UserDispersingPlanner'', ''UserConcentratedPodPlanner'']: DeploymentPlanner heuristic that will be used for VM deployment.'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'host.reservation.release.period', '300000', 'The interval in milliseconds between host reservation release checks'); + +DROP VIEW IF EXISTS `cloud`.`service_offering_view`; +CREATE VIEW `cloud`.`service_offering_view` AS + select + service_offering.id, + 
disk_offering.uuid, + disk_offering.name, + disk_offering.display_text, + disk_offering.created, + disk_offering.tags, + disk_offering.removed, + disk_offering.use_local_storage, + disk_offering.system_use, + service_offering.cpu, + service_offering.speed, + service_offering.ram_size, + service_offering.nw_rate, + service_offering.mc_rate, + service_offering.ha_enabled, + service_offering.limit_cpu_use, + service_offering.host_tag, + service_offering.default_use, + service_offering.vm_type, + service_offering.sort_key, + service_offering.deployment_planner, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path + from + `cloud`.`service_offering` + inner join + `cloud`.`disk_offering` ON service_offering.id = disk_offering.id + left join + `cloud`.`domain` ON disk_offering.domain_id = domain.id; -- Add "default" field to account/user tables ALTER TABLE `cloud`.`account` ADD COLUMN `default` int(1) unsigned NOT NULL DEFAULT '0' COMMENT '1 if account is default'; @@ -1605,3 +1657,8 @@ CREATE TABLE `cloud`.`nic_ip_alias` ( alter table `cloud`.`vpc_gateways` add column network_acl_id bigint unsigned default 1 NOT NULL; update `cloud`.`vpc_gateways` set network_acl_id = 2; + +-- Re-enable foreign key checking, at the end of the upgrade path +SET foreign_key_checks = 1; + + diff --git a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py new file mode 100644 index 00000000000..d904a4cb7d8 --- /dev/null +++ b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#!/usr/bin/env python + +import marvin +from marvin import cloudstackTestCase +from marvin.cloudstackTestCase import * + +import unittest +import hashlib +import random + +class TestDeployVmWithVariedPlanners(cloudstackTestCase): + """ + This test tests that we can create serviceOfferings with different deployment Planners and deploy virtual machines into a user account + using these service offerings and builtin template + """ + def setUp(self): + """ + CloudStack internally saves its passwords in md5 form and that is how we + specify it in the API. Python's hashlib library helps us to quickly hash + strings as follows + """ + mdf = hashlib.md5() + mdf.update('password') + mdf_pass = mdf.hexdigest() + + self.apiClient = self.testClient.getApiClient() #Get ourselves an API client + + self.acct = createAccount.createAccountCmd() #The createAccount command + self.acct.accounttype = 0 #We need a regular user. admins have accounttype=1 + self.acct.firstname = 'test' + self.acct.lastname = 'user' #What's up doc? 
+ self.acct.username = 'testuser' + self.acct.password = mdf_pass #The md5 hashed password string + self.acct.email = 'test@domain.com' + self.acct.account = 'testacct' + self.acct.domainid = 1 #The default ROOT domain + self.acctResponse = self.apiClient.createAccount(self.acct) + # And upon successful creation we'll log a helpful message in our logs + # using the default debug logger of the test framework + self.debug("successfully created account: %s, id: \ + %s"%(self.acctResponse.name, \ + self.acctResponse.id)) + + #Create service offerings with varied planners + self.svcOfferingFirstFit = createServiceOffering.createServiceOfferingCmd() + self.svcOfferingFirstFit.name = 'Tiny Instance FirstFit' + self.svcOfferingFirstFit.displaytext = 'Tiny Instance with FirstFitPlanner' + self.svcOfferingFirstFit.cpuspeed = 100 + self.svcOfferingFirstFit.cpunumber = 1 + self.svcOfferingFirstFit.memory = 256 + self.svcOfferingFirstFit.deploymentplanner = 'FirstFitPlanner' + self.svcOfferingFirstFitResponse = self.apiClient.createServiceOffering(self.svcOfferingFirstFit) + + self.debug("successfully created serviceofferring name: %s, id: \ + %s, deploymentPlanner: %s"%(self.svcOfferingFirstFitResponse.name, \ + self.svcOfferingFirstFitResponse.id,self.svcOfferingFirstFitResponse.deploymentplanner)) + + #Create service offerings with varied planners + self.svcOfferingUserDispersing = createServiceOffering.createServiceOfferingCmd() + self.svcOfferingUserDispersing.name = 'Tiny Instance UserDispersing' + self.svcOfferingUserDispersing.displaytext = 'Tiny Instance with UserDispersingPlanner' + self.svcOfferingUserDispersing.cpuspeed = 100 + self.svcOfferingUserDispersing.cpunumber = 1 + self.svcOfferingUserDispersing.memory = 256 + self.svcOfferingUserDispersing.deploymentplanner = 'FirstFitPlanner' + self.svcOfferingUserDispersingResponse = self.apiClient.createServiceOffering(self.svcOfferingUserDispersing) + + self.debug("successfully created serviceofferring name: %s, id: \ 
+ %s, deploymentPlanner: %s"%(self.svcOfferingUserDispersingResponse.name, \ + self.svcOfferingUserDispersingResponse.id,self.svcOfferingUserDispersingResponse.deploymentplanner)) + + def test_DeployVm(self): + """ + Let's start by defining the attributes of our VM that we will be + deploying on CloudStack. We will be assuming a single zone is available + and is configured and all templates are Ready + + The hardcoded values are used only for brevity. + """ + deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVmCmd.zoneid = 1 + deployVmCmd.account = self.acct.account + deployVmCmd.domainid = self.acct.domainid + deployVmCmd.templateid = 5 #For default template- CentOS 5.6(64 bit) + deployVmCmd.serviceofferingid = self.svcOfferingFirstFitResponse.id + + deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd) + self.debug("VM %s was deployed in the job %s"%(deployVmResponse.id, deployVmResponse.jobid)) + + # At this point our VM is expected to be Running. Let's find out what + # listVirtualMachines tells us about VMs in this account + + listVmCmd = listVirtualMachines.listVirtualMachinesCmd() + listVmCmd.id = deployVmResponse.id + listVmResponse = self.apiClient.listVirtualMachines(listVmCmd) + + self.assertNotEqual(len(listVmResponse), 0, "Check if the list API \ + returns a non-empty response") + + vm1 = listVmResponse[0] + + self.assertEqual(vm1.id, deployVmResponse.id, "Check if the VM returned \ + is the same as the one we deployed") + self.assertEqual(vm1.state, "Running", "Check if VM has reached \ + a state of running") + + + deployVm2Cmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVm2Cmd.zoneid = 1 + deployVm2Cmd.account = self.acct.account + deployVm2Cmd.domainid = self.acct.domainid + deployVm2Cmd.templateid = 5 #For default template- CentOS 5.6(64 bit) + deployVm2Cmd.serviceofferingid = self.svcOfferingFirstFitResponse.id + + deployVm2Response = self.apiClient.deployVirtualMachine(deployVm2Cmd) + self.debug("VM 
%s was deployed in the job %s"%(deployVm2Response.id, deployVm2Response.jobid)) + + # At this point our VM is expected to be Running. Let's find out what + # listVirtualMachines tells us about VMs in this account + + listVm2Cmd = listVirtualMachines.listVirtualMachinesCmd() + listVm2Cmd.id = deployVm2Response.id + listVm2Response = self.apiClient.listVirtualMachines(listVm2Cmd) + self.assertNotEqual(len(listVm2Response), 0, "Check if the list API \ + returns a non-empty response") + vm2 = listVm2Response[0] + self.assertEqual(vm2.id, deployVm2Response.id, "Check if the VM returned \ + is the same as the one we deployed") + self.assertEqual(vm2.state, "Running", "Check if VM has reached \ + a state of running") + + + def tearDown(self): # Teardown will delete the Account as well as the VM once the VM reaches "Running" state + """ + And finally let us cleanup the resources we created by deleting the + account. All good unittests are atomic and rerunnable this way + """ + deleteAcct = deleteAccount.deleteAccountCmd() + deleteAcct.id = self.acctResponse.id + self.apiClient.deleteAccount(deleteAcct) + deleteSvcOfferingFirstFit = deleteServiceOffering.deleteServiceOfferingCmd() + deleteSvcOfferingFirstFit.id = self.svcOfferingFirstFitResponse.id + self.apiClient.deleteServiceOffering(deleteSvcOfferingFirstFit); + deleteSvcOfferingUserDispersing = deleteServiceOffering.deleteServiceOfferingCmd() + deleteSvcOfferingUserDispersing.id = self.svcOfferingUserDispersingResponse.id + self.apiClient.deleteServiceOffering(deleteSvcOfferingUserDispersing); + \ No newline at end of file diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 8b6460e5507..375850304b7 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -142,6 +142,7 @@ known_categories = { 'listNics':'Nic', 'AffinityGroup': 'Affinity Group', 'InternalLoadBalancer': 'Internal LB', + 'DeploymentPlanners': 'Configuration', } From 890603be37949308c127701765b31438605ab033 Mon Sep 17 00:00:00 
2001 From: Prachi Damle Date: Thu, 16 May 2013 15:28:02 -0700 Subject: [PATCH 04/19] Unit test does not use this file anymore. --- server/test/resources/affinityContext.xml | 42 ----------------------- 1 file changed, 42 deletions(-) delete mode 100644 server/test/resources/affinityContext.xml diff --git a/server/test/resources/affinityContext.xml b/server/test/resources/affinityContext.xml deleted file mode 100644 index ed880dd5648..00000000000 --- a/server/test/resources/affinityContext.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file From 35de50debfadf82dfbec2aab473abeea74fc6248 Mon Sep 17 00:00:00 2001 From: Alena Prokharchyk Date: Thu, 16 May 2013 14:26:46 -0700 Subject: [PATCH 05/19] cloud-sysvmadm: 1) added -z paramters. If specified, restrict system vm restarts to specific zone. If not specified, the intances will be restarted in all zones. 2) Fixed the help for restartNetwork option. It gets triggered by -n, not -e parameter --- setup/bindir/cloud-sysvmadm.in | 50 +++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/setup/bindir/cloud-sysvmadm.in b/setup/bindir/cloud-sysvmadm.in index 0a7b454ef95..3cb7858150b 100755 --- a/setup/bindir/cloud-sysvmadm.in +++ b/setup/bindir/cloud-sysvmadm.in @@ -23,7 +23,7 @@ #set -x usage() { - printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-e]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual 
Routers \n -e - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n\n" $(basename $0) >&2 + printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-n] [-z]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -n - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n -z - do restart only for the instances in the specific zone. 
If not specified, restart will apply to instances in all zones\n\n" $(basename $0) >&2 } @@ -37,9 +37,12 @@ password= help= maxthreads=10 LOGFILE=cloud.log +zone="" +inzone="" -while getopts 'sarhnd:m:u:p:t:l:' OPTION + +while getopts 'sarhnd:m:u:p:t:l:z:' OPTION do case $OPTION in s) system=1 @@ -63,6 +66,9 @@ do t) maxthreads="$OPTARG" ;; l) LOGFILE="$OPTARG" + ;; + z) zone=" AND data_center_id=""$OPTARG" + inzone=" in zone id=""$OPTARG" esac done @@ -70,14 +76,14 @@ done stop_start_system() { -secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\""`) -console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\""`) +secondary=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"SecondaryStorageVm\"$zone"`) +console=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"ConsoleProxy\"$zone"`) length_secondary=(${#secondary[@]}) length_console=(${#console[@]}) -echo -e "\nStopping and starting $length_secondary secondary storage vm(s)..." -echo -e "Stopping and starting $length_secondary secondary storage vm(s)..." >>$LOGFILE +echo -e "\nStopping and starting $length_secondary secondary storage vm(s)$inzone..." +echo -e "Stopping and starting $length_secondary secondary storage vm(s)$inzone..." >>$LOGFILE for d in "${secondary[@]}"; do echo "INFO: Stopping secondary storage vm with id $d" >>$LOGFILE @@ -98,12 +104,12 @@ done if [ "$length_secondary" == "0" ];then echo -e "No running secondary storage vms found \n" else - echo -e "Done stopping and starting secondary storage vm(s)" - echo -e "Done stopping and starting secondary storage vm(s)." 
>>$LOGFILE + echo -e "Done stopping and starting secondary storage vm(s)$inzone" + echo -e "Done stopping and starting secondary storage vm(s)$inzone." >>$LOGFILE fi -echo -e "\nStopping and starting $length_console console proxy vm(s)..." -echo -e "Stopping and starting $length_console console proxy vm(s)..." >>$LOGFILE +echo -e "\nStopping and starting $length_console console proxy vm(s)$inzone..." +echo -e "Stopping and starting $length_console console proxy vm(s)$inzone..." >>$LOGFILE for d in "${console[@]}"; do echo "INFO: Stopping console proxy with id $d" >>$LOGFILE @@ -124,17 +130,17 @@ done if [ "$length_console" == "0" ];then echo -e "No running console proxy vms found \n" else - echo "Done stopping and starting console proxy vm(s)." - echo "Done stopping and starting console proxy vm(s)." >>$LOGFILE + echo "Done stopping and starting console proxy vm(s) $inzone." + echo "Done stopping and starting console proxy vm(s) $inzone." >>$LOGFILE fi } stop_start_router() { - router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\""`) + router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\"$zone"`) length_router=(${#router[@]}) - echo -e "\nStopping and starting $length_router running routing vm(s)... " - echo -e "Stopping and starting $length_router running routing vm(s)... " >>$LOGFILE + echo -e "\nStopping and starting $length_router running routing vm(s)$inzone... " + echo -e "Stopping and starting $length_router running routing vm(s)$inzone... " >>$LOGFILE #Spawn reboot router in parallel - run commands in chunks - number of threads is configurable @@ -185,8 +191,8 @@ stop_start_router() { sleep 10 done - echo -e "Done restarting router(s). \n" - echo -e "Done restarting router(s). \n" >>$LOGFILE + echo -e "Done restarting router(s)$inzone. 
\n" + echo -e "Done restarting router(s)$inzone. \n" >>$LOGFILE fi } @@ -231,11 +237,11 @@ reboot_router(){ restart_networks(){ networks=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select n.id - from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null"`) + from networks n, network_offerings no where n.network_offering_id = no.id and no.system_only = 0 and n.removed is null$zone"`) length_networks=(${#networks[@]}) - echo -e "\nRestarting networks... " - echo -e "Restarting networks... " >>$LOGFILE + echo -e "\nRestarting $length_networks networks$inzone... " + echo -e "Restarting $length_networks networks$inzone... " >>$LOGFILE #Spawn restart network in parallel - run commands in chunks - number of threads is configurable @@ -287,8 +293,8 @@ restart_networks(){ sleep 10 done - echo -e "Done restarting networks. \n" - echo -e "Done restarting networks. \n" >>$LOGFILE + echo -e "Done restarting networks$inzone. \n" + echo -e "Done restarting networks$inzone. 
\n" >>$LOGFILE fi } From 3ed9e42dd2b000299aeab51681d96eb7c9d74249 Mon Sep 17 00:00:00 2001 From: Mice Xia Date: Thu, 16 May 2013 16:50:29 +0800 Subject: [PATCH 06/19] fix null pointer dereference in cloud-server --- server/src/com/cloud/network/StorageNetworkManagerImpl.java | 5 +++-- .../region/gslb/GlobalLoadBalancingRulesServiceImpl.java | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/com/cloud/network/StorageNetworkManagerImpl.java index 9a173826576..901e2041490 100755 --- a/server/src/com/cloud/network/StorageNetworkManagerImpl.java +++ b/server/src/com/cloud/network/StorageNetworkManagerImpl.java @@ -315,9 +315,10 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet List ranges = _sNwIpRangeDao.listByPodId(podId); for (StorageNetworkIpRangeVO r : ranges) { try { - r = _sNwIpRangeDao.acquireInLockTable(r.getId()); + Long rangeId = r.getId(); + r = _sNwIpRangeDao.acquireInLockTable(rangeId); if (r == null) { - String msg = "Unable to acquire lock on storage network ip range id=" + r.getId() + ", delete failed"; + String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; s_logger.warn(msg); throw new CloudRuntimeException(msg); } diff --git a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 0622f77f750..a1865c64af7 100644 --- a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -155,7 +155,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR long gslbRuleId = assignToGslbCmd.getGlobalLoadBalancerRuleId(); GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { - throw 
new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid()); + throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule); @@ -282,7 +282,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR long gslbRuleId = removeFromGslbCmd.getGlobalLoadBalancerRuleId(); GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { - throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRule.getUuid()); + throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } _accountMgr.checkAccess(caller, SecurityChecker.AccessType.ModifyEntry, true, gslbRule); From 973c43a152d39b85f53428b00f451fba9cb82003 Mon Sep 17 00:00:00 2001 From: Dave Brosius Date: Thu, 16 May 2013 07:16:12 -0500 Subject: [PATCH 07/19] CLOUDSTACK-2530: fix npe if no network isolation methods Signed-off-by: Mice Xia --- .../command/admin/network/ListNetworkIsolationMethodsCmd.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java index 7eef22a78b4..0d23fd67557 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/ListNetworkIsolationMethodsCmd.java @@ -44,7 +44,7 @@ public class ListNetworkIsolationMethodsCmd extends BaseListCmd{ isolationResponses.add(isolationMethod); } } - response.setResponses(isolationResponses, methods.length); + response.setResponses(isolationResponses, isolationResponses.size()); response.setResponseName(getCommandName()); this.setResponseObject(response); From 
caf0dd22b7de584f87427d0f3039ce391f2d406b Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Thu, 16 May 2013 14:04:20 +0530 Subject: [PATCH 08/19] Dedicate Public IP range - If every public ip range in the system is dedicated when an account with no dedicate ranges acquires a new public ip the request should fail --- .../com/cloud/network/NetworkManagerImpl.java | 114 +++++++++--------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index c58ef220ea1..40fc3d30154 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -284,6 +284,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Long guestNetworkId, boolean sourceNat, boolean assign, String requestedIp, boolean isSystem, Long vpcId) throws InsufficientAddressCapacityException { StringBuilder errorMessage = new StringBuilder("Unable to get ip adress in "); + boolean fetchFromDedicatedRange = false; + List dedicatedVlanDbIds = new ArrayList(); + List nonDedicatedVlanDbIds = new ArrayList(); + Transaction txn = Transaction.currentTxn(); txn.start(); SearchCriteria sc = null; @@ -296,9 +300,37 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L errorMessage.append(" zone id=" + dcId); } - if ( vlanDbIds != null && !vlanDbIds.isEmpty() ) { - sc.setParameters("vlanId", vlanDbIds.toArray()); - errorMessage.append(", vlanId id=" + vlanDbIds.toArray()); + // If owner has dedicated Public IP ranges, fetch IP from the dedicated range + // Otherwise fetch IP from the system pool + List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId()); + for (AccountVlanMapVO map : maps) { + if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId())) + dedicatedVlanDbIds.add(map.getVlanDbId()); + } + List nonDedicatedVlans = 
_vlanDao.listZoneWideNonDedicatedVlans(dcId); + for (VlanVO nonDedicatedVlan : nonDedicatedVlans) { + if (vlanDbIds == null || vlanDbIds.contains(nonDedicatedVlan.getId())) + nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId()); + } + if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) { + fetchFromDedicatedRange = true; + sc.setParameters("vlanId", dedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + dedicatedVlanDbIds.toArray()); + } else if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray()); + } else { + if (podId != null) { + InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException + ("Insufficient address capacity", Pod.class, podId); + ex.addProxyObject(ApiDBUtils.findPodById(podId).getUuid()); + throw ex; + } + s_logger.warn(errorMessage.toString()); + InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException + ("Insufficient address capacity", DataCenter.class, dcId); + ex.addProxyObject(ApiDBUtils.findZoneById(dcId).getUuid()); + throw ex; } sc.setParameters("dc", dcId); @@ -321,6 +353,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List addrs = _ipAddressDao.lockRows(sc, filter, true); + // If all the dedicated IPs of the owner are in use fetch an IP from the system pool + if (addrs.size() == 0 && fetchFromDedicatedRange) { + if (nonDedicatedVlanDbIds != null && !nonDedicatedVlanDbIds.isEmpty()) { + fetchFromDedicatedRange = false; + sc.setParameters("vlanId", nonDedicatedVlanDbIds.toArray()); + errorMessage.append(", vlanId id=" + nonDedicatedVlanDbIds.toArray()); + addrs = _ipAddressDao.lockRows(sc, filter, true); + } + } + if (addrs.size() == 0) { if (podId != null) { InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException @@ -338,6 +380,16 @@ public class 
NetworkManagerImpl extends ManagerBase implements NetworkManager, L assert (addrs.size() == 1) : "Return size is incorrect: " + addrs.size(); + if (!fetchFromDedicatedRange) { + // Check that the maximum number of public IPs for the given accountId will not be exceeded + try { + _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); + } catch (ResourceAllocationException ex) { + s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); + throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); + } + } + IPAddressVO addr = addrs.get(0); addr.setSourceNat(sourceNat); addr.setAllocatedTime(new Date()); @@ -442,14 +494,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L long ownerId = owner.getId(); - // Check that the maximum number of public IPs for the given accountId will not be exceeded - try { - _resourceLimitMgr.checkResourceLimit(owner, ResourceType.public_ip); - } catch (ResourceAllocationException ex) { - s_logger.warn("Failed to allocate resource of type " + ex.getResourceType() + " for account " + owner); - throw new AccountLimitException("Maximum number of public IP addresses for account: " + owner.getAccountName() + " has been exceeded."); - } - PublicIp ip = null; Transaction txn = Transaction.currentTxn(); try { @@ -466,15 +510,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L s_logger.debug("lock account " + ownerId + " is acquired"); } - // If account has Account specific ip ranges, try to allocate ip from there - List vlanIds = new ArrayList(); - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ownerId); - if (maps != null && !maps.isEmpty()) { - vlanIds.add(maps.get(0).getVlanDbId()); - } - - - ip = fetchNewPublicIp(dcId, null, vlanIds, owner, VlanType.VirtualNetwork, guestNtwkId, + ip = fetchNewPublicIp(dcId, null, null, owner, 
VlanType.VirtualNetwork, guestNtwkId, isSourceNat, false, null, false, vpcId); IPAddressVO publicIp = ip.ip(); @@ -610,9 +646,6 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L VlanType vlanType = VlanType.VirtualNetwork; boolean assign = false; - boolean allocateFromDedicatedRange = false; - List dedicatedVlanDbIds = new ArrayList(); - List nonDedicatedVlanDbIds = new ArrayList(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { // zone is of type DataCenter. See DataCenterVO.java. @@ -642,39 +675,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L txn.start(); - // If account has dedicated Public IP ranges, allocate IP from the dedicated range - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(ipOwner.getId()); - for (AccountVlanMapVO map : maps) { - dedicatedVlanDbIds.add(map.getVlanDbId()); - } - if (dedicatedVlanDbIds != null && !dedicatedVlanDbIds.isEmpty()) { - allocateFromDedicatedRange = true; - } - - try { - if (allocateFromDedicatedRange) { - ip = fetchNewPublicIp(zone.getId(), null, dedicatedVlanDbIds, ipOwner, vlanType, null, - false, assign, null, isSystem, null); - } - } catch(InsufficientAddressCapacityException e) { - s_logger.warn("All IPs dedicated to account " + ipOwner.getId() + " has been acquired." 
+ - " Now acquiring from the system pool"); - txn.close(); - allocateFromDedicatedRange = false; - } - - if (!allocateFromDedicatedRange) { - // Check that the maximum number of public IPs for the given - // accountId will not be exceeded - _resourceLimitMgr.checkResourceLimit(accountToLock, ResourceType.public_ip); - - List nonDedicatedVlans = _vlanDao.listZoneWideNonDedicatedVlans(zone.getId()); - for (VlanVO nonDedicatedVlan : nonDedicatedVlans) { - nonDedicatedVlanDbIds.add(nonDedicatedVlan.getId()); - } - ip = fetchNewPublicIp(zone.getId(), null, nonDedicatedVlanDbIds, ipOwner, vlanType, null, false, assign, null, - isSystem, null); - } + ip = fetchNewPublicIp(zone.getId(), null, null, ipOwner, vlanType, null, false, assign, null, + isSystem, null); if (ip == null) { InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException From adbebc1892cf88df8c801291a88bc836c99b8c14 Mon Sep 17 00:00:00 2001 From: Devdeep Singh Date: Fri, 17 May 2013 11:14:25 +0530 Subject: [PATCH 09/19] Changes for implicitly dedicating a resource. It includes a following: 1. A new implicit planner which extends the functionality provided by FirstFitPlanner. 2. Implicit planner can be used in either strict or preferred mode. In strict mode it tries to deploy a vm of a given account on a host on which vms of the account are already running. If no such host is found it'll search for an empty host to service the request. Otherwise the deploy vm request fails. 3. In preferred mode, if a host which is running vms of the account or an empty host isn't found, the planner then tries to deploy on any other host provided it isn't running implicitly dedicated strict vms of any other account. 4. Updated the createServiceOffering api to configure the details for the planner that the service offering is using. 5. Made db changes to store the service offering details for the planner. 6. Unit tests for testing the implicit planner functionality. 7. 
Marvin test for validating the functionality. --- .../com/cloud/deploy/DeploymentPlanner.java | 7 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../offering/CreateServiceOfferingCmd.java | 16 + client/pom.xml | 5 + client/tomcatconf/applicationContext.xml.in | 6 +- client/tomcatconf/componentContext.xml.in | 1 + .../service/ServiceOfferingDetailsVO.java | 73 +++ .../com/cloud/service/ServiceOfferingVO.java | 27 + .../cloud/service/dao/ServiceOfferingDao.java | 2 + .../service/dao/ServiceOfferingDaoImpl.java | 19 +- .../dao/ServiceOfferingDetailsDao.java | 29 + .../dao/ServiceOfferingDetailsDaoImpl.java | 98 +++ .../implicit-dedication/pom.xml | 29 + .../deploy/ImplicitDedicationPlanner.java | 249 ++++++++ .../implicitplanner/ImplicitPlannerTest.java | 586 ++++++++++++++++++ plugins/pom.xml | 1 + .../configuration/ConfigurationManager.java | 3 +- .../ConfigurationManagerImpl.java | 20 +- .../vpc/MockConfigurationManagerImpl.java | 2 +- .../ChildTestConfiguration.java | 27 +- setup/db/db/schema-410to420.sql | 11 +- .../component/test_implicit_planner.py | 232 +++++++ tools/marvin/marvin/integration/lib/base.py | 4 + 23 files changed, 1430 insertions(+), 18 deletions(-) create mode 100644 engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java create mode 100644 engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java create mode 100644 engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java create mode 100644 plugins/deployment-planners/implicit-dedication/pom.xml create mode 100644 plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java create mode 100644 plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java create mode 100644 test/integration/component/test_implicit_planner.py diff --git a/api/src/com/cloud/deploy/DeploymentPlanner.java b/api/src/com/cloud/deploy/DeploymentPlanner.java index 
eb56a591f6b..769da39f3ff 100644 --- a/api/src/com/cloud/deploy/DeploymentPlanner.java +++ b/api/src/com/cloud/deploy/DeploymentPlanner.java @@ -213,6 +213,13 @@ public interface DeploymentPlanner extends Adapter { _hostIds.add(hostId); } + public void addHostList(Collection hostList) { + if (_hostIds == null) { + _hostIds = new HashSet(); + } + _hostIds.addAll(hostList); + } + public boolean shouldAvoid(Host host) { if (_dcIds != null && _dcIds.contains(host.getDataCenterId())) { return true; diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 8d7739c13e1..cf093bf4c7c 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -312,6 +312,7 @@ public class ApiConstants { public static final String ACCEPT = "accept"; public static final String SORT_KEY = "sortkey"; public static final String ACCOUNT_DETAILS = "accountdetails"; + public static final String SERVICE_OFFERING_DETAILS = "serviceofferingdetails"; public static final String SERVICE_PROVIDER_LIST = "serviceproviderlist"; public static final String SERVICE_CAPABILITY_LIST = "servicecapabilitylist"; public static final String CAN_CHOOSE_SERVICE_CAPABILITY = "canchooseservicecapability"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index c155b706fc0..4c54a4e5ec6 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -16,6 +16,9 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.offering; +import java.util.Collection; +import java.util.Map; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -87,6 +90,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.DEPLOYMENT_PLANNER, type = CommandType.STRING, description = "The deployment planner heuristics used to deploy a VM of this offering. If null, value of global config vm.deployment.planner is used") private String deploymentPlanner; + @Parameter(name = ApiConstants.SERVICE_OFFERING_DETAILS, type = CommandType.MAP, description = "details for planner, used to store specific parameters") + private Map details; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -155,6 +161,16 @@ public class CreateServiceOfferingCmd extends BaseCmd { return deploymentPlanner; } + public Map getDetails() { + if (details == null || details.isEmpty()) { + return null; + } + + Collection paramsCollection = details.values(); + Map params = (Map)(paramsCollection.toArray())[0]; + return params; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/client/pom.xml b/client/pom.xml index 197ba27975c..0c38ecb65d2 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -131,6 +131,11 @@ cloud-plugin-planner-user-concentrated-pod ${project.version} + + org.apache.cloudstack + cloud-plugin-planner-implicit-dedication + ${project.version} + org.apache.cloudstack cloud-plugin-host-allocator-random diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in index 1d1eca4c191..b500fde8549 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ 
b/client/tomcatconf/applicationContext.xml.in @@ -370,7 +370,7 @@ - + diff --git a/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java b/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java new file mode 100644 index 00000000000..b005c738e82 --- /dev/null +++ b/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.service; + +import org.apache.cloudstack.api.InternalIdentity; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +@Entity +@Table(name="service_offering_details") +public class ServiceOfferingDetailsVO implements InternalIdentity { + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="service_offering_id") + private long serviceOfferingId; + + @Column(name="name") + private String name; + + @Column(name="value") + private String value; + + protected ServiceOfferingDetailsVO() { + } + + public ServiceOfferingDetailsVO(long serviceOfferingId, String name, String value) { + this.serviceOfferingId = serviceOfferingId; + this.name = name; + this.value = value; + } + + public long getServiceOfferingId() { + return serviceOfferingId; + } + + public String getName() { + return name; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + + public long getId() { + return id; + } +} \ No newline at end of file diff --git a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java index fd31d301bc3..9a262c540b7 100755 --- a/engine/schema/src/com/cloud/service/ServiceOfferingVO.java +++ b/engine/schema/src/com/cloud/service/ServiceOfferingVO.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.service; +import java.util.Map; + import javax.persistence.Column; import javax.persistence.DiscriminatorValue; import javax.persistence.Entity; @@ -71,6 +73,12 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering @Column(name = "deployment_planner") private String deploymentPlanner = null; + // This is a delayed load value. 
If the value is null, + // then this field has not been loaded yet. + // Call service offering dao to load it. + @Transient + Map details; + protected ServiceOfferingVO() { super(); } @@ -225,4 +233,23 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering return deploymentPlanner; } + public Map getDetails() { + return details; + } + + public String getDetail(String name) { + assert (details != null) : "Did you forget to load the details?"; + + return details != null ? details.get(name) : null; + } + + public void setDetail(String name, String value) { + assert (details != null) : "Did you forget to load the details?"; + + details.put(name, value); + } + + public void setDetails(Map details) { + this.details = details; + } } diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDao.java index 589de7cc055..7da72088431 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDao.java @@ -31,4 +31,6 @@ public interface ServiceOfferingDao extends GenericDao List findServiceOfferingByDomainId(Long domainId); List findSystemOffering(Long domainId, Boolean isSystem, String vm_type); ServiceOfferingVO persistDeafultServiceOffering(ServiceOfferingVO offering); + void loadDetails(ServiceOfferingVO serviceOffering); + void saveDetails(ServiceOfferingVO serviceOffering); } diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 062103e3198..14b2abf8fc4 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -18,15 +18,16 @@ package com.cloud.service.dao; import java.util.Date; import java.util.List; +import java.util.Map; import javax.ejb.Local; +import javax.inject.Inject; import 
javax.persistence.EntityExistsException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.service.ServiceOfferingVO; -import com.cloud.storage.DiskOfferingVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -37,6 +38,8 @@ import com.cloud.utils.db.SearchCriteria; public class ServiceOfferingDaoImpl extends GenericDaoBase implements ServiceOfferingDao { protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class); + @Inject protected ServiceOfferingDetailsDao detailsDao; + protected final SearchBuilder UniqueNameSearch; protected final SearchBuilder ServiceOfferingsByDomainIdSearch; protected final SearchBuilder SystemServiceOffering; @@ -154,4 +157,18 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase details = detailsDao.findDetails(serviceOffering.getId()); + serviceOffering.setDetails(details); + } + + @Override + public void saveDetails(ServiceOfferingVO serviceOffering) { + Map details = serviceOffering.getDetails(); + if (details != null) { + detailsDao.persist(serviceOffering.getId(), details); + } + } } diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java new file mode 100644 index 00000000000..38169105819 --- /dev/null +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.service.dao; + +import java.util.Map; + +import com.cloud.service.ServiceOfferingDetailsVO; +import com.cloud.utils.db.GenericDao; + +public interface ServiceOfferingDetailsDao extends GenericDao { + Map findDetails(long serviceOfferingId); + void persist(long serviceOfferingId, Map details); + ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name); + void deleteDetails(long serviceOfferingId); +} \ No newline at end of file diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java new file mode 100644 index 00000000000..91d736a38c4 --- /dev/null +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.service.dao; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.service.ServiceOfferingDetailsVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; + +@Component +@Local(value=ServiceOfferingDetailsDao.class) +public class ServiceOfferingDetailsDaoImpl extends GenericDaoBase + implements ServiceOfferingDetailsDao { + protected final SearchBuilder ServiceOfferingSearch; + protected final SearchBuilder DetailSearch; + + public ServiceOfferingDetailsDaoImpl() { + ServiceOfferingSearch = createSearchBuilder(); + ServiceOfferingSearch.and("serviceOfferingId", ServiceOfferingSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ); + ServiceOfferingSearch.done(); + + DetailSearch = createSearchBuilder(); + DetailSearch.and("serviceOfferingId", DetailSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ); + DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); + DetailSearch.done(); + } + + @Override + public ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name) { + SearchCriteria sc = DetailSearch.create(); + sc.setParameters("serviceOfferingId", serviceOfferingId); + sc.setParameters("name", name); + ServiceOfferingDetailsVO detail = findOneIncludingRemovedBy(sc); + return detail; + } + + @Override + public Map findDetails(long serviceOfferingId) { + SearchCriteria sc = ServiceOfferingSearch.create(); + sc.setParameters("serviceOfferingId", serviceOfferingId); + List results = search(sc, null); + Map details = new HashMap(results.size()); + for (ServiceOfferingDetailsVO result : results) { + details.put(result.getName(), result.getValue()); + } + + 
return details; + } + + @Override + public void deleteDetails(long serviceOfferingId) { + SearchCriteria sc = ServiceOfferingSearch.create(); + sc.setParameters("serviceOfferingId", serviceOfferingId); + List results = search(sc, null); + for (ServiceOfferingDetailsVO result : results) { + remove(result.getId()); + } + } + + @Override + public void persist(long serviceOfferingId, Map details) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + SearchCriteria sc = ServiceOfferingSearch.create(); + sc.setParameters("serviceOfferingId", serviceOfferingId); + expunge(sc); + + for (Map.Entry detail : details.entrySet()) { + String value = detail.getValue(); + ServiceOfferingDetailsVO vo = new ServiceOfferingDetailsVO(serviceOfferingId, detail.getKey(), value); + persist(vo); + } + txn.commit(); + } +} diff --git a/plugins/deployment-planners/implicit-dedication/pom.xml b/plugins/deployment-planners/implicit-dedication/pom.xml new file mode 100644 index 00000000000..18555923668 --- /dev/null +++ b/plugins/deployment-planners/implicit-dedication/pom.xml @@ -0,0 +1,29 @@ + + + 4.0.0 + cloud-plugin-planner-implicit-dedication + Apache CloudStack Plugin - Implicit Dedication Planner + + org.apache.cloudstack + cloudstack-plugins + 4.2.0-SNAPSHOT + ../../pom.xml + + diff --git a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java new file mode 100644 index 00000000000..d47d8f52c46 --- /dev/null +++ b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -0,0 +1,249 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.cloud.configuration.Config; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.HostVO; +import com.cloud.resource.ResourceManager; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.user.Account; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Local(value=DeploymentPlanner.class) +public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { + + private static final Logger s_logger = Logger.getLogger(ImplicitDedicationPlanner.class); + + @Inject + private ServiceOfferingDao serviceOfferingDao; + @Inject + private ServiceOfferingDetailsDao serviceOfferingDetailsDao; + @Inject + private ResourceManager resourceMgr; + + private int capacityReleaseInterval; + + @Override + public boolean configure(final String name, final Map params) throws ConfigurationException 
{ + super.configure(name, params); + capacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); + return true; + } + + @Override + public List orderClusters(VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { + List clusterList = super.orderClusters(vmProfile, plan, avoid); + Set hostsToAvoid = avoid.getHostsToAvoid(); + Account account = vmProfile.getOwner(); + + if (clusterList == null || clusterList.isEmpty()) { + return clusterList; + } + + // Check if strict or preferred mode should be used. + boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); + + // Get the list of all the hosts in the given clusters + List allHosts = new ArrayList(); + for (Long cluster : clusterList) { + List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); + for (HostVO hostVO : hostsInCluster) { + allHosts.add(hostVO.getId()); + } + } + + // Go over all the hosts in the cluster and get a list of + // 1. All empty hosts, not running any vms. + // 2. Hosts running vms for this account and created by a service offering which uses an + // implicit dedication planner. + // 3. Hosts running vms created by implicit planner and in strict mode of other accounts. + // 4. Hosts running vms from other account or from this account but created by a service offering which uses + // any planner besides implicit. 
+ Set emptyHosts = new HashSet(); + Set hostRunningVmsOfAccount = new HashSet(); + Set hostRunningStrictImplicitVmsOfOtherAccounts = new HashSet(); + Set allOtherHosts = new HashSet(); + for (Long host : allHosts) { + List userVms = getVmsOnHost(host); + if (userVms == null || userVms.isEmpty()) { + emptyHosts.add(host); + } else if (checkHostSuitabilityForImplicitDedication(account.getAccountId(), userVms)) { + hostRunningVmsOfAccount.add(host); + } else if (checkIfAllVmsCreatedInStrictMode(account.getAccountId(), userVms)) { + hostRunningStrictImplicitVmsOfOtherAccounts.add(host); + } else { + allOtherHosts.add(host); + } + } + + // Hosts running vms of other accounts created by an implicit planner in strict mode should always be avoided. + avoid.addHostList(hostRunningStrictImplicitVmsOfOtherAccounts); + + if (!hostRunningVmsOfAccount.isEmpty() && (hostsToAvoid == null || + !hostsToAvoid.containsAll(hostRunningVmsOfAccount))) { + // Check if any of hosts that are running implicit dedicated vms are available (not in avoid list). + // If so, we'll try and use these hosts. + avoid.addHostList(emptyHosts); + avoid.addHostList(allOtherHosts); + clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid()); + } else if (!emptyHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(emptyHosts))) { + // If there aren't implicit resources try on empty hosts + avoid.addHostList(allOtherHosts); + clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid()); + } else if (!preferred) { + // If in strict mode, there is nothing else to try. + clusterList = null; + } else { + // If in preferred mode, check if hosts are available to try, otherwise return an empty cluster list. 
+ if (!allOtherHosts.isEmpty() && (hostsToAvoid == null || !hostsToAvoid.containsAll(allOtherHosts))) { + clusterList = getUpdatedClusterList(clusterList, avoid.getHostsToAvoid()); + } else { + clusterList = null; + } + } + + return clusterList; + } + + private List getVmsOnHost(long hostId) { + List vms = _vmDao.listUpByHostId(hostId); + List vmsByLastHostId = _vmDao.listByLastHostId(hostId); + if (vmsByLastHostId.size() > 0) { + // check if any VMs are within skip.counting.hours, if yes we have to consider the host. + for (UserVmVO stoppedVM : vmsByLastHostId) { + long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - stoppedVM.getUpdateTime() + .getTime()) / 1000; + if (secondsSinceLastUpdate < capacityReleaseInterval) { + vms.add(stoppedVM); + } + } + } + + return vms; + } + + private boolean checkHostSuitabilityForImplicitDedication(Long accountId, List allVmsOnHost) { + boolean suitable = true; + for (UserVmVO vm : allVmsOnHost) { + if (vm.getAccountId() != accountId) { + s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it is " + + "running instances of another account"); + suitable = false; + break; + } else { + if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { + s_logger.info("Host " + vm.getHostId() + " found to be unsuitable for implicit dedication as it " + + "is running instances of this account which haven't been created using implicit dedication."); + suitable = false; + break; + } + } + } + return suitable; + } + + private boolean checkIfAllVmsCreatedInStrictMode(Long accountId, List allVmsOnHost) { + boolean createdByImplicitStrict = true; + for (UserVmVO vm : allVmsOnHost) { + if (!isImplicitPlannerUsedByOffering(vm.getServiceOfferingId())) { + s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by a planner other" + + " than implicit."); + createdByImplicitStrict = false; + break; + } else if 
(isServiceOfferingUsingPlannerInPreferredMode(vm.getServiceOfferingId())) { + s_logger.info("Host " + vm.getHostId() + " found to be running a vm created by an implicit planner" + + " in preferred mode."); + createdByImplicitStrict = false; + break; + } + } + return createdByImplicitStrict; + } + + private boolean isImplicitPlannerUsedByOffering(long offeringId) { + boolean implicitPlannerUsed = false; + ServiceOfferingVO offering = serviceOfferingDao.findByIdIncludingRemoved(offeringId); + if (offering == null) { + s_logger.error("Couldn't retrieve the offering by the given id : " + offeringId); + } else { + String plannerName = offering.getDeploymentPlanner(); + if (plannerName == null) { + plannerName = _globalDeploymentPlanner; + } + + if (plannerName != null && this.getName().equals(plannerName)) { + implicitPlannerUsed = true; + } + } + + return implicitPlannerUsed; + } + + private boolean isServiceOfferingUsingPlannerInPreferredMode(long serviceOfferingId) { + boolean preferred = false; + Map details = serviceOfferingDetailsDao.findDetails(serviceOfferingId); + if (details != null && !details.isEmpty()) { + String preferredAttribute = details.get("ImplicitDedicationMode"); + if (preferredAttribute != null && preferredAttribute.equals("Preferred")) { + preferred = true; + } + } + return preferred; + } + + private List getUpdatedClusterList(List clusterList, Set hostsSet) { + List updatedClusterList = new ArrayList(); + for (Long cluster : clusterList) { + List hosts = resourceMgr.listAllHostsInCluster(cluster); + Set hostsInClusterSet = new HashSet(); + for (HostVO host : hosts) { + hostsInClusterSet.add(host.getId()); + } + + if (!hostsSet.containsAll(hostsInClusterSet)) { + updatedClusterList.add(cluster); + } + } + + return updatedClusterList; + } + + @Override + public PlannerResourceUsage getResourceUsage() { + return PlannerResourceUsage.Dedicated; + } +} \ No newline at end of file diff --git 
a/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java new file mode 100644 index 00000000000..44507600db9 --- /dev/null +++ b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -0,0 +1,586 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.implicitplanner; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import 
com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.deploy.ImplicitDedicationPlanner; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.storage.StorageManager; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.UserContext; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentContext; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachineProfileImpl; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class ImplicitPlannerTest { + + @Inject + ImplicitDedicationPlanner planner = new ImplicitDedicationPlanner(); + @Inject + HostDao hostDao; + @Inject + DataCenterDao dcDao; + @Inject + HostPodDao podDao; + @Inject + ClusterDao clusterDao; + @Inject + GuestOSDao guestOSDao; + @Inject + GuestOSCategoryDao guestOSCategoryDao; + @Inject + DiskOfferingDao diskOfferingDao; + @Inject + StoragePoolHostDao poolHostDao; + @Inject + UserVmDao vmDao; + @Inject + VMInstanceDao vmInstanceDao; + @Inject + VolumeDao volsDao; + @Inject + CapacityManager capacityMgr; + @Inject + ConfigurationDao configDao; + @Inject + PrimaryDataStoreDao storagePoolDao; + @Inject + CapacityDao capacityDao; + 
@Inject + AccountManager accountMgr; + @Inject + StorageManager storageMgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + ClusterDetailsDao clusterDetailsDao; + @Inject + ServiceOfferingDao serviceOfferingDao; + @Inject + ServiceOfferingDetailsDao serviceOfferingDetailsDao; + @Inject + ResourceManager resourceMgr; + + private static long domainId = 5L; + long dataCenterId = 1L; + long accountId = 200L; + long offeringId = 12L; + int noOfCpusInOffering = 1; + int cpuSpeedInOffering = 500; + int ramInOffering = 512; + AccountVO acct = new AccountVO(accountId); + + @BeforeClass + public static void setUp() throws ConfigurationException { + } + + @Before + public void testSetUp() { + ComponentContext.initComponentsLifeCycle(); + + acct.setType(Account.ACCOUNT_TYPE_NORMAL); + acct.setAccountName("user1"); + acct.setDomainId(domainId); + acct.setId(accountId); + + UserContext.registerContext(1, acct, null, true); + } + + @Test + public void checkWhenDcInAvoidList() throws InsufficientServerCapacityException { + DataCenterVO mockDc = mock(DataCenterVO.class); + ExcludeList avoids = mock(ExcludeList.class); + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + VMInstanceVO vm = mock(VMInstanceVO.class); + DataCenterDeployment plan = mock(DataCenterDeployment.class); + + when(avoids.shouldAvoid(mockDc)).thenReturn(true); + when(vmProfile.getVirtualMachine()).thenReturn(vm); + when(vm.getDataCenterId()).thenReturn(1L); + when(dcDao.findById(1L)).thenReturn(mockDc); + + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + assertTrue("Cluster list should be null/empty if the dc is in avoid list", + (clusterList == null || clusterList.isEmpty())); + } + + @Test + public void checkStrictModeWithCurrentAccountVmsPresent() throws InsufficientServerCapacityException { + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + 
DataCenterDeployment plan = mock(DataCenterDeployment.class); + ExcludeList avoids = new ExcludeList(); + + initializeForTest(vmProfile, plan); + + initializeForImplicitPlannerTest(false); + + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + + // Validations. + // Check cluster 2 and 3 are not in the cluster list. + // Host 6 and 7 should also be in avoid list. + assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + boolean foundNeededCluster = false; + for (Long cluster : clusterList) { + if (cluster != 1) { + fail("Found a cluster that shouldn't have been present, cluster id : " + cluster); + }else { + foundNeededCluster = true; + } + } + assertTrue("Didn't find cluster 1 in the list. It should have been present", foundNeededCluster); + + Set hostsInAvoidList = avoids.getHostsToAvoid(); + assertFalse("Host 5 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(5L)); + Set hostsThatShouldBeInAvoidList = new HashSet(); + hostsThatShouldBeInAvoidList.add(6L); + hostsThatShouldBeInAvoidList.add(7L); + assertTrue("Hosts 6 and 7 that should have been present were not found in avoid list" , + hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList)); + } + + @Test + public void checkStrictModeHostWithCurrentAccountVmsFull() throws InsufficientServerCapacityException { + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + DataCenterDeployment plan = mock(DataCenterDeployment.class); + ExcludeList avoids = new ExcludeList(); + + initializeForTest(vmProfile, plan); + + initializeForImplicitPlannerTest(false); + + // Mark the host 5 with current account vms to be in avoid list. + avoids.addHost(5L); + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + + // Validations. + // Check cluster 1 and 3 are not in the cluster list. + // Host 5 and 7 should also be in avoid list. 
+ assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + boolean foundNeededCluster = false; + for (Long cluster : clusterList) { + if (cluster != 2) { + fail("Found a cluster that shouldn't have been present, cluster id : " + cluster); + }else { + foundNeededCluster = true; + } + } + assertTrue("Didn't find cluster 2 in the list. It should have been present", foundNeededCluster); + + Set hostsInAvoidList = avoids.getHostsToAvoid(); + assertFalse("Host 6 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(6L)); + Set hostsThatShouldBeInAvoidList = new HashSet(); + hostsThatShouldBeInAvoidList.add(5L); + hostsThatShouldBeInAvoidList.add(7L); + assertTrue("Hosts 5 and 7 that should have been present were not found in avoid list" , + hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList)); + } + + @Test + public void checkStrictModeNoHostsAvailable() throws InsufficientServerCapacityException { + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + DataCenterDeployment plan = mock(DataCenterDeployment.class); + ExcludeList avoids = new ExcludeList(); + + initializeForTest(vmProfile, plan); + + initializeForImplicitPlannerTest(false); + + // Mark the host 5 and 6 to be in avoid list. + avoids.addHost(5L); + avoids.addHost(6L); + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + + // Validations. + // Check cluster list is empty. 
+ assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + } + + @Test + public void checkPreferredModePreferredHostAvailable() throws InsufficientServerCapacityException { + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + DataCenterDeployment plan = mock(DataCenterDeployment.class); + ExcludeList avoids = new ExcludeList(); + + initializeForTest(vmProfile, plan); + + initializeForImplicitPlannerTest(true); + + // Mark the host 5 and 6 to be in avoid list. + avoids.addHost(5L); + avoids.addHost(6L); + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + + // Validations. + // Check cluster 1 and 2 are not in the cluster list. + // Host 5 and 6 should also be in avoid list. + assertFalse("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + boolean foundNeededCluster = false; + for (Long cluster : clusterList) { + if (cluster != 3) { + fail("Found a cluster that shouldn't have been present, cluster id : " + cluster); + } else { + foundNeededCluster = true; + } + } + assertTrue("Didn't find cluster 3 in the list. 
It should have been present", foundNeededCluster); + + Set hostsInAvoidList = avoids.getHostsToAvoid(); + assertFalse("Host 7 shouldn't have be in the avoid list, but it is present", hostsInAvoidList.contains(7L)); + Set hostsThatShouldBeInAvoidList = new HashSet(); + hostsThatShouldBeInAvoidList.add(5L); + hostsThatShouldBeInAvoidList.add(6L); + assertTrue("Hosts 5 and 6 that should have been present were not found in avoid list" , + hostsInAvoidList.containsAll(hostsThatShouldBeInAvoidList)); + } + + @Test + public void checkPreferredModeNoHostsAvailable() throws InsufficientServerCapacityException { + @SuppressWarnings("unchecked") + VirtualMachineProfileImpl vmProfile = mock(VirtualMachineProfileImpl.class); + DataCenterDeployment plan = mock(DataCenterDeployment.class); + ExcludeList avoids = new ExcludeList(); + + initializeForTest(vmProfile, plan); + + initializeForImplicitPlannerTest(false); + + // Mark the host 5, 6 and 7 to be in avoid list. + avoids.addHost(5L); + avoids.addHost(6L); + avoids.addHost(7L); + List clusterList = planner.orderClusters(vmProfile, plan, avoids); + + // Validations. + // Check cluster list is empty. 
+ assertTrue("Cluster list should not be null/empty", (clusterList == null || clusterList.isEmpty())); + } + + private void initializeForTest(VirtualMachineProfileImpl vmProfile, DataCenterDeployment plan) { + DataCenterVO mockDc = mock(DataCenterVO.class); + VMInstanceVO vm = mock(VMInstanceVO.class); + UserVmVO userVm = mock(UserVmVO.class); + ServiceOfferingVO offering = mock(ServiceOfferingVO.class); + + AccountVO account = mock(AccountVO.class); + when(account.getId()).thenReturn(accountId); + when(account.getAccountId()).thenReturn(accountId); + when(vmProfile.getOwner()).thenReturn(account); + when(vmProfile.getVirtualMachine()).thenReturn(vm); + when(vmProfile.getId()).thenReturn(12L); + when(vmDao.findById(12L)).thenReturn(userVm); + when(userVm.getAccountId()).thenReturn(accountId); + + when(vm.getDataCenterId()).thenReturn(dataCenterId); + when(dcDao.findById(1L)).thenReturn(mockDc); + when(plan.getDataCenterId()).thenReturn(dataCenterId); + when(plan.getClusterId()).thenReturn(null); + when(plan.getPodId()).thenReturn(null); + when(configDao.getValue(anyString())).thenReturn("false").thenReturn("CPU"); + + // Mock offering details. 
+ when(vmProfile.getServiceOffering()).thenReturn(offering); + when(offering.getId()).thenReturn(offeringId); + when(vmProfile.getServiceOfferingId()).thenReturn(offeringId); + when(offering.getCpu()).thenReturn(noOfCpusInOffering); + when(offering.getSpeed()).thenReturn(cpuSpeedInOffering); + when(offering.getRamSize()).thenReturn(ramInOffering); + + List clustersWithEnoughCapacity = new ArrayList(); + clustersWithEnoughCapacity.add(1L); + clustersWithEnoughCapacity.add(2L); + clustersWithEnoughCapacity.add(3L); + when(capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, noOfCpusInOffering * cpuSpeedInOffering, + ramInOffering * 1024L * 1024L, CapacityVO.CAPACITY_TYPE_CPU, true)).thenReturn(clustersWithEnoughCapacity); + + Map clusterCapacityMap = new HashMap(); + clusterCapacityMap.put(1L, 2048D); + clusterCapacityMap.put(2L, 2048D); + clusterCapacityMap.put(3L, 2048D); + Pair, Map> clustersOrderedByCapacity = + new Pair, Map>(clustersWithEnoughCapacity, clusterCapacityMap); + when(capacityDao.orderClustersByAggregateCapacity(dataCenterId, CapacityVO.CAPACITY_TYPE_CPU, + true)).thenReturn(clustersOrderedByCapacity); + + List disabledClusters = new ArrayList(); + List clustersWithDisabledPods = new ArrayList(); + when(clusterDao.listDisabledClusters(dataCenterId, null)).thenReturn(disabledClusters); + when(clusterDao.listClustersWithDisabledPods(dataCenterId)).thenReturn(clustersWithDisabledPods); + } + + private void initializeForImplicitPlannerTest(boolean preferred) { + String plannerMode = new String("Strict"); + if (preferred) { + plannerMode = new String("Preferred"); + } + + Map details = new HashMap(); + details.put("ImplicitDedicationMode", plannerMode); + when(serviceOfferingDetailsDao.findDetails(offeringId)).thenReturn(details); + + // Initialize hosts in clusters + HostVO host1 = mock(HostVO.class); + when(host1.getId()).thenReturn(5L); + HostVO host2 = mock(HostVO.class); + when(host2.getId()).thenReturn(6L); + HostVO host3 = 
mock(HostVO.class); + when(host3.getId()).thenReturn(7L); + List hostsInCluster1 = new ArrayList(); + List hostsInCluster2 = new ArrayList(); + List hostsInCluster3 = new ArrayList(); + hostsInCluster1.add(host1); + hostsInCluster2.add(host2); + hostsInCluster3.add(host3); + when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1); + when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2); + when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3); + + // Mock vms on each host. + long offeringIdForVmsOfThisAccount = 15L; + long offeringIdForVmsOfOtherAccount = 16L; + UserVmVO vm1 = mock(UserVmVO.class); + when(vm1.getAccountId()).thenReturn(accountId); + when(vm1.getServiceOfferingId()).thenReturn(offeringIdForVmsOfThisAccount); + UserVmVO vm2 = mock(UserVmVO.class); + when(vm2.getAccountId()).thenReturn(accountId); + when(vm2.getServiceOfferingId()).thenReturn(offeringIdForVmsOfThisAccount); + // Vm from different account + UserVmVO vm3 = mock(UserVmVO.class); + when(vm3.getAccountId()).thenReturn(201L); + when(vm3.getServiceOfferingId()).thenReturn(offeringIdForVmsOfOtherAccount); + List userVmsForHost1 = new ArrayList(); + List userVmsForHost2 = new ArrayList(); + List userVmsForHost3 = new ArrayList(); + List stoppedVmsForHost = new ArrayList(); + // Host 2 is empty. + userVmsForHost1.add(vm1); + userVmsForHost1.add(vm2); + userVmsForHost3.add(vm3); + when(vmDao.listUpByHostId(5L)).thenReturn(userVmsForHost1); + when(vmDao.listUpByHostId(6L)).thenReturn(userVmsForHost2); + when(vmDao.listUpByHostId(7L)).thenReturn(userVmsForHost3); + when(vmDao.listByLastHostId(5L)).thenReturn(stoppedVmsForHost); + when(vmDao.listByLastHostId(6L)).thenReturn(stoppedVmsForHost); + when(vmDao.listByLastHostId(7L)).thenReturn(stoppedVmsForHost); + + // Mock the offering with which the vm was created. 
+ ServiceOfferingVO offeringForVmOfThisAccount = mock(ServiceOfferingVO.class); + when(serviceOfferingDao.findByIdIncludingRemoved(offeringIdForVmsOfThisAccount)).thenReturn(offeringForVmOfThisAccount); + when(offeringForVmOfThisAccount.getDeploymentPlanner()).thenReturn(planner.getName()); + + ServiceOfferingVO offeringForVMOfOtherAccount = mock(ServiceOfferingVO.class); + when(serviceOfferingDao.findByIdIncludingRemoved(offeringIdForVmsOfOtherAccount)).thenReturn(offeringForVMOfOtherAccount); + when(offeringForVMOfOtherAccount.getDeploymentPlanner()).thenReturn("FirstFitPlanner"); + } + + @Configuration + @ComponentScan(basePackageClasses = { ImplicitDedicationPlanner.class }, + includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, + useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + @Bean + public HostDao hostDao() { + return Mockito.mock(HostDao.class); + } + + @Bean + public DataCenterDao dcDao() { + return Mockito.mock(DataCenterDao.class); + } + + @Bean + public HostPodDao hostPodDao() { + return Mockito.mock(HostPodDao.class); + } + + @Bean + public ClusterDao clusterDao() { + return Mockito.mock(ClusterDao.class); + } + + @Bean + public GuestOSDao guestOsDao() { + return Mockito.mock(GuestOSDao.class); + } + + @Bean + public GuestOSCategoryDao guestOsCategoryDao() { + return Mockito.mock(GuestOSCategoryDao.class); + } + + @Bean + public DiskOfferingDao diskOfferingDao() { + return Mockito.mock(DiskOfferingDao.class); + } + + @Bean + public StoragePoolHostDao storagePoolHostDao() { + return Mockito.mock(StoragePoolHostDao.class); + } + + @Bean + public UserVmDao userVmDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public VMInstanceDao vmInstanceDao() { + return Mockito.mock(VMInstanceDao.class); + } + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public CapacityManager 
capacityManager() { + return Mockito.mock(CapacityManager.class); + } + + @Bean + public ConfigurationDao configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public PrimaryDataStoreDao primaryDataStoreDao() { + return Mockito.mock(PrimaryDataStoreDao.class); + } + + @Bean + public CapacityDao capacityDao() { + return Mockito.mock(CapacityDao.class); + } + + @Bean + public AccountManager accountManager() { + return Mockito.mock(AccountManager.class); + } + + @Bean + public StorageManager storageManager() { + return Mockito.mock(StorageManager.class); + } + + @Bean + public DataStoreManager dataStoreManager() { + return Mockito.mock(DataStoreManager.class); + } + + @Bean + public ClusterDetailsDao clusterDetailsDao() { + return Mockito.mock(ClusterDetailsDao.class); + } + + @Bean + public ServiceOfferingDao serviceOfferingDao() { + return Mockito.mock(ServiceOfferingDao.class); + } + + @Bean + public ServiceOfferingDetailsDao serviceOfferingDetailsDao() { + return Mockito.mock(ServiceOfferingDetailsDao.class); + } + + @Bean + public ResourceManager resourceManager() { + return Mockito.mock(ResourceManager.class); + } + + public static class Library implements TypeFilter { + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + } +} \ No newline at end of file diff --git a/plugins/pom.xml b/plugins/pom.xml index e49fac9533a..2efa2488e86 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -38,6 +38,7 @@ affinity-group-processors/host-anti-affinity deployment-planners/user-concentrated-pod deployment-planners/user-dispersing + deployment-planners/implicit-dedication host-allocators/random hypervisors/ovm hypervisors/xen diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java 
b/server/src/com/cloud/configuration/ConfigurationManager.java index d0ae914c20f..8db037b24ff 100755 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -80,10 +80,11 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * @param id * @param useVirtualNetwork * @param deploymentPlanner + * @param details * @return ID */ ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, - boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner); + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner, Map details); /** * Creates a new disk offering diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 9e0c847ed57..52d617646af 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -39,7 +39,6 @@ import javax.naming.NamingException; import javax.naming.directory.DirContext; import javax.naming.directory.InitialDirContext; - import com.cloud.dc.*; import com.cloud.dc.dao.*; import com.cloud.user.*; @@ -105,7 +104,6 @@ import com.cloud.dc.dao.DcDetailsDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; import com.cloud.dc.dao.VlanDao; - import com.cloud.deploy.DataCenterDeployment; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -165,6 +163,7 @@ import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementService; import com.cloud.service.ServiceOfferingVO; import 
com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.SwiftVO; import com.cloud.storage.dao.DiskOfferingDao; @@ -277,6 +276,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject ServiceOfferingDao _serviceOfferingDao; @Inject + ServiceOfferingDetailsDao _serviceOfferingDetailsDao; + @Inject DiskOfferingDao _diskOfferingDao; @Inject NetworkOfferingDao _networkOfferingDao; @@ -2050,19 +2051,26 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), - localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner()); + return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), + cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), + localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), + cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner(), cmd.getDetails()); } @Override @ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering") - public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText, - boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { + public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, + String name, int cpu, int ramSize, int speed, String 
displayText, boolean localStorageRequired, + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, + Integer networkRate, String deploymentPlanner, Map details) { tags = cleanupTags(tags); ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type, domainId, hostTag, deploymentPlanner); if ((offering = _serviceOfferingDao.persist(offering)) != null) { + if (details != null) { + _serviceOfferingDetailsDao.persist(offering.getId(), details); + } UserContext.current().setEventDetails("Service offering id=" + offering.getId()); return offering; } else { diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index ba18fa1c11d..4fb182aae14 100755 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -431,7 +431,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA, - boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner) { + boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate, String deploymentPlanner, Map details) { // TODO Auto-generated method stub return null; } diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index 7ffbe32d8bd..a8256990973 100644 --- 
a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -19,14 +19,8 @@ package org.apache.cloudstack.networkoffering; import java.io.IOException; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.dao.*; -import com.cloud.server.ConfigurationServer; -import com.cloud.user.*; import org.apache.cloudstack.acl.SecurityChecker; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.test.utils.SpringUtils; import org.mockito.Mockito; import org.springframework.context.annotation.Bean; @@ -44,6 +38,18 @@ import com.cloud.api.query.dao.UserAccountJoinDaoImpl; import com.cloud.capacity.dao.CapacityDaoImpl; import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.dao.AccountVlanMapDaoImpl; +import com.cloud.dc.dao.ClusterDaoImpl; +import com.cloud.dc.dao.DataCenterDaoImpl; +import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; +import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; +import com.cloud.dc.dao.DataCenterVnetDaoImpl; +import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.HostPodDaoImpl; +import com.cloud.dc.dao.PodVlanDaoImpl; +import com.cloud.dc.dao.PodVlanMapDaoImpl; +import com.cloud.dc.dao.VlanDaoImpl; import com.cloud.domain.dao.DomainDaoImpl; import com.cloud.event.dao.UsageEventDaoImpl; import com.cloud.host.dao.HostDaoImpl; @@ -80,10 +86,11 @@ import com.cloud.network.vpc.dao.PrivateIpDaoImpl; import com.cloud.network.vpn.RemoteAccessVpnService; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; -import 
com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; import com.cloud.projects.ProjectManager; +import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementService; import com.cloud.service.dao.ServiceOfferingDaoImpl; +import com.cloud.service.dao.ServiceOfferingDetailsDaoImpl; import com.cloud.storage.dao.DiskOfferingDaoImpl; import com.cloud.storage.dao.S3DaoImpl; import com.cloud.storage.dao.SnapshotDaoImpl; @@ -94,6 +101,11 @@ import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.user.AccountDetailsDao; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.UserContext; +import com.cloud.user.UserContextInitializer; import com.cloud.user.dao.AccountDaoImpl; import com.cloud.user.dao.UserDaoImpl; import com.cloud.vm.dao.InstanceGroupDaoImpl; @@ -110,6 +122,7 @@ import com.cloud.vm.dao.VMInstanceDaoImpl; DomainDaoImpl.class, SwiftDaoImpl.class, ServiceOfferingDaoImpl.class, + ServiceOfferingDetailsDaoImpl.class, VlanDaoImpl.class, IPAddressDaoImpl.class, ResourceTagsDaoImpl.class, diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 442a5446be5..fe66207e5e5 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -393,7 +393,16 @@ CREATE TABLE `cloud`.`vm_snapshots` ( ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `vm_snapshot_enabled` tinyint(1) DEFAULT 0 NOT NULL COMMENT 'Whether VM snapshot is supported by hypervisor'; UPDATE `cloud`.`hypervisor_capabilities` SET `vm_snapshot_enabled`=1 WHERE `hypervisor_type` in ('VMware', 'XenServer'); - +CREATE TABLE `cloud`.`service_offering_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `service_offering_id` bigint unsigned NOT NULL COMMENT 'service offering id', + `name` varchar(255) NOT NULL, + `value` 
varchar(255) NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_service_offering_details__service_offering_id` FOREIGN KEY (`service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE, + CONSTRAINT UNIQUE KEY `uk_service_offering_id_name` (`service_offering_id`, `name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + DROP VIEW IF EXISTS `cloud`.`user_vm_view`; CREATE VIEW `cloud`.`user_vm_view` AS select diff --git a/test/integration/component/test_implicit_planner.py b/test/integration/component/test_implicit_planner.py new file mode 100644 index 00000000000..ffcd248b462 --- /dev/null +++ b/test/integration/component/test_implicit_planner.py @@ -0,0 +1,232 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" P1 tests for Storage motion +""" +#Import Local Modules +import marvin +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.remoteSSHClient import remoteSSHClient +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +from nose.plugins.attrib import attr +#Import System modules +import time + +_multiprocess_shared_ = True +class Services: + """Test VM Life Cycle Services + """ + + def __init__(self): + self.services = { + "disk_offering":{ + "displaytext": "Small", + "name": "Small", + "disksize": 1 + }, + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "small": + # Create a small virtual machine instance with disk offering + { + "displayname": "testserver", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "service_offerings": + { + "implicitplanner": + { + # Small service offering ID to for change VM + # service offering from medium to small + "name": "Implicit Strict", + "displaytext": "Implicit Strict", + "cpunumber": 1, + "cpuspeed": 500, + "memory": 512, + "deploymentplanner": "ImplicitDedicationPlanner" + } + }, + "template": { + "displaytext": "Cent OS Template", + "name": "Cent OS Template", + "passwordenabled": True, + }, + "diskdevice": '/dev/xvdd', + # Disk device where ISO is attached to instance + "mount_dir": "/mnt/tmp", + "sleep": 60, + "timeout": 10, + #Migrate VM to hostid + "ostype": 'CentOS 5.3 (64-bit)', + # CentOS 5.3 (64-bit) + } + +class TestImplicitPlanner(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestImplicitPlanner, cls).getClsTestClient().getApiClient() 
+ cls.services = Services().services + + # Get Zone, Domain and templates + domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.services['mode'] = cls.zone.networktype + + template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=domain.id + ) + + cls.small_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offerings"]["implicitplanner"] + ) + + cls._cleanup = [ + cls.small_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + cls.api_client = super(TestImplicitPlanner, cls).getClsTestClient().getApiClient() + cleanup_resources(cls.api_client, cls._cleanup) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + def tearDown(self): + #Clean up, terminate the created ISOs + cleanup_resources(self.apiclient, self.cleanup) + return + + # This test requires multi host and at least one host which is empty (no vms should + # be running on that host). It uses an implicit planner to deploy instances and the + # instances of a new account should go to an host that doesn't have vms of any other + # account. + @attr(tags = ["advanced", "basic", "multihosts", "implicitplanner"]) + def test_01_deploy_vm_with_implicit_planner(self): + """Test implicit planner is placing vms of an account on implicitly dedicated hosts. + """ + # Validate the following + # 1. Deploy a vm using implicit planner. It should go on to a + # host that is empty (not running vms of any other account) + # 2. Deploy another vm it should get deployed on the same host. 
+ + #create a virtual machine + virtual_machine_1 = VirtualMachine.create( + self.api_client, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"] + ) + + list_vm_response_1 = list_virtual_machines( + self.apiclient, + id=virtual_machine_1.id + ) + self.assertEqual( + isinstance(list_vm_response_1, list), + True, + "Check list response returns a valid list" + ) + + self.assertNotEqual( + list_vm_response_1, + None, + "Check virtual machine is listVirtualMachines" + ) + + vm_response_1 = list_vm_response_1[0] + + self.assertEqual( + vm_response_1.id, + virtual_machine_1.id, + "Check virtual machine ID of VM" + ) + + virtual_machine_2 = VirtualMachine.create( + self.api_client, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.small_offering.id, + mode=self.services["mode"] + ) + + list_vm_response_2 = list_virtual_machines( + self.apiclient, + id=virtual_machine_2.id + ) + self.assertEqual( + isinstance(list_vm_response_2, list), + True, + "Check list response returns a valid list" + ) + + self.assertNotEqual( + list_vm_response_2, + None, + "Check virtual machine is listVirtualMachines" + ) + + vm_response_2 = list_vm_response_2[0] + + self.assertEqual( + vm_response_2.id, + virtual_machine_2.id, + "Check virtual machine ID of VM" + ) + + self.assertEqual( + vm_response_1.hostid, + vm_response_2.hostid, + "Check both vms have the same host id" + ) + return \ No newline at end of file diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py index ecdc8412fdb..a811f144980 100755 --- a/tools/marvin/marvin/integration/lib/base.py +++ b/tools/marvin/marvin/integration/lib/base.py @@ -1268,6 +1268,10 @@ class ServiceOffering: if "tags" in services: cmd.tags = services["tags"] + + if "deploymentplanner" in services: + cmd.deploymentplanner = 
services["deploymentplanner"] + # Service Offering private to that domain if domainid: cmd.domainid = domainid From 046580fcf117aadf77179011ecfb5dfffdcca65f Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Fri, 17 May 2013 13:12:36 +0530 Subject: [PATCH 10/19] CLOUDSTACK-2552. Modify AWSAPI to decrypt db values using the decrypted database_key and not management_server_key --- .../bridge/persist/dao/CloudStackUserDaoImpl.java | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java index f108a20e5b4..5aac3960d02 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java @@ -19,15 +19,14 @@ package com.cloud.bridge.persist.dao; import javax.ejb.Local; import org.apache.log4j.Logger; -import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; import org.springframework.stereotype.Component; import com.cloud.bridge.model.CloudStackUserVO; -import com.cloud.bridge.util.EncryptionSecretKeyCheckerUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.crypt.DBEncryptionUtil; @Component @Local(value={CloudStackUserDao.class}) @@ -51,13 +50,8 @@ public class CloudStackUserDaoImpl extends GenericDaoBase Date: Wed, 15 May 2013 17:50:43 +0530 Subject: [PATCH 11/19] CLOUDSTACK-2513: VPN tests refer to invalid connection.user in cloudConnection cloudConnection object should always have "user" and "passwd" attributes. And they are "None" while creating userAPIClient. As we already have "user" and "password" for mgmt server. 
Signed-off-by: Prasanna Santhanam --- tools/marvin/marvin/cloudstackConnection.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/marvin/marvin/cloudstackConnection.py b/tools/marvin/marvin/cloudstackConnection.py index b5ff5bf7b3f..e3977dcf7d4 100644 --- a/tools/marvin/marvin/cloudstackConnection.py +++ b/tools/marvin/marvin/cloudstackConnection.py @@ -43,10 +43,8 @@ class cloudConnection(object): self.securityKey = securityKey self.mgtSvr = mgtSvr self.port = port - if user: - self.user = user - if passwd: - self.passwd = passwd + self.user = user + self.passwd = passwd self.logging = logging self.path = path self.retries = 5 From 55d304a5bbca27994c01c0f7c63b6168d2a90f60 Mon Sep 17 00:00:00 2001 From: SrikanteswaraRao Talluri Date: Tue, 14 May 2013 20:06:47 +0530 Subject: [PATCH 12/19] CLOUDSTACK-2478: Fix test_volumes.py script for BVT failures removed storage type in compute offering and disk offering Signed-off-by: Prasanna Santhanam --- test/integration/smoke/test_volumes.py | 84 ++++++++++++++++---------- 1 file changed, 52 insertions(+), 32 deletions(-) diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py index 4bf8203e74c..89b013a516f 100644 --- a/test/integration/smoke/test_volumes.py +++ b/test/integration/smoke/test_volumes.py @@ -53,19 +53,17 @@ class Services: "displaytext": "Tiny Instance", "cpunumber": 1, "cpuspeed": 100, # in MHz - "memory": 128, # In MBs - "storagetype": "local" + "memory": 260 # In MBs + }, "disk_offering": { "displaytext": "Small", "name": "Small", - "storagetype": "local", "disksize": 1 }, 'resized_disk_offering': { "displaytext": "Resized", "name": "Resized", - "storagetype": "local", "disksize": 3 }, "volume_offerings": { @@ -152,7 +150,7 @@ class TestCreateVolume(cloudstackTestCase): self.dbclient = self.testClient.getDbConnection() self.cleanup = [] - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", 
"basic"]) def test_01_create_volume(self): """Test Volume creation for all Disk Offerings (incl. custom) """ @@ -346,8 +344,9 @@ class TestVolumes(cloudstackTestCase): cls.custom_resized_disk_offering, cls.service_offering, cls.disk_offering, + cls.volume, cls.account - ] + ] @classmethod def tearDownClass(cls): @@ -359,14 +358,17 @@ class TestVolumes(cloudstackTestCase): def setUp(self): self.apiClient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() + self.attached = False self.cleanup = [] def tearDown(self): #Clean up, terminate the created volumes + if self.attached: + self.virtual_machine.detach_volume(self.apiClient, self.volume) cleanup_resources(self.apiClient, self.cleanup) return - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_02_attach_volume(self): """Attach a created Volume to a Running VM """ @@ -381,7 +383,7 @@ class TestVolumes(cloudstackTestCase): self.virtual_machine.id )) self.virtual_machine.attach_volume(self.apiClient, self.volume) - + self.attached = True list_volume_response = list_volumes( self.apiClient, id=self.volume.id @@ -412,7 +414,7 @@ class TestVolumes(cloudstackTestCase): (self.virtual_machine.ipaddress, e)) return - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_03_download_attached_volume(self): """Download a Volume attached to a VM """ @@ -423,6 +425,8 @@ class TestVolumes(cloudstackTestCase): self.debug("Extract attached Volume ID: %s" % self.volume.id) + self.virtual_machine.attach_volume(self.apiClient, self.volume) + self.attached = True cmd = extractVolume.extractVolumeCmd() cmd.id = self.volume.id cmd.mode = "HTTP_DOWNLOAD" @@ -432,7 +436,7 @@ class TestVolumes(cloudstackTestCase): with self.assertRaises(Exception): self.apiClient.extractVolume(cmd) - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", 
"smoke", "basic"]) def test_04_delete_attached_volume(self): """Delete a Volume attached to a VM """ @@ -444,19 +448,16 @@ class TestVolumes(cloudstackTestCase): self.debug("Trying to delete attached Volume ID: %s" % self.volume.id) - + self.virtual_machine.attach_volume(self.apiClient, self.volume) + self.attached = True cmd = deleteVolume.deleteVolumeCmd() cmd.id = self.volume.id #Proper exception should be raised; deleting attach VM is not allowed #with self.assertRaises(Exception): - result = self.apiClient.deleteVolume(cmd) - self.assertEqual( - result, - None, - "Check for delete download error while volume is attached" - ) + with self.assertRaises(Exception): + self.apiClient.deleteVolume(cmd) - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_05_detach_volume(self): """Detach a Volume attached to a VM """ @@ -470,8 +471,9 @@ class TestVolumes(cloudstackTestCase): self.volume.id, self.virtual_machine.id )) - + self.virtual_machine.attach_volume(self.apiClient, self.volume) self.virtual_machine.detach_volume(self.apiClient, self.volume) + self.attached = False #Sleep to ensure the current state will reflected in other calls time.sleep(self.services["sleep"]) list_volume_response = list_volumes( @@ -497,7 +499,7 @@ class TestVolumes(cloudstackTestCase): ) return - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_06_download_detached_volume(self): """Download a Volume unattached to an VM """ @@ -506,6 +508,10 @@ class TestVolumes(cloudstackTestCase): self.debug("Extract detached Volume ID: %s" % self.volume.id) + self.virtual_machine.attach_volume(self.apiClient, self.volume) + self.virtual_machine.detach_volume(self.apiClient, self.volume) + self.attached = False + cmd = extractVolume.extractVolumeCmd() cmd.id = self.volume.id cmd.mode = "HTTP_DOWNLOAD" @@ -528,7 +534,7 @@ class TestVolumes(cloudstackTestCase): % 
(extract_vol.url, self.volume.id) ) - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_07_resize_fail(self): """Verify invalid options fail to Resize a volume""" # Verify the size is the new size is what we wanted it to be. @@ -543,7 +549,7 @@ class TestVolumes(cloudstackTestCase): response = self.apiClient.resizeVolume(cmd) except Exception as ex: #print str(ex) - if "HTTP Error 431:" in str(ex): + if "invalid" in str(ex): success = True self.assertEqual( success, @@ -557,7 +563,7 @@ class TestVolumes(cloudstackTestCase): try: response = self.apiClient.resizeVolume(cmd) except Exception as ex: - if "HTTP Error 431:" in str(ex): + if "invalid" in str(ex): success = True self.assertEqual( success, @@ -576,6 +582,7 @@ class TestVolumes(cloudstackTestCase): ) #attach the volume self.virtual_machine.attach_volume(self.apiClient, self.volume) + self.attached = True #stop the vm if it is on xenserver if self.services['hypervisor'].lower() == "xenserver": self.virtual_machine.stop(self.apiClient) @@ -603,10 +610,11 @@ class TestVolumes(cloudstackTestCase): True, "Verify the volume did not resize" ) - self.virtual_machine.detach_volume(self.apiClient, self.volume) - self.cleanup.append(self.volume) + if self.services['hypervisor'].lower() == "xenserver": + self.virtual_machine.start(self.apiClient) - @attr(tags = ["advanced", "advancedns", "smoke"]) + + @attr(tags = ["advanced", "advancedns", "smoke", "basic"]) def test_08_resize_volume(self): """Resize a volume""" # Verify the size is the new size is what we wanted it to be. 
@@ -616,6 +624,8 @@ class TestVolumes(cloudstackTestCase): self.virtual_machine.id )) self.virtual_machine.attach_volume(self.apiClient, self.volume) + self.attached = True + if self.services['hypervisor'].lower() == "xenserver": self.virtual_machine.stop(self.apiClient) self.debug("Resize Volume ID: %s" % self.volume.id) @@ -635,7 +645,7 @@ class TestVolumes(cloudstackTestCase): type='DATADISK' ) for vol in list_volume_response: - if vol.id == self.volume.id and vol.size == 3221225472L: + if vol.id == self.volume.id and vol.size == 3221225472L and vol.state == 'Ready': success = True if success: break @@ -649,10 +659,10 @@ class TestVolumes(cloudstackTestCase): "Check if the volume resized appropriately" ) - self.virtual_machine.detach_volume(self.apiClient, self.volume) - self.cleanup.append(self.volume) + if self.services['hypervisor'].lower() == "xenserver": + self.virtual_machine.start(self.apiClient) - @attr(tags = ["advanced", "advancedns", "smoke"]) + @attr(tags = ["advanced", "advancedns", "smoke","basic"]) def test_09_delete_detached_volume(self): """Delete a Volume unattached to an VM """ @@ -665,13 +675,23 @@ class TestVolumes(cloudstackTestCase): self.debug("Delete Volume ID: %s" % self.volume.id) + self.volume_1 = Volume.create( + self.api_client, + self.services, + account=self.account.name, + domainid=self.account.domainid + ) + + self.virtual_machine.attach_volume(self.apiClient, self.volume_1) + self.virtual_machine.detach_volume(self.apiClient, self.volume_1) + cmd = deleteVolume.deleteVolumeCmd() - cmd.id = self.volume.id + cmd.id = self.volume_1.id self.apiClient.deleteVolume(cmd) list_volume_response = list_volumes( self.apiClient, - id=self.volume.id, + id=self.volume_1.id, type='DATADISK' ) self.assertEqual( From 2867c6a6df6fe552cd8cf1f8bb7447bf8084d859 Mon Sep 17 00:00:00 2001 From: SrikanteswaraRao Talluri Date: Thu, 16 May 2013 17:29:12 +0530 Subject: [PATCH 13/19] CLOUDSTACK-2542: Fix router test for basic zone Signed-off-by: Prasanna 
Santhanam --- test/integration/smoke/test_routers.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/test/integration/smoke/test_routers.py b/test/integration/smoke/test_routers.py index 9ec2e918c42..f6ca2790069 100644 --- a/test/integration/smoke/test_routers.py +++ b/test/integration/smoke/test_routers.py @@ -141,11 +141,17 @@ class TestRouterServices(cloudstackTestCase): # by checking status of dnsmasq process # Find router associated with user account - list_router_response = list_routers( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) + if self.zone.networktype == "Basic": + list_router_response = list_routers( + self.apiclient, + listall="true" + ) + else: + list_router_response = list_routers( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) self.assertEqual( isinstance(list_router_response, list), True, From 28c5fbcb05750b91a54acfa84f0b59b03c7ce794 Mon Sep 17 00:00:00 2001 From: SrikanteswaraRao Talluri Date: Wed, 15 May 2013 00:22:32 +0530 Subject: [PATCH 14/19] CLOUDSTACK-2483: Fix base.py migrate volume method Signed-off-by: Prasanna Santhanam --- tools/marvin/marvin/integration/lib/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py index a811f144980..f3a96bd9bec 100755 --- a/tools/marvin/marvin/integration/lib/base.py +++ b/tools/marvin/marvin/integration/lib/base.py @@ -685,6 +685,7 @@ class Volume: timeout = timeout - 1 return + @classmethod def migrate(cls, apiclient, **kwargs): """Migrate a volume""" cmd = migrateVolume.migrateVolumeCmd() From 107f4924757d6d59bff4256a56df3c8e81763818 Mon Sep 17 00:00:00 2001 From: Rajesh Battala Date: Thu, 9 May 2013 18:02:41 +0530 Subject: [PATCH 15/19] Fixed CLOUDSTACK-2081 Volume which is added thru upload volume is failed to attach to the instance saying Volume state must be in Allocated, Ready or in Uploaded 
state( Though uploaded Volume state is uploaded) --- .../apache/cloudstack/storage/volume/VolumeServiceImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 26253544e77..7fdf6bb18d3 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -748,11 +748,11 @@ public class VolumeServiceImpl implements VolumeService { protected Void registerVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { CreateCmdResult result = callback.getResult(); VolumeObject vo = (VolumeObject)context.volume; - /*if (result.isFailed()) { + if (result.isFailed()) { vo.stateTransit(Volume.Event.OperationFailed); } else { vo.stateTransit(Volume.Event.OperationSucceeded); - }*/ + } VolumeApiResult res = new VolumeApiResult(vo); context.future.complete(res); return null; From e520ff456801e9a7b57fe56ee78f4f649675c082 Mon Sep 17 00:00:00 2001 From: Devdeep Singh Date: Fri, 17 May 2013 17:01:17 +0530 Subject: [PATCH 16/19] CLOUDSTACK-2122. Virtual machine id should be a required parameter for findHostsForMigration api. Fixing it. 
--- .../api/command/admin/host/FindHostsForMigrationCmd.java | 2 +- server/src/com/cloud/server/ManagementServerImpl.java | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java index e6e45cc7246..b2d77b85dd2 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java @@ -45,7 +45,7 @@ public class FindHostsForMigrationCmd extends BaseListCmd { ///////////////////////////////////////////////////// @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType = UserVmResponse.class, - required=false, description="find hosts to which this VM can be migrated and flag the hosts with enough " + + required=true, description="find hosts to which this VM can be migrated and flag the hosts with enough " + "CPU/RAM to host the VM") private Long virtualMachineId; diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index f74b7ad964c..06c0f964ccb 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -1086,17 +1086,16 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe VMInstanceVO vm = _vmInstanceDao.findById(vmId); if (vm == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with specified id"); - ex.addProxyObject(vm, vmId, "vmId"); + InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the VM with given id"); throw ex; } if (vm.getState() != State.Running) { if (s_logger.isDebugEnabled()) { - s_logger.debug("VM is not Running, unable to migrate the vm" + vm); + s_logger.debug("VM is not 
running, cannot migrate the vm" + vm); } - InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to" + - " migrate the vm with specified id"); + InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, cannot " + + "migrate the vm with specified id"); ex.addProxyObject(vm, vmId, "vmId"); throw ex; } From 4eb310e926771c30515034bb056f71f52afe1a19 Mon Sep 17 00:00:00 2001 From: Nitin Mehta Date: Fri, 17 May 2013 17:38:21 +0530 Subject: [PATCH 17/19] iCLOUDSTACK-2321 Fix the response of scaleVMCmd Add Scale System vm command --- api/src/com/cloud/event/EventTypes.java | 3 + .../com/cloud/server/ManagementService.java | 12 +- api/src/com/cloud/vm/UserVmService.java | 2 +- .../admin/systemvm/ScaleSystemVMCmd.java | 131 ++++++++++++++++++ .../api/command/user/vm/ScaleVMCmd.java | 23 ++- .../api/command/test/ScaleVMCmdTest.java | 32 ++++- client/tomcatconf/commands.properties.in | 1 + .../com/cloud/server/ManagementServer.java | 6 + .../cloud/server/ManagementServerImpl.java | 47 ++++--- server/src/com/cloud/vm/UserVmManager.java | 6 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 17 ++- .../com/cloud/vm/MockUserVmManagerImpl.java | 9 +- 12 files changed, 239 insertions(+), 50 deletions(-) create mode 100644 api/src/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index ee7f5b7d89f..9c83f13ea2a 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -92,6 +92,8 @@ public class EventTypes { public static final String EVENT_PROXY_STOP = "PROXY.STOP"; public static final String EVENT_PROXY_REBOOT = "PROXY.REBOOT"; public static final String EVENT_PROXY_HA = "PROXY.HA"; + public static final String EVENT_PROXY_SCALE = "PROXY.SCALE"; + // VNC Console Events public static final String EVENT_VNC_CONNECT = "VNC.CONNECT"; @@ -213,6 +215,7 @@ 
public class EventTypes { public static final String EVENT_SSVM_STOP = "SSVM.STOP"; public static final String EVENT_SSVM_REBOOT = "SSVM.REBOOT"; public static final String EVENT_SSVM_HA = "SSVM.HA"; + public static final String EVENT_SSVM_SCALE = "SSVM.SCALE"; // Service Offerings public static final String EVENT_SERVICE_OFFERING_CREATE = "SERVICE.OFFERING.CREATE"; diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index 59b83c9bbce..24d33d5a3f8 100755 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import com.cloud.exception.*; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd; import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd; @@ -34,11 +35,7 @@ import org.apache.cloudstack.api.command.admin.resource.DeleteAlertsCmd; import org.apache.cloudstack.api.command.admin.resource.ListAlertsCmd; import org.apache.cloudstack.api.command.admin.resource.ListCapacityCmd; import org.apache.cloudstack.api.command.admin.resource.UploadCustomCertificateCmd; -import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.ListSystemVMsCmd; -import org.apache.cloudstack.api.command.admin.systemvm.RebootSystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.StopSystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd; +import org.apache.cloudstack.api.command.admin.systemvm.*; import org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd; import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd; import org.apache.cloudstack.api.command.user.config.ListCapabilitiesCmd; @@ -64,10 +61,6 @@ import com.cloud.configuration.Configuration; import 
com.cloud.dc.Pod; import com.cloud.dc.Vlan; import com.cloud.domain.Domain; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InternalErrorException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorCapabilities; @@ -422,4 +415,5 @@ public interface ManagementService { List listDeploymentPlanners(); + VirtualMachine upgradeSystemVM(ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, ConcurrentOperationException; } diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java index 0a0660ad493..7d459b99a9e 100755 --- a/api/src/com/cloud/vm/UserVmService.java +++ b/api/src/com/cloud/vm/UserVmService.java @@ -461,6 +461,6 @@ public interface UserVmService { UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException; - boolean upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException; + UserVm upgradeVirtualMachine(ScaleVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException; } diff --git a/api/src/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java new file mode 100644 index 00000000000..a077e246a46 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/systemvm/ScaleSystemVMCmd.java @@ -0,0 +1,131 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.systemvm; + +import com.cloud.event.EventTypes; +import com.cloud.exception.*; +import org.apache.cloudstack.api.*; +import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.SystemVmResponse; +import org.apache.log4j.Logger; + +import com.cloud.offering.ServiceOffering; +import com.cloud.user.Account; +import com.cloud.user.UserContext; +import com.cloud.vm.VirtualMachine; + +@APICommand(name = "scaleSystemVm", responseObject=SystemVmResponse.class, description="Scale the service offering for a system vm (console proxy or secondary storage). 
" + + "The system vm must be in a \"Stopped\" state for " + + "this command to take effect.") +public class ScaleSystemVMCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(UpgradeVMCmd.class.getName()); + private static final String s_name = "changeserviceforsystemvmresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=SystemVmResponse.class, + required=true, description="The ID of the system vm") + private Long id; + + @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=ServiceOfferingResponse.class, + required=true, description="the service offering ID to apply to the system vm") + private Long serviceOfferingId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getServiceOfferingId() { + return serviceOfferingId; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public void execute(){ + UserContext.current().setEventDetails("SystemVm Id: "+getId()); + + ServiceOffering serviceOffering = _configService.getServiceOffering(serviceOfferingId); + if (serviceOffering == null) { + throw new InvalidParameterValueException("Unable to find service 
offering: " + serviceOfferingId); + } + + VirtualMachine result = null; + try { + result = _mgr.upgradeSystemVM(this); + } catch (ResourceUnavailableException ex) { + s_logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); + } catch (ConcurrentOperationException ex) { + s_logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (ManagementServerException ex) { + s_logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } catch (VirtualMachineMigrationException ex) { + s_logger.warn("Exception: ", ex); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); + } + if (result != null) { + SystemVmResponse response = _responseGenerator.createSystemVmResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to scale system vm"); + } + } + + @Override + public String getEventType() { + VirtualMachine.Type type = _mgr.findSystemVMTypeById(getId()); + if(type == VirtualMachine.Type.ConsoleProxy){ + return EventTypes.EVENT_PROXY_SCALE; + } + else{ + return EventTypes.EVENT_SSVM_SCALE; + } + } + + @Override + public String getEventDescription() { + return "scaling system vm: " + getId() + " to service offering: " + getServiceOfferingId(); + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java index 4f2ac750ce5..758d9c1667b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/ScaleVMCmd.java @@ -16,6 +16,7 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; +import com.cloud.event.EventTypes; import com.cloud.exception.*; import com.cloud.user.Account; import com.cloud.user.UserContext; @@ -26,9 +27,11 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.log4j.Logger; +import java.util.List; + @APICommand(name = "scaleVirtualMachine", description="Scales the virtual machine to a new service offering.", responseObject=SuccessResponse.class) -public class ScaleVMCmd extends BaseCmd { +public class ScaleVMCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(ScaleVMCmd.class.getName()); private static final String s_name = "scalevirtualmachineresponse"; @@ -84,7 +87,7 @@ public class ScaleVMCmd extends BaseCmd { @Override public void execute(){ //UserContext.current().setEventDetails("Vm Id: "+getId()); - boolean result; + UserVm result; try { result = _userVmService.upgradeVirtualMachine(this); } catch (ResourceUnavailableException ex) { @@ -100,11 +103,23 @@ public class ScaleVMCmd extends BaseCmd { s_logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } - if (result){ - SuccessResponse response = new SuccessResponse(getCommandName()); + if (result != null){ + List responseList = _responseGenerator.createUserVmResponse("virtualmachine", result); + UserVmResponse response = responseList.get(0); + response.setResponseName(getCommandName()); this.setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to scale vm"); } } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_SCALE; + } + + @Override + public String getEventDescription() { + return "scaling volume: " + getId() + " to service offering: " + getServiceOfferingId(); + } } \ No newline at end of file diff --git 
a/api/test/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java b/api/test/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java index 8a28290e04b..bb022986e2d 100644 --- a/api/test/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java +++ b/api/test/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java @@ -24,11 +24,18 @@ import org.apache.cloudstack.api.ResponseGenerator; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.vm.ScaleVMCmd; +import org.apache.cloudstack.api.response.SwiftResponse; +import org.apache.cloudstack.api.response.UserVmResponse; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mockito; +import static org.mockito.Matchers.anyInt; + + +import java.util.LinkedList; +import java.util.List; public class ScaleVMCmdTest extends TestCase{ @@ -58,19 +65,34 @@ public class ScaleVMCmdTest extends TestCase{ public void testCreateSuccess() { UserVmService userVmService = Mockito.mock(UserVmService.class); + UserVm userVm = Mockito.mock(UserVm.class); + try { Mockito.when( userVmService.upgradeVirtualMachine(scaleVMCmd)) - .thenReturn(true); + .thenReturn(userVm); }catch (Exception e){ Assert.fail("Received exception when success expected " +e.getMessage()); } - scaleVMCmd._userVmService = userVmService; - responseGenerator = Mockito.mock(ResponseGenerator.class); - + ResponseGenerator responseGenerator = Mockito.mock(ResponseGenerator.class); scaleVMCmd._responseGenerator = responseGenerator; + + UserVmResponse userVmResponse = Mockito.mock(UserVmResponse.class); + //List list = Mockito.mock(UserVmResponse.class); + //list.add(userVmResponse); + //LinkedList mockedList = Mockito.mock(LinkedList.class); + //Mockito.when(mockedList.get(0)).thenReturn(userVmResponse); + + List list = new LinkedList(); + list.add(userVmResponse); + + Mockito.when(responseGenerator.createUserVmResponse("virtualmachine", 
userVm)).thenReturn( + list); + + scaleVMCmd._userVmService = userVmService; + scaleVMCmd.execute(); } @@ -83,7 +105,7 @@ public class ScaleVMCmdTest extends TestCase{ try { Mockito.when( userVmService.upgradeVirtualMachine(scaleVMCmd)) - .thenReturn(false); + .thenReturn(null); }catch (Exception e){ Assert.fail("Received exception when success expected " +e.getMessage()); } diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 4cd9065b641..68a7511560b 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -205,6 +205,7 @@ destroySystemVm=1 listSystemVms=3 migrateSystemVm=1 changeServiceForSystemVm=1 +scaleSystemVm=1 #### configuration commands updateConfiguration=1 diff --git a/server/src/com/cloud/server/ManagementServer.java b/server/src/com/cloud/server/ManagementServer.java index 240464e4938..969bc6557e1 100755 --- a/server/src/com/cloud/server/ManagementServer.java +++ b/server/src/com/cloud/server/ManagementServer.java @@ -19,6 +19,11 @@ package com.cloud.server; import java.util.Date; import java.util.List; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.ManagementServerException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.VirtualMachineMigrationException; +import org.apache.cloudstack.api.command.admin.systemvm.ScaleSystemVMCmd; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.event.EventVO; @@ -100,4 +105,5 @@ public interface ManagementServer extends ManagementService, PluggableService { void resetEncryptionKeyIV(); public void enableAdminUser(String password); + } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 06c0f964ccb..8b3eea4a1f2 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ 
-43,6 +43,8 @@ import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.exception.*; +import com.cloud.vm.*; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroupProcessor; @@ -151,13 +153,7 @@ import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForM import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; -import org.apache.cloudstack.api.command.admin.systemvm.DestroySystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.ListSystemVMsCmd; -import org.apache.cloudstack.api.command.admin.systemvm.MigrateSystemVMCmd; -import org.apache.cloudstack.api.command.admin.systemvm.RebootSystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.StartSystemVMCmd; -import org.apache.cloudstack.api.command.admin.systemvm.StopSystemVmCmd; -import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd; +import org.apache.cloudstack.api.command.admin.systemvm.*; import org.apache.cloudstack.api.command.admin.template.PrepareTemplateCmd; import org.apache.cloudstack.api.command.admin.usage.AddTrafficMonitorCmd; import org.apache.cloudstack.api.command.admin.usage.AddTrafficTypeCmd; @@ -472,12 +468,6 @@ import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; import com.cloud.event.EventVO; import com.cloud.event.dao.EventDao; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.exception.StorageUnavailableException; import 
com.cloud.ha.HighAvailabilityManager; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -571,17 +561,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.MacAddress; import com.cloud.utils.net.NetUtils; import com.cloud.utils.ssh.SSHKeysHelper; -import com.cloud.vm.ConsoleProxyVO; -import com.cloud.vm.DiskProfile; -import com.cloud.vm.InstanceGroupVO; -import com.cloud.vm.SecondaryStorageVmVO; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachineManager; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.VirtualMachineProfileImpl; import com.cloud.vm.dao.ConsoleProxyDao; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.InstanceGroupDao; @@ -717,6 +697,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject ConfigurationServer _configServer; + @Inject + UserVmManager _userVmMgr; private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); @@ -2917,6 +2899,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(ListAffinityGroupTypesCmd.class); cmdList.add(ListDeploymentPlannersCmd.class); cmdList.add(ReleaseHostReservationCmd.class); + cmdList.add(ScaleSystemVMCmd.class); cmdList.add(AddResourceDetailCmd.class); cmdList.add(RemoveResourceDetailCmd.class); cmdList.add(ListResourceDetailsCmd.class); @@ -4020,10 +4003,28 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } + @Override + public VirtualMachine upgradeSystemVM(ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, 
ConcurrentOperationException { + + boolean result = _userVmMgr.upgradeVirtualMachine(cmd.getId(), cmd.getServiceOfferingId()); + if(result){ + VirtualMachine vm = _vmInstanceDao.findById(cmd.getId()); + return vm; + }else{ + return null; + } + } + + @Override public VirtualMachine upgradeSystemVM(UpgradeSystemVMCmd cmd) { Long systemVmId = cmd.getId(); Long serviceOfferingId = cmd.getServiceOfferingId(); + return upgradeStoppedSystemVm(systemVmId, serviceOfferingId); + + } + + private VirtualMachine upgradeStoppedSystemVm(Long systemVmId, Long serviceOfferingId){ Account caller = UserContext.current().getCaller(); VMInstanceVO systemVm = _vmInstanceDao.findByIdTypes(systemVmId, VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm); diff --git a/server/src/com/cloud/vm/UserVmManager.java b/server/src/com/cloud/vm/UserVmManager.java index cc1fffd780b..0f8e36804bb 100755 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -22,9 +22,7 @@ import java.util.Map; import com.cloud.agent.api.VmStatsEntry; import com.cloud.api.query.vo.UserVmJoinVO; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.*; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.Criteria; import com.cloud.user.Account; @@ -94,4 +92,6 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi Pair> startVirtualMachine(long vmId, Long hostId, Map additionalParams) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; + boolean upgradeVirtualMachine(Long id, Long serviceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException; + } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java 
b/server/src/com/cloud/vm/UserVmManagerImpl.java index a3b731ab2a5..860daaf9a5e 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -1076,11 +1076,22 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Override @ActionEvent(eventType = EventTypes.EVENT_VM_SCALE, eventDescription = "scaling Vm") - public boolean - upgradeVirtualMachine(ScaleVMCmd cmd) throws InvalidParameterValueException, ResourceAllocationException { + public UserVm + upgradeVirtualMachine(ScaleVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException{ Long vmId = cmd.getId(); Long newServiceOfferingId = cmd.getServiceOfferingId(); + boolean result = upgradeVirtualMachine(vmId, newServiceOfferingId); + if(result){ + return _vmDao.findById(vmId); + }else{ + return null; + } + + } + + @Override + public boolean upgradeVirtualMachine(Long vmId, Long newServiceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException{ Account caller = UserContext.current().getCaller(); // Verify input parameters @@ -1147,9 +1158,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } return success; - } + @Override public HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds) throws CloudRuntimeException { diff --git a/server/test/com/cloud/vm/MockUserVmManagerImpl.java b/server/test/com/cloud/vm/MockUserVmManagerImpl.java index a88625a42fa..50a90f200c9 100644 --- a/server/test/com/cloud/vm/MockUserVmManagerImpl.java +++ b/server/test/com/cloud/vm/MockUserVmManagerImpl.java @@ -409,8 +409,8 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, } @Override - public boolean upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, 
ManagementServerException, VirtualMachineMigrationException { - return false; //To change body of implemented methods use File | Settings | File Templates. + public UserVm upgradeVirtualMachine(ScaleVMCmd scaleVMCmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException { + return null; //To change body of implemented methods use File | Settings | File Templates. } @@ -420,6 +420,11 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } + @Override + public boolean upgradeVirtualMachine(Long id, Long serviceOfferingId) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException { + return false; //To change body of implemented methods use File | Settings | File Templates. + } + @Override public void prepareStop(VirtualMachineProfile profile) { // TODO Auto-generated method stub From b3e9b2a5dc0439cad60058d693cba9d3c714af70 Mon Sep 17 00:00:00 2001 From: radhikap Date: Fri, 17 May 2013 18:57:59 +0530 Subject: [PATCH 18/19] portable IP --- docs/en-US/elastic-ip.xml | 161 ++++++++++++++++++++------------------ 1 file changed, 87 insertions(+), 74 deletions(-) diff --git a/docs/en-US/elastic-ip.xml b/docs/en-US/elastic-ip.xml index 8ecbd75be70..672fc5aef0c 100644 --- a/docs/en-US/elastic-ip.xml +++ b/docs/en-US/elastic-ip.xml @@ -26,78 +26,91 @@ choice from the EIP pool of your account. Later if required you can reassign the IP address to a different VM. This feature is extremely helpful during VM failure. Instead of replacing the VM which is down, the IP address can be reassigned to a new VM in your account. - Similar to the public IP address, Elastic IP addresses are mapped to their associated - private IP addresses by using StaticNAT. The EIP service is equipped with StaticNAT (1:1) - service in an EIP-enabled basic zone. 
The default network offering, - DefaultSharedNetscalerEIPandELBNetworkOffering, provides your network with EIP and ELB network - services if a NetScaler device is deployed in your zone. Consider the following illustration for - more details. - - - - - - eip-ns-basiczone.png: Elastic IP in a NetScaler-enabled Basic Zone. - - - In the illustration, a NetScaler appliance is the default entry or exit point for the - &PRODUCT; instances, and firewall is the default entry or exit point for the rest of the data - center. Netscaler provides LB services and staticNAT service to the guest networks. The guest - traffic in the pods and the Management Server are on different subnets / VLANs. The policy-based - routing in the data center core switch sends the public traffic through the NetScaler, whereas - the rest of the data center goes through the firewall. - The EIP work flow is as follows: - - - When a user VM is deployed, a public IP is automatically acquired from the pool of - public IPs configured in the zone. This IP is owned by the VM's account. - - - Each VM will have its own private IP. When the user VM starts, Static NAT is provisioned - on the NetScaler device by using the Inbound Network Address Translation (INAT) and Reverse - NAT (RNAT) rules between the public IP and the private IP. - - Inbound NAT (INAT) is a type of NAT supported by NetScaler, in which the destination - IP address is replaced in the packets from the public network, such as the Internet, with - the private IP address of a VM in the private network. Reverse NAT (RNAT) is a type of NAT - supported by NetScaler, in which the source IP address is replaced in the packets - generated by a VM in the private network with the public IP address. - - - - This default public IP will be released in two cases: - - - When the VM is stopped. When the VM starts, it again receives a new public IP, not - necessarily the same one allocated initially, from the pool of Public IPs. 
- - - The user acquires a public IP (Elastic IP). This public IP is associated with the - account, but will not be mapped to any private IP. However, the user can enable Static - NAT to associate this IP to the private IP of a VM in the account. The Static NAT rule - for the public IP can be disabled at any time. When Static NAT is disabled, a new public - IP is allocated from the pool, which is not necessarily be the same one allocated - initially. - - - - - For the deployments where public IPs are limited resources, you have the flexibility to - choose not to allocate a public IP by default. You can use the Associate Public IP option to - turn on or off the automatic public IP assignment in the EIP-enabled Basic zones. If you turn - off the automatic public IP assignment while creating a network offering, only a private IP is - assigned to a VM when the VM is deployed with that network offering. Later, the user can acquire - an IP for the VM and enable static NAT. - For more information on the Associate Public IP option, see . - For more information on the Associate Public IP option, see the - Administration Guide. - - The Associate Public IP feature is designed only for use with user VMs. The System VMs - continue to get both public IP and private by default, irrespective of the network offering - configuration. - - New deployments which use the default shared network offering with EIP and ELB services to - create a shared network in the Basic zone will continue allocating public IPs to each user - VM. +
+ Elastic IPs in Basic Zone + Similar to the public IP address, Elastic IP addresses are mapped to their associated + private IP addresses by using StaticNAT. The EIP service is equipped with StaticNAT (1:1) + service in an EIP-enabled basic zone. The default network offering, + DefaultSharedNetscalerEIPandELBNetworkOffering, provides your network with EIP and ELB network + services if a NetScaler device is deployed in your zone. Consider the following illustration + for more details. + + + + + + eip-ns-basiczone.png: Elastic IP in a NetScaler-enabled Basic Zone. + + + In the illustration, a NetScaler appliance is the default entry or exit point for the + &PRODUCT; instances, and firewall is the default entry or exit point for the rest of the data + center. Netscaler provides LB services and staticNAT service to the guest networks. The guest + traffic in the pods and the Management Server are on different subnets / VLANs. The + policy-based routing in the data center core switch sends the public traffic through the + NetScaler, whereas the rest of the data center goes through the firewall. + The EIP work flow is as follows: + + + When a user VM is deployed, a public IP is automatically acquired from the pool of + public IPs configured in the zone. This IP is owned by the VM's account. + + + Each VM will have its own private IP. When the user VM starts, Static NAT is + provisioned on the NetScaler device by using the Inbound Network Address Translation + (INAT) and Reverse NAT (RNAT) rules between the public IP and the private IP. + + Inbound NAT (INAT) is a type of NAT supported by NetScaler, in which the destination + IP address is replaced in the packets from the public network, such as the Internet, + with the private IP address of a VM in the private network. Reverse NAT (RNAT) is a type + of NAT supported by NetScaler, in which the source IP address is replaced in the packets + generated by a VM in the private network with the public IP address. 
+ + + + This default public IP will be released in two cases: + + + When the VM is stopped. When the VM starts, it again receives a new public IP, not + necessarily the same one allocated initially, from the pool of Public IPs. + + + The user acquires a public IP (Elastic IP). This public IP is associated with the + account, but will not be mapped to any private IP. However, the user can enable Static + NAT to associate this IP to the private IP of a VM in the account. The Static NAT rule + for the public IP can be disabled at any time. When Static NAT is disabled, a new + public IP is allocated from the pool, which is not necessarily be the same one + allocated initially. + + + + + For the deployments where public IPs are limited resources, you have the flexibility to + choose not to allocate a public IP by default. You can use the Associate Public IP option to + turn on or off the automatic public IP assignment in the EIP-enabled Basic zones. If you turn + off the automatic public IP assignment while creating a network offering, only a private IP is + assigned to a VM when the VM is deployed with that network offering. Later, the user can + acquire an IP for the VM and enable static NAT. + For more information on the Associate Public IP option, see . + For more information on the Associate Public IP option, see the + Administration Guide. + + The Associate Public IP feature is designed only for use with user VMs. The System VMs + continue to get both public IP and private by default, irrespective of the network offering + configuration. + + New deployments which use the default shared network offering with EIP and ELB services to + create a shared network in the Basic zone will continue allocating public IPs to each user + VM. +
+
+ About Portable IP + Portable IPs in &PRODUCT; are nothing but elastic IPs that can be transferred across + geographically separated zones. As an administrator, you can provision a pool of portable IPs + at the region level, which are then available for user consumption. The users can acquire portable IPs if + the administrator has provisioned portable public IPs at the region level they are part of. These IPs can + be used for any service within an advanced zone. You can also use portable IPs for EIP service + in basic zones. Additionally, a portable IP can be transferred from one network to another + network.
From 5646f5e9772380cc91a8ccb9894c8ed05ffc891f Mon Sep 17 00:00:00 2001 From: Wido den Hollander Date: Fri, 17 May 2013 17:03:21 +0200 Subject: [PATCH 19/19] rbd: Allow RBD disks to be attached to a Instance --- .../hypervisor/kvm/resource/LibvirtComputingResource.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index c3140d3921a..b31fb5dfbe5 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -3581,6 +3581,7 @@ ServerResource { List disks = null; Domain dm = null; DiskDef diskdef = null; + KVMStoragePool attachingPool = attachingDisk.getPool(); try { if (!attach) { dm = conn.domainLookupByUUID(UUID.nameUUIDFromBytes(vmName @@ -3605,7 +3606,12 @@ ServerResource { } } else { diskdef = new DiskDef(); - if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) { + if (attachingPool.getType() == StoragePoolType.RBD) { + diskdef.defNetworkBasedDisk(attachingDisk.getPath(), + attachingPool.getSourceHost(), attachingPool.getSourcePort(), + attachingPool.getAuthUserName(), attachingPool.getUuid(), devId, + DiskDef.diskBus.VIRTIO, diskProtocol.RBD); + } else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) { diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.diskBus.VIRTIO, DiskDef.diskFmtType.QCOW2); } else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {