diff --git a/LICENSE b/LICENSE index eda958726f8..00afdb41fd6 100644 --- a/LICENSE +++ b/LICENSE @@ -370,7 +370,7 @@ Within the scripts/vm/hypervisor/xenserver directory from OpenStack, LLC http://www.openstack.org swift -Within the tools/appliance/definitions/systemvmtemplate directory +Within the tools/appliance/definitions/systemvmtemplate and tools/appliance/definitions/systemvmtemplate64 directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2010-2012 Patrick Debois @@ -471,6 +471,31 @@ Within the ui/lib directory from George McGinley Smith jquery.easing.js + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) + + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + from The Dojo Foundation http://dojofoundation.org/ + require.js from http://github.com/jrburke/requirejs + licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2011, John Resig diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 74cfd1c21d6..f7eac674712 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -79,4 +79,4 @@ domr.scripts.dir=scripts/network/domr/kvm # be overridden here. # native = com.cloud.hypervisor.kvm.resource.BridgeVifDriver # openvswitch = com.cloud.hypervisor.kvm.resource.OvsBridgeDriver -#libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.BridgeVifDriver \ No newline at end of file +#libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.BridgeVifDriver diff --git a/agent/pom.xml b/agent/pom.xml index 810f33fc572..0f44c1aa297 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -35,8 +35,6 @@ org.apache.cloudstack cloud-utils ${project.version} - tests - test @@ -83,7 +81,7 @@ + value="${cs.replace.properties}" /> @@ -97,7 +95,7 @@ + value="${cs.replace.properties}" /> @@ -106,6 +104,24 @@ + + org.apache.maven.plugins + maven-dependency-plugin + 2.5.1 + + + copy-dependencies + package + + copy-dependencies + + + ${project.build.directory}/dependencies + runtime + + + + diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java index 0e020935e90..2af08e9bf21 100644 --- a/agent/src/com/cloud/agent/AgentShell.java +++ b/agent/src/com/cloud/agent/AgentShell.java @@ -38,16 +38,20 @@ import java.util.UUID; import javax.naming.ConfigurationException; +import org.apache.commons.beanutils.PropertyUtils; import org.apache.commons.httpclient.HttpClient; import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; import org.apache.commons.httpclient.methods.GetMethod; import org.apache.log4j.Logger; +import 
org.apache.log4j.PropertyConfigurator; +import org.apache.log4j.xml.DOMConfigurator; import com.cloud.agent.Agent.ExitStatus; import com.cloud.agent.dao.StorageComponent; import com.cloud.agent.dao.impl.PropertiesStorage; import com.cloud.host.Host; import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.ProcessUtil; import com.cloud.utils.PropertiesUtil; @@ -376,6 +380,18 @@ public class AgentShell implements IAgentShell { public void init(String[] args) throws ConfigurationException { + // PropertiesUtil is used both in management server and agent packages, + // it searches path under class path and common J2EE containers + // For KVM agent, do it specially here + + File file = new File("/etc/cloudstack/agent/log4j-cloud.xml"); + if(file == null || !file.exists()) { + file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); + } + DOMConfigurator.configureAndWatch(file.getAbsolutePath()); + + s_logger.info("Agent started"); + final Class c = this.getClass(); _version = c.getPackage().getImplementationVersion(); if (_version == null) { @@ -552,6 +568,9 @@ public class AgentShell implements IAgentShell { public void start() { try { + /* By default we only search for log4j.xml */ + LogUtils.initLog4j("log4j-cloud.xml"); + System.setProperty("java.net.preferIPv4Stack", "true"); String instance = getProperty(null, "instance"); diff --git a/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java b/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java index 8a3a271c4f4..516430b2fed 100644 --- a/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java +++ b/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java @@ -26,6 +26,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.URL; import java.net.URLConnection; +import java.util.HashMap; import java.util.Map; import 
java.util.Properties; @@ -77,7 +78,7 @@ import com.google.gson.Gson; * server. * */ -public abstract class ConsoleProxyResource extends ServerResourceBase implements +public class ConsoleProxyResource extends ServerResourceBase implements ServerResource { static final Logger s_logger = Logger.getLogger(ConsoleProxyResource.class); @@ -489,4 +490,26 @@ public abstract class ConsoleProxyResource extends ServerResourceBase implements } } } + + @Override + public void setName(String name) { + } + + @Override + public void setConfigParams(Map params) { + } + + @Override + public Map getConfigParams() { + return new HashMap(); + } + + @Override + public int getRunLevel() { + return 0; + } + + @Override + public void setRunLevel(int level) { + } } diff --git a/api/pom.xml b/api/pom.xml index 7461c67aaa2..8ca258f12e3 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java index a19d34436f7..fbf6121f543 100644 --- a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java +++ b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java @@ -18,6 +18,7 @@ package com.cloud.agent.api; import com.cloud.storage.StoragePool; + /** * This currently assumes that both primary and secondary storage are mounted on the XenServer. */ diff --git a/server/src/org/apache/cloudstack/region/dao/RegionSyncDao.java b/api/src/com/cloud/agent/api/ScaleVmAnswer.java similarity index 77% rename from server/src/org/apache/cloudstack/region/dao/RegionSyncDao.java rename to api/src/com/cloud/agent/api/ScaleVmAnswer.java index df287e51e32..31d6e44155f 100644 --- a/server/src/org/apache/cloudstack/region/dao/RegionSyncDao.java +++ b/api/src/com/cloud/agent/api/ScaleVmAnswer.java @@ -14,11 +14,15 @@ // KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations // under the License. -package org.apache.cloudstack.region.dao; +package com.cloud.agent.api; -import org.apache.cloudstack.region.RegionSyncVO; +public class ScaleVmAnswer extends Answer { -import com.cloud.utils.db.GenericDao; + protected ScaleVmAnswer() { + } + + public ScaleVmAnswer(ScaleVmCommand cmd, boolean result, String detail) { + super(cmd, result, detail); + } -public interface RegionSyncDao extends GenericDao { } diff --git a/api/src/com/cloud/agent/api/ScaleVmCommand.java b/api/src/com/cloud/agent/api/ScaleVmCommand.java new file mode 100644 index 00000000000..e5078d5e8c0 --- /dev/null +++ b/api/src/com/cloud/agent/api/ScaleVmCommand.java @@ -0,0 +1,113 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api; + +import com.cloud.agent.api.to.VirtualMachineTO; + +public class ScaleVmCommand extends Command { + + VirtualMachineTO vm; + String vmName; + int cpus; + Integer speed; + long minRam; + long maxRam; + + public VirtualMachineTO getVm() { + return vm; + } + + public void setVm(VirtualMachineTO vm) { + this.vm = vm; + } + + public int getCpus() { + return cpus; + } + + public ScaleVmCommand(String vmName, int cpus, + Integer speed, long minRam, long maxRam) { + super(); + this.vmName = vmName; + this.cpus = cpus; + //this.speed = speed; + this.minRam = minRam; + this.maxRam = maxRam; + this.vm = new VirtualMachineTO(1L, vmName, null, cpus, null, minRam, maxRam, null, null, false, false, null); + /*vm.setName(vmName); + vm.setCpus(cpus); + vm.setRam(minRam, maxRam);*/ + } + + public void setCpus(int cpus) { + this.cpus = cpus; + } + + public Integer getSpeed() { + return speed; + } + + public void setSpeed(Integer speed) { + this.speed = speed; + } + + public long getMinRam() { + return minRam; + } + + public void setMinRam(long minRam) { + this.minRam = minRam; + } + + public long getMaxRam() { + return maxRam; + } + + public void setMaxRam(long maxRam) { + this.maxRam = maxRam; + } + + public String getVmName() { + return vmName; + } + + public void setVmName(String vmName) { + this.vmName = vmName; + } + + public VirtualMachineTO getVirtualMachine() { + return vm; + } + + @Override + public boolean executeInSequence() { + return true; + } + + protected ScaleVmCommand() { + } + + public ScaleVmCommand(VirtualMachineTO vm) { + this.vm = vm; + } + + public boolean getLimitCpuUse() { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/api/src/com/cloud/agent/api/SecurityGroupRulesCmd.java b/api/src/com/cloud/agent/api/SecurityGroupRulesCmd.java index affad1f9b9d..4336b4c32b4 100644 --- a/api/src/com/cloud/agent/api/SecurityGroupRulesCmd.java +++ b/api/src/com/cloud/agent/api/SecurityGroupRulesCmd.java @@ -18,6 +18,7 
@@ package com.cloud.agent.api; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.util.List; import java.util.zip.DeflaterOutputStream; import org.apache.commons.codec.binary.Base64; @@ -80,6 +81,7 @@ public class SecurityGroupRulesCmd extends Command { Long msId; IpPortAndProto [] ingressRuleSet; IpPortAndProto [] egressRuleSet; + private List secIps; public SecurityGroupRulesCmd() { super(); @@ -103,6 +105,23 @@ public class SecurityGroupRulesCmd extends Command { } + public SecurityGroupRulesCmd(String guestIp, String guestMac, String vmName, Long vmId, String signature, Long seqNum, IpPortAndProto[] ingressRuleSet, IpPortAndProto[] egressRuleSet, List secIps) { + super(); + this.guestIp = guestIp; + this.vmName = vmName; + this.ingressRuleSet = ingressRuleSet; + this.egressRuleSet = egressRuleSet; + this.guestMac = guestMac; + this.signature = signature; + this.seqNum = seqNum; + this.vmId = vmId; + if (signature == null) { + String stringified = stringifyRules(); + this.signature = DigestUtils.md5Hex(stringified); + } + this.secIps = secIps; + } + @Override public boolean executeInSequence() { return true; @@ -131,6 +150,10 @@ public class SecurityGroupRulesCmd extends Command { return guestIp; } + public List getSecIps() { + return secIps; + } + public String getVmName() { return vmName; @@ -165,6 +188,20 @@ public class SecurityGroupRulesCmd extends Command { } + public String getSecIpsString() { + StringBuilder sb = new StringBuilder(); + List ips = getSecIps(); + if (ips == null) { + return "0:"; + } else { + for (String ip : ips) { + sb.append(ip).append(":"); + } + } + return sb.toString(); + } + + public String stringifyCompressedRules() { StringBuilder ruleBuilder = new StringBuilder(); for (SecurityGroupRulesCmd.IpPortAndProto ipPandP : getIngressRuleSet()) { diff --git a/api/src/com/cloud/agent/api/routing/DhcpEntryCommand.java b/api/src/com/cloud/agent/api/routing/DhcpEntryCommand.java index f0ce70e9a80..fd8d84c8c3a 
100644 --- a/api/src/com/cloud/agent/api/routing/DhcpEntryCommand.java +++ b/api/src/com/cloud/agent/api/routing/DhcpEntryCommand.java @@ -31,6 +31,7 @@ public class DhcpEntryCommand extends NetworkElementCommand { String vmIp6Address; String ip6Gateway; String duid; + private boolean isDefault; protected DhcpEntryCommand() { @@ -46,6 +47,7 @@ public class DhcpEntryCommand extends NetworkElementCommand { this.vmIpAddress = vmIpAddress; this.vmName = vmName; this.vmIp6Address = vmIp6Address; + this.setDefault(true); } public DhcpEntryCommand(String vmMac, String vmIpAddress, String vmName, String vmIp6Address, String dns, String gateway, String ip6Gateway) { @@ -129,4 +131,12 @@ public class DhcpEntryCommand extends NetworkElementCommand { public void setVmIp6Address(String ip6Address) { this.vmIp6Address = ip6Address; } + + public boolean isDefault() { + return isDefault; + } + + public void setDefault(boolean isDefault) { + this.isDefault = isDefault; + } } diff --git a/api/src/com/cloud/agent/api/to/NicTO.java b/api/src/com/cloud/agent/api/to/NicTO.java index aa2aa19cc19..ccebe115f97 100644 --- a/api/src/com/cloud/agent/api/to/NicTO.java +++ b/api/src/com/cloud/agent/api/to/NicTO.java @@ -16,12 +16,15 @@ // under the License. 
package com.cloud.agent.api.to; +import java.util.List; + public class NicTO extends NetworkTO { int deviceId; Integer networkRateMbps; Integer networkRateMulticastMbps; boolean defaultNic; String uuid; + List nicSecIps; public NicTO() { super(); @@ -69,4 +72,12 @@ public class NicTO extends NetworkTO { public String toString() { return new StringBuilder("[Nic:").append(type).append("-").append(ip).append("-").append(broadcastUri).append("]").toString(); } + + public void setNicSecIps(List secIps) { + this.nicSecIps = secIps; + } + + public List getNicSecIps() { + return nicSecIps; + } } diff --git a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java index 8f3f0eb39d6..b84d20a9239 100644 --- a/api/src/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/com/cloud/agent/api/to/VirtualMachineTO.java @@ -28,7 +28,20 @@ public class VirtualMachineTO { private BootloaderType bootloader; Type type; int cpus; + + /** + 'speed' is still here since 4.0.X/4.1.X management servers do not support + the overcommit feature yet. + + The overcommit feature sends minSpeed and maxSpeed + + So this is here for backwards compatibility with 4.0.X/4.1.X management servers + and newer agents. 
+ */ Integer speed; + Integer minSpeed; + Integer maxSpeed; + long minRam; long maxRam; String hostName; @@ -62,6 +75,22 @@ public class VirtualMachineTO { this.vncPassword = vncPassword; } + public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer minSpeed, Integer maxSpeed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { + this.id = id; + this.name = instanceName; + this.type = type; + this.cpus = cpus; + this.minSpeed = minSpeed; + this.maxSpeed = maxSpeed; + this.minRam = minRam; + this.maxRam = maxRam; + this.bootloader = bootloader; + this.os = os; + this.enableHA = enableHA; + this.limitCpuUse = limitCpuUse; + this.vncPassword = vncPassword; + } + protected VirtualMachineTO() { } @@ -105,6 +134,13 @@ public class VirtualMachineTO { return speed; } + public Integer getMinSpeed() { + return minSpeed; + } + + public Integer getMaxSpeed() { + return maxSpeed; + } public boolean getLimitCpuUse() { return limitCpuUse; } diff --git a/api/src/com/cloud/alert/Alert.java b/api/src/com/cloud/alert/Alert.java index 050f97f2ef3..31768cf193d 100644 --- a/api/src/com/cloud/alert/Alert.java +++ b/api/src/com/cloud/alert/Alert.java @@ -30,4 +30,5 @@ public interface Alert extends Identity, InternalIdentity { Date getCreatedDate(); Date getLastSent(); Date getResolved(); + boolean getArchived(); } diff --git a/api/src/com/cloud/configuration/ConfigurationService.java b/api/src/com/cloud/configuration/ConfigurationService.java index a9595fe7574..e63fcece525 100644 --- a/api/src/com/cloud/configuration/ConfigurationService.java +++ b/api/src/com/cloud/configuration/ConfigurationService.java @@ -264,6 +264,8 @@ public interface ConfigurationService { boolean removeLDAP(LDAPRemoveCmd cmd); + LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd); + /** * @param offering * @return diff --git a/api/src/com/cloud/dc/DataCenter.java b/api/src/com/cloud/dc/DataCenter.java 
index 0c77c670dd1..5f32988da50 100644 --- a/api/src/com/cloud/dc/DataCenter.java +++ b/api/src/com/cloud/dc/DataCenter.java @@ -37,6 +37,10 @@ public interface DataCenter extends InfrastructureEntity, Grouping, Identity, In String getDns2(); + String getIp6Dns1(); + + String getIp6Dns2(); + String getGuestNetworkCidr(); String getName(); diff --git a/api/src/com/cloud/event/Event.java b/api/src/com/cloud/event/Event.java index 1a61636828a..b8def4c6281 100644 --- a/api/src/com/cloud/event/Event.java +++ b/api/src/com/cloud/event/Event.java @@ -40,4 +40,5 @@ public interface Event extends ControlledEntity, Identity, InternalIdentity { String getLevel(); long getStartId(); String getParameters(); + boolean getArchived(); } diff --git a/api/src/com/cloud/exception/PermissionDeniedException.java b/api/src/com/cloud/exception/PermissionDeniedException.java index b95d49b662a..718de0df7a9 100644 --- a/api/src/com/cloud/exception/PermissionDeniedException.java +++ b/api/src/com/cloud/exception/PermissionDeniedException.java @@ -32,6 +32,10 @@ public class PermissionDeniedException extends CloudRuntimeException { super(message); } + public PermissionDeniedException(String message, Throwable cause){ + super(message, cause); + } + protected PermissionDeniedException() { super(); } diff --git a/api/src/com/cloud/exception/RequestLimitException.java b/api/src/com/cloud/exception/RequestLimitException.java index 0142f8e8726..4d7504ed66d 100644 --- a/api/src/com/cloud/exception/RequestLimitException.java +++ b/api/src/com/cloud/exception/RequestLimitException.java @@ -17,14 +17,12 @@ package com.cloud.exception; import com.cloud.utils.SerialVersionUID; -import com.cloud.utils.exception.CloudRuntimeException; /** * Exception thrown if number of requests is over api rate limit set. 
- * @author minc * */ -public class RequestLimitException extends CloudRuntimeException { +public class RequestLimitException extends PermissionDeniedException { private static final long serialVersionUID = SerialVersionUID.AccountLimitException; diff --git a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java index d52c36b12f5..aff81b0018d 100644 --- a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java +++ b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java @@ -46,4 +46,10 @@ public interface HypervisorCapabilities extends Identity, InternalIdentity{ * @return the max. data volumes per VM supported by hypervisor */ Integer getMaxDataVolumesLimit(); + + /** + * @return the max. hosts per cluster supported by hypervisor + */ + Integer getMaxHostsPerCluster(); + } diff --git a/api/src/com/cloud/network/IpAddress.java b/api/src/com/cloud/network/IpAddress.java index 47df4d6523b..fce8f38c2f2 100644 --- a/api/src/com/cloud/network/IpAddress.java +++ b/api/src/com/cloud/network/IpAddress.java @@ -87,4 +87,7 @@ public interface IpAddress extends ControlledEntity, Identity, InternalIdentity * @param vpcId */ void setVpcId(Long vpcId); + String getVmIp(); + void setVmIp(String vmIp); + } diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index 2bf7b7f8000..efed5cd4f8b 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -283,12 +283,21 @@ public interface Network extends ControlledEntity, StateObject, I String getGateway(); + // "cidr" is the Cloudstack managed address space, all CloudStack managed vms get IP address from "cidr", + // In general "cidr" also serves as the network CIDR + // But in case IP reservation is configured for a Guest network, "networkcidr" is the Effective network CIDR for that network, + // "cidr" will still continue to be the effective address space for CloudStack managed vms in that 
Guest network String getCidr(); + // "networkcidr" is the network CIDR of the guest network which uses IP reservation. + // It is the summation of "cidr" and the reservedIPrange(the address space used for non CloudStack purposes). + // For networks not configured with IP reservation, "networkcidr" is always null + String getNetworkCidr(); + String getIp6Gateway(); - + String getIp6Cidr(); - + long getDataCenterId(); long getNetworkOfferingId(); diff --git a/api/src/com/cloud/network/NetworkModel.java b/api/src/com/cloud/network/NetworkModel.java index 783e5cc9c85..9731a61667d 100644 --- a/api/src/com/cloud/network/NetworkModel.java +++ b/api/src/com/cloud/network/NetworkModel.java @@ -255,4 +255,6 @@ public interface NetworkModel { boolean isIP6AddressAvailableInVlan(long vlanId); void checkIp6Parameters(String startIPv6, String endIPv6, String ip6Gateway, String ip6Cidr) throws InvalidParameterValueException; + + void checkRequestedIpAddresses(long networkId, String ip4, String ip6) throws InvalidParameterValueException; } \ No newline at end of file diff --git a/api/src/com/cloud/network/NetworkProfile.java b/api/src/com/cloud/network/NetworkProfile.java index 37d46ac8395..2f56645139c 100644 --- a/api/src/com/cloud/network/NetworkProfile.java +++ b/api/src/com/cloud/network/NetworkProfile.java @@ -38,6 +38,7 @@ public class NetworkProfile implements Network { private TrafficType trafficType; private String gateway; private String cidr; + private String networkCidr; private String ip6Gateway; private String ip6Cidr; private long networkOfferingId; @@ -65,6 +66,7 @@ public class NetworkProfile implements Network { this.trafficType = network.getTrafficType(); this.gateway = network.getGateway(); this.cidr = network.getCidr(); + this.networkCidr = network.getNetworkCidr(); this.ip6Gateway = network.getIp6Gateway(); this.ip6Cidr = network.getIp6Cidr(); this.networkOfferingId = network.getNetworkOfferingId(); @@ -162,6 +164,11 @@ public class NetworkProfile implements 
Network { return cidr; } + @Override + public String getNetworkCidr() { + return networkCidr; + } + @Override public long getNetworkOfferingId() { return networkOfferingId; diff --git a/api/src/com/cloud/network/NetworkService.java b/api/src/com/cloud/network/NetworkService.java index ace1bb6c45e..ab6d7bfd882 100755 --- a/api/src/com/cloud/network/NetworkService.java +++ b/api/src/com/cloud/network/NetworkService.java @@ -19,9 +19,10 @@ package com.cloud.network; import java.util.List; import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementorsCmd; +import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; -import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; +import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; @@ -33,6 +34,8 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.Pair; +import com.cloud.vm.Nic; +import com.cloud.vm.NicSecondaryIp; /** * The NetworkService interface is the "public" api to entities that make requests to the orchestration engine @@ -66,10 +69,8 @@ public interface NetworkService { IpAddress getIp(long id); - Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, - String domainSuffix, Long networkOfferingId, Boolean changeCidr); - + String domainSuffix, Long networkOfferingId, Boolean changeCidr, String guestVmCidr); PhysicalNetwork createPhysicalNetwork(Long zoneId, String vnetRange, String networkSpeed, List isolationMethods, String broadcastDomainRange, Long domainId, List tags, String name); @@ -155,5 +156,13 @@ public interface NetworkService { Network 
createPrivateNetwork(String networkName, String displayText, long physicalNetworkId, String vlan, String startIp, String endIP, String gateway, String netmask, long networkOwnerId, Long vpcId) throws ResourceAllocationException, ConcurrentOperationException, InsufficientCapacityException; - + + /* Requests an IP address for the guest nic */ + String allocateSecondaryGuestIP(Account account, long zoneId, Long nicId, + Long networkId, String ipaddress) throws InsufficientAddressCapacityException; + + boolean releaseSecondaryIpFromNic(long ipAddressId); + + /* lists the nic informaton */ + List listNics(ListNicsCmd listNicsCmd); } diff --git a/core/src/com/cloud/resource/DiskPreparer.java b/api/src/com/cloud/network/TrafficLabel.java similarity index 52% rename from core/src/com/cloud/resource/DiskPreparer.java rename to api/src/com/cloud/network/TrafficLabel.java index 77b8f7c1b7f..782df14b7dc 100644 --- a/core/src/com/cloud/resource/DiskPreparer.java +++ b/api/src/com/cloud/network/TrafficLabel.java @@ -14,29 +14,23 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.resource; +package com.cloud.network; -import com.cloud.storage.VolumeVO; -import com.cloud.template.VirtualMachineTemplate.BootloaderType; -import com.cloud.utils.component.Adapter; +import com.cloud.network.Networks.TrafficType; -/** - * DiskMounter mounts and unmounts disk for VMs - * to consume. - * +/* User can provide a Label, while configuring a zone, to specify + * a physical network that is to be used for a traffic type defined + * by CloudStack. See the enum data type TrafficType. This label is + * called Traffic label. This might encapsulate physical network + * specific properties like VLAN ID, name of virtual network object or more. + * The name of virtual network object is dependent on type of hypervisor. 
+ * For example it is name of xenserver bridge in case of XenServer and + * name of virtual switch in case of VMware hypervisor */ -public interface DiskPreparer extends Adapter { - /** - * Mounts a volumeVO and returns a path. - * - * @param vol - * @return - */ - public String mount(String vmName, VolumeVO vol, BootloaderType type); - - /** - * Unmounts - */ - public boolean unmount(String path); +public interface TrafficLabel { + + public TrafficType getTrafficType(); + + public String getNetworkLabel(); } diff --git a/api/src/com/cloud/network/rules/RulesService.java b/api/src/com/cloud/network/rules/RulesService.java index 921a86e865f..d47b38f9d43 100644 --- a/api/src/com/cloud/network/rules/RulesService.java +++ b/api/src/com/cloud/network/rules/RulesService.java @@ -25,6 +25,7 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.Account; import com.cloud.utils.Pair; +import com.cloud.utils.net.Ip; public interface RulesService { Pair, Integer> searchStaticNatRules(Long ipId, Long id, Long vmId, Long start, Long size, String accountName, Long domainId, Long projectId, boolean isRecursive, boolean listAll); @@ -43,7 +44,7 @@ public interface RulesService { * @throws NetworkRuleConflictException * if conflicts in the network rules are detected. 
*/ - PortForwardingRule createPortForwardingRule(PortForwardingRule rule, Long vmId, boolean openFirewall) throws NetworkRuleConflictException; + PortForwardingRule createPortForwardingRule(PortForwardingRule rule, Long vmId, Ip vmIp, boolean openFirewall) throws NetworkRuleConflictException; /** * Revokes a port forwarding rule @@ -66,7 +67,7 @@ public interface RulesService { boolean applyPortForwardingRules(long ipAdddressId, Account caller) throws ResourceUnavailableException; - boolean enableStaticNat(long ipAddressId, long vmId, long networkId, boolean isSystemVm) throws NetworkRuleConflictException, ResourceUnavailableException; + boolean enableStaticNat(long ipAddressId, long vmId, long networkId, boolean isSystemVm, String vmGuestIp) throws NetworkRuleConflictException, ResourceUnavailableException; PortForwardingRule getPortForwardigRule(long ruleId); diff --git a/api/src/com/cloud/network/security/SecurityGroupService.java b/api/src/com/cloud/network/security/SecurityGroupService.java index c6480323780..397de1ccb46 100644 --- a/api/src/com/cloud/network/security/SecurityGroupService.java +++ b/api/src/com/cloud/network/security/SecurityGroupService.java @@ -24,6 +24,7 @@ import org.apache.cloudstack.api.command.user.securitygroup.CreateSecurityGroupC import org.apache.cloudstack.api.command.user.securitygroup.DeleteSecurityGroupCmd; import org.apache.cloudstack.api.command.user.securitygroup.RevokeSecurityGroupEgressCmd; import org.apache.cloudstack.api.command.user.securitygroup.RevokeSecurityGroupIngressCmd; +import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; @@ -45,5 +46,6 @@ public interface SecurityGroupService { public List authorizeSecurityGroupIngress(AuthorizeSecurityGroupIngressCmd cmd); public List authorizeSecurityGroupEgress(AuthorizeSecurityGroupEgressCmd cmd); - + public boolean securityGroupRulesForVmSecIp(Long 
nicId, Long networkId, + String secondaryIp, boolean ruleAction); } diff --git a/api/src/com/cloud/network/vpc/VpcService.java b/api/src/com/cloud/network/vpc/VpcService.java index cc66b58fe64..9bf1beea5f0 100644 --- a/api/src/com/cloud/network/vpc/VpcService.java +++ b/api/src/com/cloud/network/vpc/VpcService.java @@ -41,7 +41,7 @@ public interface VpcService { public VpcOffering getVpcOffering(long vpcOfferingId); - public VpcOffering createVpcOffering(String name, String displayText, List supportedServices); + public VpcOffering createVpcOffering(String name, String displayText, List supportedServices, Map> serviceProviders); public Vpc getVpc(long vpcId); @@ -246,5 +246,5 @@ public interface VpcService { InsufficientAddressCapacityException, ConcurrentOperationException; public Network updateVpcGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long ntwkOffId, Boolean changeCidr); + User callerUser, String domainSuffix, Long ntwkOffId, Boolean changeCidr, String guestVmCidr); } diff --git a/api/src/com/cloud/offering/ServiceOffering.java b/api/src/com/cloud/offering/ServiceOffering.java index 4d715898a75..d6c215f42f0 100755 --- a/api/src/com/cloud/offering/ServiceOffering.java +++ b/api/src/com/cloud/offering/ServiceOffering.java @@ -77,6 +77,11 @@ public interface ServiceOffering extends InfrastructureEntity, InternalIdentity, */ boolean getLimitCpuUse(); + /** + * @return Does this service plan support Volatile VM that is, discard VM's root disk and create a new one on reboot? 
+ */ + boolean getVolatileVm(); + /** * @return the rate in megabits per sec to which a VM's network interface is throttled to */ diff --git a/api/src/com/cloud/resource/ResourceService.java b/api/src/com/cloud/resource/ResourceService.java index 1e77cc8a4e2..08e2585d1a7 100755 --- a/api/src/com/cloud/resource/ResourceService.java +++ b/api/src/com/cloud/resource/ResourceService.java @@ -71,7 +71,7 @@ public interface ResourceService { boolean deleteCluster(DeleteClusterCmd cmd); - Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate); + Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, String managedstate,Float memoryOvercommitRatio, Float cpuOvercommitRatio); List discoverHosts(AddHostCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index 1736da3778c..1e6ca8d0b67 100755 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -29,6 +29,8 @@ import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.host.UpdateHostPasswordCmd; import org.apache.cloudstack.api.command.admin.pod.ListPodsByCmd; +import org.apache.cloudstack.api.command.admin.resource.ArchiveAlertsCmd; +import org.apache.cloudstack.api.command.admin.resource.DeleteAlertsCmd; import org.apache.cloudstack.api.command.admin.resource.ListAlertsCmd; import org.apache.cloudstack.api.command.admin.resource.ListCapacityCmd; import org.apache.cloudstack.api.command.admin.resource.UploadCustomCertificateCmd; @@ -40,12 +42,12 @@ import org.apache.cloudstack.api.command.admin.systemvm.UpgradeSystemVMCmd; import 
org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd; import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd; import org.apache.cloudstack.api.command.user.config.ListCapabilitiesCmd; +import org.apache.cloudstack.api.command.user.event.ArchiveEventsCmd; +import org.apache.cloudstack.api.command.user.event.DeleteEventsCmd; import org.apache.cloudstack.api.command.user.guest.ListGuestOsCategoriesCmd; import org.apache.cloudstack.api.command.user.guest.ListGuestOsCmd; import org.apache.cloudstack.api.command.user.iso.ListIsosCmd; import org.apache.cloudstack.api.command.user.iso.UpdateIsoCmd; -import org.apache.cloudstack.api.command.user.offering.ListDiskOfferingsCmd; -import org.apache.cloudstack.api.command.user.offering.ListServiceOfferingsCmd; import org.apache.cloudstack.api.command.user.ssh.CreateSSHKeyPairCmd; import org.apache.cloudstack.api.command.user.ssh.DeleteSSHKeyPairCmd; import org.apache.cloudstack.api.command.user.ssh.ListSSHKeyPairsCmd; @@ -55,12 +57,10 @@ import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd; import org.apache.cloudstack.api.command.user.vmgroup.UpdateVMGroupCmd; import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd; -import org.apache.cloudstack.api.command.user.zone.ListZonesByCmd; import com.cloud.alert.Alert; import com.cloud.capacity.Capacity; import com.cloud.configuration.Configuration; -import com.cloud.dc.DataCenter; import com.cloud.dc.Pod; import com.cloud.dc.Vlan; import com.cloud.domain.Domain; @@ -72,8 +72,6 @@ import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorCapabilities; import com.cloud.network.IpAddress; -import com.cloud.offering.DiskOffering; -import com.cloud.offering.ServiceOffering; import com.cloud.org.Cluster; import com.cloud.storage.GuestOS; import com.cloud.storage.GuestOsCategory; @@ -194,6 
+192,34 @@ public interface ManagementService { */ Pair, Integer> searchForAlerts(ListAlertsCmd cmd); + /** + * Archive alerts + * @param cmd + * @return True on success. False otherwise. + */ + boolean archiveAlerts(ArchiveAlertsCmd cmd); + + /** + * Delete alerts + * @param cmd + * @return True on success. False otherwise. + */ + boolean deleteAlerts(DeleteAlertsCmd cmd); + + /** + * Archive events + * @param cmd + * @return True on success. False otherwise. + */ + boolean archiveEvents(ArchiveEventsCmd cmd); + + /** + * Delete events + * @param cmd + * @return True on success. False otherwise. + */ + boolean deleteEvents(DeleteEventsCmd cmd); + /** * list all the capacity rows in capacity operations table * diff --git a/api/src/com/cloud/storage/Snapshot.java b/api/src/com/cloud/storage/Snapshot.java index 3f6b8f5a8e4..f71265cd230 100644 --- a/api/src/com/cloud/storage/Snapshot.java +++ b/api/src/com/cloud/storage/Snapshot.java @@ -19,7 +19,6 @@ package com.cloud.storage; import java.util.Date; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.fsm.StateObject; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.api.Identity; @@ -55,28 +54,13 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, } public enum State { + Allocated, Creating, CreatedOnPrimary, BackingUp, BackedUp, Error; - private final static StateMachine2 s_fsm = new StateMachine2(); - - public static StateMachine2 getStateMachine() { - return s_fsm; - } - - static { - s_fsm.addTransition(null, Event.CreateRequested, Creating); - s_fsm.addTransition(Creating, Event.OperationSucceeded, CreatedOnPrimary); - s_fsm.addTransition(Creating, Event.OperationNotPerformed, BackedUp); - s_fsm.addTransition(Creating, Event.OperationFailed, Error); - s_fsm.addTransition(CreatedOnPrimary, Event.BackupToSecondary, BackingUp); - s_fsm.addTransition(BackingUp, Event.OperationSucceeded, 
BackedUp); - s_fsm.addTransition(BackingUp, Event.OperationFailed, Error); - } - public String toString() { return this.name(); } @@ -107,7 +91,7 @@ public interface Snapshot extends ControlledEntity, Identity, InternalIdentity, Date getCreated(); - Type getType(); + Type getRecurringType(); State getState(); diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java index f517927eac1..091eef182cc 100644 --- a/api/src/com/cloud/storage/StoragePool.java +++ b/api/src/com/cloud/storage/StoragePool.java @@ -99,10 +99,7 @@ public interface StoragePool extends Identity, InternalIdentity { /** * @return */ - String getStorageProvider(); - - /** - * @return - */ - String getStorageType(); + Long getStorageProviderId(); + + boolean isInMaintenance(); } diff --git a/api/src/com/cloud/storage/StoragePoolStatus.java b/api/src/com/cloud/storage/StoragePoolStatus.java index 94dd686a8f0..a35f706d702 100644 --- a/api/src/com/cloud/storage/StoragePoolStatus.java +++ b/api/src/com/cloud/storage/StoragePoolStatus.java @@ -17,11 +17,6 @@ package com.cloud.storage; public enum StoragePoolStatus { - Creating, - Up, - PrepareForMaintenance, - ErrorInMaintenance, - CancelMaintenance, - Maintenance, - Removed; + Initial, Initialized, Creating, Attaching, Up, PrepareForMaintenance, + ErrorInMaintenance, CancelMaintenance, Maintenance, Removed; } diff --git a/api/src/com/cloud/storage/StorageService.java b/api/src/com/cloud/storage/StorageService.java index bd7dfd3a67a..63c5023ee91 100644 --- a/api/src/com/cloud/storage/StorageService.java +++ b/api/src/com/cloud/storage/StorageService.java @@ -22,17 +22,10 @@ import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaint import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; -import 
org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; -import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.user.Account; public interface StorageService{ /** @@ -51,37 +44,6 @@ public interface StorageService{ StoragePool createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException; - /** - * Creates the database object for a volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, - * name) - * @return the volume object - * @throws PermissionDeniedException - */ - Volume allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; - - /** - * Creates the volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, - * name) - * @return the volume object - */ - Volume createVolume(CreateVolumeCmd cmd); - - - /** - * Resizes the volume based on the given criteria - * - * @param cmd - * the API command wrapping the criteria - * @return the volume object - */ - Volume resizeVolume(ResizeVolumeCmd cmd); - /** * Delete the storage pool * @@ -120,19 +82,4 @@ public interface StorageService{ public StoragePool updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException; public StoragePool getStoragePool(long id); - - Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException; - - - /** - * 
Uploads the volume to secondary storage - * - * @param UploadVolumeCmd cmd - * - * @return Volume object - */ - Volume uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException; - - boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; - } diff --git a/api/src/com/cloud/storage/Volume.java b/api/src/com/cloud/storage/Volume.java index 284c83d9e93..4903594f0af 100755 --- a/api/src/com/cloud/storage/Volume.java +++ b/api/src/com/cloud/storage/Volume.java @@ -39,9 +39,12 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba Snapshotting("There is a snapshot created on this volume, not backed up to secondary storage yet"), Resizing("The volume is being resized"), Expunging("The volume is being expunging"), + Expunged("The volume is being expunging"), Destroy("The volume is destroyed, and can't be recovered."), Destroying("The volume is destroying, and can't be recovered."), - UploadOp ("The volume upload operation is in progress or in short the volume is on secondary storage"); + UploadOp ("The volume upload operation is in progress or in short the volume is on secondary storage"), + Uploading("volume is uploading"), + Uploaded("volume is uploaded"); String _description; @@ -70,12 +73,15 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba s_fsm.addTransition(Resizing, Event.OperationSucceeded, Ready); s_fsm.addTransition(Resizing, Event.OperationFailed, Ready); s_fsm.addTransition(Allocated, Event.UploadRequested, UploadOp); - s_fsm.addTransition(UploadOp, Event.CopyRequested, Creating);// CopyRequested for volume from sec to primary storage + s_fsm.addTransition(Uploaded, Event.CopyRequested, Creating);// CopyRequested for volume from sec to primary storage s_fsm.addTransition(Creating, Event.CopySucceeded, Ready); - s_fsm.addTransition(Creating, Event.CopyFailed, UploadOp);// Copying volume from sec to primary failed. 
+ s_fsm.addTransition(Creating, Event.CopyFailed, Uploaded);// Copying volume from sec to primary failed. s_fsm.addTransition(UploadOp, Event.DestroyRequested, Destroy); s_fsm.addTransition(Ready, Event.DestroyRequested, Destroy); s_fsm.addTransition(Destroy, Event.ExpungingRequested, Expunging); + s_fsm.addTransition(Expunging, Event.ExpungingRequested, Expunging); + s_fsm.addTransition(Expunging, Event.OperationSucceeded, Expunged); + s_fsm.addTransition(Expunging, Event.OperationFailed, Expunging); s_fsm.addTransition(Ready, Event.SnapshotRequested, Snapshotting); s_fsm.addTransition(Snapshotting, Event.OperationSucceeded, Ready); s_fsm.addTransition(Snapshotting, Event.OperationFailed, Ready); @@ -83,6 +89,9 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba s_fsm.addTransition(Migrating, Event.OperationSucceeded, Ready); s_fsm.addTransition(Migrating, Event.OperationFailed, Ready); s_fsm.addTransition(Destroy, Event.OperationSucceeded, Destroy); + s_fsm.addTransition(UploadOp, Event.OperationSucceeded, Uploaded); + s_fsm.addTransition(UploadOp, Event.OperationFailed, Allocated); + s_fsm.addTransition(Uploaded, Event.DestroyRequested, Destroy); } } @@ -110,7 +119,7 @@ public interface Volume extends ControlledEntity, Identity, InternalIdentity, Ba /** * @return total size of the partition */ - long getSize(); + Long getSize(); /** * @return the vm instance id diff --git a/api/src/com/cloud/storage/VolumeApiService.java b/api/src/com/cloud/storage/VolumeApiService.java new file mode 100644 index 00000000000..8517988dfc6 --- /dev/null +++ b/api/src/com/cloud/storage/VolumeApiService.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.user.Account; + +public interface VolumeApiService { + /** + * Creates the database object for a volume based on the given criteria + * + * @param cmd + * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, + * name) + * @return the volume object + * @throws PermissionDeniedException + */ + Volume allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException; + + /** + * Creates the volume based on the given criteria + * + * @param cmd + * the API command wrapping the criteria (account/domainId [admin only], zone, diskOffering, snapshot, + * name) + * @return the volume object + */ + Volume createVolume(CreateVolumeCmd cmd); + + + /** + * Resizes the volume based on the given criteria + * + * 
@param cmd + * the API command wrapping the criteria + * @return the volume object + */ + Volume resizeVolume(ResizeVolumeCmd cmd); + + Volume migrateVolume(MigrateVolumeCmd cmd) throws ConcurrentOperationException; + + /** + * Uploads the volume to secondary storage + * + * @param UploadVolumeCmd cmd + * + * @return Volume object + */ + Volume uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException; + + boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; + + Volume attachVolumeToVM(AttachVolumeCmd command); + + Volume detachVolumeFromVM(DetachVolumeCmd cmmd); +} diff --git a/api/src/com/cloud/template/TemplateService.java b/api/src/com/cloud/template/TemplateService.java index 11475d46b8a..7e831fb0055 100755 --- a/api/src/com/cloud/template/TemplateService.java +++ b/api/src/com/cloud/template/TemplateService.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd; +import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; @@ -32,6 +33,8 @@ import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; import com.cloud.exception.InternalErrorException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.user.Account; +import com.cloud.utils.exception.CloudRuntimeException; public interface TemplateService { @@ -87,4 +90,11 @@ public interface TemplateService { List listTemplatePermissions(BaseListTemplateOrIsoPermissionsCmd cmd); boolean 
updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissionsCmd cmd); + + VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, + Account templateOwner) throws ResourceAllocationException; + + VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) + throws CloudRuntimeException; + } diff --git a/api/src/com/cloud/template/VirtualMachineTemplate.java b/api/src/com/cloud/template/VirtualMachineTemplate.java index cdfe8d38dc5..cedc793c197 100755 --- a/api/src/com/cloud/template/VirtualMachineTemplate.java +++ b/api/src/com/cloud/template/VirtualMachineTemplate.java @@ -37,6 +37,7 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte featured, // returns templates that have been marked as featured and public self, // returns templates that have been registered or created by the calling user selfexecutable, // same as self, but only returns templates that are ready to be deployed with + shared, // including templates that have been granted to the calling user by another user sharedexecutable, // ready templates that have been granted to the calling user by another user executable, // templates that are owned by the calling user, or public templates, that can be used to deploy a community, // returns templates that have been marked as public but not featured diff --git a/api/src/com/cloud/user/AccountService.java b/api/src/com/cloud/user/AccountService.java index 8026891c5fa..f9c6ddb38ef 100755 --- a/api/src/com/cloud/user/AccountService.java +++ b/api/src/com/cloud/user/AccountService.java @@ -62,7 +62,7 @@ public interface AccountService { * @return the user if created successfully, null otherwise */ UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, Long domainId, String networkDomain, - Map details, String accountUUID, String userUUID, Integer regionId); + Map details); /** * 
Locks a user by userId. A locked user cannot access the API, but will still have running VMs/IP addresses @@ -77,7 +77,7 @@ public interface AccountService { User getSystemUser(); - User createUser(String userName, String password, String firstName, String lastName, String email, String timeZone, String accountName, Long domainId, String userUUID, Integer regionId); + User createUser(String userName, String password, String firstName, String lastName, String email, String timeZone, String accountName, Long domainId); boolean isAdmin(short accountType); diff --git a/api/src/com/cloud/user/DomainService.java b/api/src/com/cloud/user/DomainService.java index 1a9635499f1..1f030196420 100644 --- a/api/src/com/cloud/user/DomainService.java +++ b/api/src/com/cloud/user/DomainService.java @@ -27,7 +27,7 @@ import com.cloud.utils.Pair; public interface DomainService { - Domain createDomain(String name, Long parentId, String networkDomain, String domainUUID, Integer regionId); + Domain createDomain(String name, Long parentId, String networkDomain); Domain getDomain(long id); diff --git a/api/src/com/cloud/vm/Nic.java b/api/src/com/cloud/vm/Nic.java index 9d21130327a..b2f6976240c 100644 --- a/api/src/com/cloud/vm/Nic.java +++ b/api/src/com/cloud/vm/Nic.java @@ -151,4 +151,5 @@ public interface Nic extends Identity, InternalIdentity { String getIp6Cidr(); String getIp6Address(); + boolean getSecondaryIp(); } diff --git a/api/src/com/cloud/vm/NicProfile.java b/api/src/com/cloud/vm/NicProfile.java index e9e091cc2d7..5970ccd24ee 100644 --- a/api/src/com/cloud/vm/NicProfile.java +++ b/api/src/com/cloud/vm/NicProfile.java @@ -50,6 +50,8 @@ public class NicProfile implements InternalIdentity { Integer deviceId; String dns1; String dns2; + String ip6Dns1; + String ip6Dns2; Integer networkRate; boolean isSecurityGroupEnabled; String name; @@ -332,4 +334,20 @@ public class NicProfile implements InternalIdentity { this.requestedIpv6 = requestedIpv6; } + public String getIp6Dns1() { + 
return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } + } diff --git a/core/src/com/cloud/vm/VirtualEnvironment.java b/api/src/com/cloud/vm/NicSecondaryIp.java similarity index 63% rename from core/src/com/cloud/vm/VirtualEnvironment.java rename to api/src/com/cloud/vm/NicSecondaryIp.java index 79d4a59bbfc..655d172b33f 100644 --- a/core/src/com/cloud/vm/VirtualEnvironment.java +++ b/api/src/com/cloud/vm/NicSecondaryIp.java @@ -16,31 +16,21 @@ // under the License. package com.cloud.vm; -import java.util.List; +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + /** - * - * be an information carrier within one thread only. - * + * Nic represents one nic on the VM. */ -public class VirtualEnvironment { +public interface NicSecondaryIp extends ControlledEntity, Identity, InternalIdentity { /** - * The actual machine + * @return id in the CloudStack database */ - public VirtualMachine machine; - - /** - * Disks to assign to the machine in order. - */ - public List disks; - - /** - * Networks to assign to the machine. - */ - public List networks; - - /** - * Boot options to assign to the machine. 
- */ - public String bootOptions; + long getId(); + long getNicId(); + String getIp4Address(); + long getNetworkId(); + long getVmId(); } diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java index fb574fa5848..ea89eda89d2 100755 --- a/api/src/com/cloud/vm/UserVmService.java +++ b/api/src/com/cloud/vm/UserVmService.java @@ -23,7 +23,6 @@ import javax.naming.InsufficientResourcesException; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -103,24 +102,6 @@ public interface UserVmService { */ UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException; - /** - * Attaches the specified volume to the specified VM - * - * @param cmd - * - the command specifying volumeId and vmId - * @return the Volume object if attach worked successfully. - */ - Volume attachVolumeToVM(AttachVolumeCmd cmd); - - /** - * Detaches the specified volume from the VM it is currently attached to. - * - * @param cmd - * - the command specifying volumeId - * @return the Volume object if detach worked successfully. - */ - Volume detachVolumeFromVM(DetachVolumeCmd cmmd); - UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, ExecutionException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; @@ -151,28 +132,6 @@ public interface UserVmService { UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationException; - /** - * Create a template database record in preparation for creating a private template. 
- * - * @param cmd - * the command object that defines the name, display text, snapshot/volume, bits, public/private, etc. - * for the - * private template - * @param templateOwner - * TODO - * @return the vm template object if successful, null otherwise - * @throws ResourceAllocationException - */ - VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException; - - /** - * Creates a private template from a snapshot of a VM - * - * @param cmd - * - the command specifying snapshotId, name, description - * @return a template if successfully created, null otherwise - */ - VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd cmd); /** * Creates a Basic Zone User VM in the database and returns the VM to the caller. diff --git a/api/src/com/cloud/vm/VirtualMachineProfile.java b/api/src/com/cloud/vm/VirtualMachineProfile.java index 0fab4436807..33a9171e732 100644 --- a/api/src/com/cloud/vm/VirtualMachineProfile.java +++ b/api/src/com/cloud/vm/VirtualMachineProfile.java @@ -136,4 +136,10 @@ public interface VirtualMachineProfile { BootloaderType getBootLoaderType(); Map getParameters(); + + Float getCpuOvercommitRatio(); + + Float getMemoryOvercommitRatio(); + + } diff --git a/api/src/org/apache/cloudstack/acl/APIChecker.java b/api/src/org/apache/cloudstack/acl/APIChecker.java index 2e2b73ba782..0d0dfd1be4e 100644 --- a/api/src/org/apache/cloudstack/acl/APIChecker.java +++ b/api/src/org/apache/cloudstack/acl/APIChecker.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.acl; import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.RequestLimitException; import com.cloud.user.User; import com.cloud.utils.component.Adapter; @@ -27,5 +26,5 @@ public interface APIChecker extends Adapter { // If true, apiChecker has checked the operation // If false, apiChecker is unable to handle the operation or not implemented // On exception, checkAccess failed don't allow - boolean 
checkAccess(User user, String apiCommandName) throws PermissionDeniedException, RequestLimitException; + boolean checkAccess(User user, String apiCommandName) throws PermissionDeniedException; } diff --git a/api/src/org/apache/cloudstack/api/ACL.java b/api/src/org/apache/cloudstack/api/ACL.java index 3623d1ac523..ce93b6aa7ae 100644 --- a/api/src/org/apache/cloudstack/api/ACL.java +++ b/api/src/org/apache/cloudstack/api/ACL.java @@ -22,10 +22,14 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; + @Retention(RetentionPolicy.RUNTIME) @Target({ FIELD }) public @interface ACL { + AccessType accessType() default AccessType.ListEntry; + boolean checkKeyAccess() default false; boolean checkValueAccess() default false; } diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index cd7d700d2b5..b40b26ce57c 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -46,6 +46,7 @@ public class ApiConstants { public static final String COMPONENT = "component"; public static final String CPU_NUMBER = "cpunumber"; public static final String CPU_SPEED = "cpuspeed"; + public static final String CPU_OVERCOMMIT_RATIO="cpuovercommitratio"; public static final String CREATED = "created"; public static final String CUSTOMIZED = "customized"; public static final String DESCRIPTION = "description"; @@ -58,6 +59,8 @@ public class ApiConstants { public static final String DISPLAY_TEXT = "displaytext"; public static final String DNS1 = "dns1"; public static final String DNS2 = "dns2"; + public static final String IP6_DNS1 = "ip6dns1"; + public static final String IP6_DNS2 = "ip6dns2"; public static final String DOMAIN = "domain"; public static final String DOMAIN_ID = "domainid"; public static final String DURATION = "duration"; 
@@ -119,6 +122,7 @@ public class ApiConstants { public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; public static final String MEMORY = "memory"; + public static final String MEMORY_OVERCOMMIT_RATIO="memoryovercommitratio"; public static final String MODE = "mode"; public static final String NAME = "name"; public static final String METHOD_NAME = "methodname"; @@ -218,6 +222,7 @@ public class ApiConstants { public static final String VM_LIMIT = "vmlimit"; public static final String VM_TOTAL = "vmtotal"; public static final String VNET = "vnet"; + public static final String IS_VOLATILE = "isvolatile"; public static final String VOLUME_ID = "volumeid"; public static final String ZONE_ID = "zoneid"; public static final String ZONE_NAME = "zonename"; @@ -355,7 +360,10 @@ public class ApiConstants { public static final String CUSTOM_DISK_OFF_MAX_SIZE = "customdiskofferingmaxsize"; public static final String DEFAULT_ZONE_ID = "defaultzoneid"; public static final String GUID = "guid"; - + public static final String VSWITCH_TYPE_GUEST_TRAFFIC = "guestvswitchtype"; + public static final String VSWITCH_TYPE_PUBLIC_TRAFFIC = "publicvswitchtype"; + public static final String VSWITCH_NAME_GUEST_TRAFFIC = "guestvswitchname"; + public static final String VSWITCH_NAME_PUBLIC_TRAFFIC = "publicvswitchname"; public static final String EXTERNAL_SWITCH_MGMT_DEVICE_ID = "vsmdeviceid"; public static final String EXTERNAL_SWITCH_MGMT_DEVICE_NAME = "vsmdevicename"; public static final String EXTERNAL_SWITCH_MGMT_DEVICE_STATE = "vsmdevicestate"; @@ -374,9 +382,8 @@ public class ApiConstants { public static final String VSM_CONFIG_STATE = "vsmconfigstate"; public static final String VSM_DEVICE_STATE = "vsmdevicestate"; public static final String ADD_VSM_FLAG = "addvsmflag"; - public static final String END_POINT = "endpoint"; - public static final String REGION_ID = "regionid"; - public static final String IS_PROPAGATE = "ispropagate"; + public static 
final String END_POINT = "endpoint"; + public static final String REGION_ID = "regionid"; public static final String VPC_OFF_ID = "vpcofferingid"; public static final String NETWORK = "network"; public static final String VPC_ID = "vpcid"; @@ -443,6 +450,16 @@ public class ApiConstants { public static final String VM_SNAPSHOT_ID = "vmsnapshotid"; public static final String VM_SNAPSHOT_DISK_IDS = "vmsnapshotdiskids"; public static final String VM_SNAPSHOT_MEMORY = "snapshotmemory"; + public static final String IMAGE_STORE_UUID = "imagestoreuuid"; + public static final String GUEST_VM_CIDR = "guestvmcidr"; + public static final String NETWORK_CIDR = "networkcidr"; + public static final String RESERVED_IP_RANGE = "reservediprange"; + public static final String UCS_MANAGER_ID = "ucsmanagerid"; + public static final String UCS_PROFILE_DN = "profiledn"; + public static final String UCS_BLADE_DN = "bladedn"; + public static final String UCS_BLADE_ID = "bladeid"; + public static final String VM_GUEST_IP = "vmguestip"; + public static final String OLDER_THAN = "olderthan"; public enum HostDetails { all, capacity, events, stats, min; diff --git a/api/src/org/apache/cloudstack/api/ApiErrorCode.java b/api/src/org/apache/cloudstack/api/ApiErrorCode.java index ee28fa05878..69bd0284cef 100644 --- a/api/src/org/apache/cloudstack/api/ApiErrorCode.java +++ b/api/src/org/apache/cloudstack/api/ApiErrorCode.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.api; /** * Enum class for various API error code used in CloudStack - * @author minc * */ public enum ApiErrorCode { diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 17f789f88fa..816b6deed77 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -62,6 +62,7 @@ import com.cloud.resource.ResourceService; import com.cloud.server.ManagementService; import com.cloud.server.TaggedResourceService; import 
com.cloud.storage.StorageService; +import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotService; import com.cloud.template.TemplateService; import com.cloud.user.Account; @@ -69,7 +70,6 @@ import com.cloud.user.AccountService; import com.cloud.user.DomainService; import com.cloud.user.ResourceLimitService; import com.cloud.utils.Pair; -import com.cloud.vm.BareMetalVmService; import com.cloud.vm.UserVmService; import com.cloud.vm.snapshot.VMSnapshotService; @@ -102,6 +102,7 @@ public abstract class BaseCmd { @Inject public UserVmService _userVmService; @Inject public ManagementService _mgr; @Inject public StorageService _storageService; + @Inject public VolumeApiService _volumeService; @Inject public ResourceService _resourceService; @Inject public NetworkService _networkService; @Inject public TemplateService _templateService; diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java index 267238af37b..a6025149846 100644 --- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java @@ -53,6 +53,8 @@ import org.apache.cloudstack.api.response.LoadBalancerResponse; import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.api.response.NicSecondaryIpResponse; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.PrivateGatewayResponse; @@ -163,6 +165,8 @@ import com.cloud.user.User; import com.cloud.user.UserAccount; import com.cloud.uservm.UserVm; import com.cloud.vm.InstanceGroup; +import com.cloud.vm.Nic; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.VirtualMachine; 
import com.cloud.vm.snapshot.VMSnapshot; @@ -385,4 +389,7 @@ public interface ResponseGenerator { TrafficMonitorResponse createTrafficMonitorResponse(Host trafficMonitor); VMSnapshotResponse createVMSnapshotResponse(VMSnapshot vmSnapshot); + NicSecondaryIpResponse createSecondaryIPToNicResponse(String ip, + Long nicId, Long networkId); + public NicResponse createNicResponse(Nic result); } diff --git a/api/src/org/apache/cloudstack/api/ServerApiException.java b/api/src/org/apache/cloudstack/api/ServerApiException.java index 682e5b7e774..4b0fae58548 100644 --- a/api/src/org/apache/cloudstack/api/ServerApiException.java +++ b/api/src/org/apache/cloudstack/api/ServerApiException.java @@ -43,7 +43,7 @@ public class ServerApiException extends CloudRuntimeException { super(description, cause); _errorCode = errorCode; _description = description; - if (cause instanceof CloudRuntimeException || cause instanceof CloudException ) { + if (cause instanceof CloudRuntimeException) { CloudRuntimeException rt = (CloudRuntimeException) cause; ArrayList idList = rt.getIdProxyList(); if (idList != null) { @@ -52,6 +52,15 @@ public class ServerApiException extends CloudRuntimeException { } } setCSErrorCode(rt.getCSErrorCode()); + } else if (cause instanceof CloudException) { + CloudException rt = (CloudException) cause; + ArrayList idList = rt.getIdProxyList(); + if (idList != null) { + for (int i = 0; i < idList.size(); i++) { + addProxyObject(idList.get(i)); + } + } + setCSErrorCode(rt.getCSErrorCode()); } } diff --git a/api/src/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java b/api/src/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java index b0f73d1d8f8..4121651d499 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/account/CreateAccountCmd.java @@ -78,19 +78,7 @@ public class CreateAccountCmd extends BaseCmd { @Parameter(name = 
ApiConstants.ACCOUNT_DETAILS, type = CommandType.MAP, description = "details for account used to store specific parameters") private Map details; - //@Parameter(name = ApiConstants.REGION_DETAILS, type = CommandType.MAP, description = "details for account used to store region specific parameters") - //private Map regionDetails; - - @Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.STRING, description="Account UUID, required for adding account from another Region") - private String accountUUID; - @Parameter(name=ApiConstants.USER_ID, type=CommandType.STRING, description="User UUID, required for adding account from another Region") - private String userUUID; - - @Parameter(name=ApiConstants.REGION_ID, type=CommandType.INTEGER, description="Id of the Region creating the account") - private Integer regionId; - - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -145,18 +133,6 @@ public class CreateAccountCmd extends BaseCmd { return params; } - public String getAccountUUID() { - return accountUUID; - } - - public String getUserUUID() { - return userUUID; - } - - public Integer getRegionId() { - return regionId; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -174,8 +150,8 @@ public class CreateAccountCmd extends BaseCmd { @Override public void execute(){ UserContext.current().setEventDetails("Account Name: "+getAccountName()+", Domain Id:"+getDomainId()); - UserAccount userAccount = _accountService.createUserAccount(getUsername(), getPassword(), getFirstName(), getLastName(), getEmail(), getTimeZone(), getAccountName(), getAccountType(), getDomainId(), getNetworkDomain(), getDetails(), - getAccountUUID(), getUserUUID(), getRegionId()); + UserAccount userAccount = _accountService.createUserAccount(getUsername(), getPassword(), 
getFirstName(), getLastName(), getEmail(), getTimeZone(), getAccountName(), getAccountType(), + getDomainId(), getNetworkDomain(), getDetails()); if (userAccount != null) { AccountResponse response = _responseGenerator.createUserAccountResponse(userAccount); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java b/api/src/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java index 959d7ce985b..9895da113f1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/account/DeleteAccountCmd.java @@ -48,11 +48,8 @@ public class DeleteAccountCmd extends BaseAsyncCmd { required=true, description="Account id") private Long id; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -62,10 +59,6 @@ public class DeleteAccountCmd extends BaseAsyncCmd { return id; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -103,7 +96,7 @@ public class DeleteAccountCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Account Id: "+getId()); - + boolean result = _regionService.deleteUserAccount(this); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java b/api/src/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java index 
60e9fd5aa60..1f9b8217dd3 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/account/DisableAccountCmd.java @@ -58,11 +58,8 @@ public class DisableAccountCmd extends BaseAsyncCmd { @Parameter(name=ApiConstants.LOCK, type=CommandType.BOOLEAN, required=true, description="If true, only lock the account; else disable the account") private Boolean lockRequested; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -79,14 +76,10 @@ public class DisableAccountCmd extends BaseAsyncCmd { return domainId; } - public Boolean getIsPropagate() { - return isPropagate; - } - public Boolean getLockRequested() { - return lockRequested; - } - + return lockRequested; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -124,7 +117,7 @@ public class DisableAccountCmd extends BaseAsyncCmd { @Override public void execute() throws ConcurrentOperationException, ResourceUnavailableException{ UserContext.current().setEventDetails("Account Name: "+getAccountName()+", Domain Id:"+getDomainId()); - Account result = _regionService.disableAccount(this); + Account result = _regionService.disableAccount(this); if (result != null){ AccountResponse response = _responseGenerator.createAccountResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java b/api/src/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java index 9a92f789132..b9a9f6d70ba 100644 --- 
a/api/src/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/account/EnableAccountCmd.java @@ -50,11 +50,8 @@ public class EnableAccountCmd extends BaseCmd { description="Enables specified account in this domain.") private Long domainId; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -71,10 +68,6 @@ public class EnableAccountCmd extends BaseCmd { return domainId; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -101,7 +94,7 @@ public class EnableAccountCmd extends BaseCmd { @Override public void execute(){ - Account result = _regionService.enableAccount(this); + Account result = _regionService.enableAccount(this); if (result != null){ AccountResponse response = _responseGenerator.createAccountResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java b/api/src/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java index 6fad48bf66e..60d1a97ffac 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/account/UpdateAccountCmd.java @@ -63,11 +63,8 @@ public class UpdateAccountCmd extends BaseCmd{ @Parameter(name = ApiConstants.ACCOUNT_DETAILS, type = CommandType.MAP, description = "details for account used to store specific parameters") private Map details; - @Parameter(name=ApiConstants.IS_PROPAGATE, 
type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -102,10 +99,6 @@ public class UpdateAccountCmd extends BaseCmd{ return params; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -131,7 +124,7 @@ public class UpdateAccountCmd extends BaseCmd{ @Override public void execute(){ - Account result = _regionService.updateAccount(this); + Account result = _regionService.updateAccount(this); if (result != null){ AccountResponse response = _responseGenerator.createAccountResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 912c396bf4f..d55ccd7dd11 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -20,6 +20,10 @@ package org.apache.cloudstack.api.command.admin.cluster; import java.util.ArrayList; import java.util.List; +import com.cloud.exception.InvalidParameterValueException; +import org.apache.cloudstack.api.*; +import org.apache.log4j.Logger; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -81,6 +85,40 @@ public class AddClusterCmd extends BaseCmd { @Parameter(name = ApiConstants.VSM_IPADDRESS, type = CommandType.STRING, required = false, description = "the ipaddress of the VSM associated with this cluster") private String 
vsmipaddress; + @Parameter (name=ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false , description = "value of the cpu overcommit ratio, defaults to 1") + private String cpuovercommitRatio; + + @Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false ,description = "value of the default ram overcommit ratio, defaults to 1") + private String memoryovercommitratio; + + @Parameter(name = ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for guest traffic in the cluster. Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)") + private String vSwitchTypeGuestTraffic; + + @Parameter(name = ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for public traffic in the cluster. Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)") + private String vSwitchTypePublicTraffic; + + @Parameter(name = ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for guest traffic in the cluster. This would override zone wide traffic label setting.") + private String vSwitchNameGuestTraffic; + + @Parameter(name = ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for public traffic in the cluster. 
This would override zone wide traffic label setting.") + private String vSwitchNamePublicTraffic; + + public String getVSwitchTypeGuestTraffic() { + return vSwitchTypeGuestTraffic; + } + + public String getVSwitchTypePublicTraffic() { + return vSwitchTypePublicTraffic; + } + + public String getVSwitchNameGuestTraffic() { + return vSwitchNameGuestTraffic; + } + + public String getVSwitchNamePublicTraffic() { + return vSwitchNamePublicTraffic; + } + public String getVSMIpaddress() { return vsmipaddress; } @@ -147,9 +185,26 @@ public class AddClusterCmd extends BaseCmd { this.allocationState = allocationState; } + public Float getCpuOvercommitRatio (){ + if(cpuovercommitRatio != null){ + return Float.parseFloat(cpuovercommitRatio); + } + return 1.0f; + } + + public Float getMemoryOvercommitRaito (){ + if (memoryovercommitratio != null){ + return Float.parseFloat(memoryovercommitratio); + } + return 1.0f; + } + @Override public void execute(){ try { + if ((getMemoryOvercommitRaito().compareTo(1f) < 0) | (getCpuOvercommitRatio().compareTo(1f) < 0)) { + throw new InvalidParameterValueException("Cpu and ram overcommit ratios should not be less than 1"); + } List result = _resourceService.discoverCluster(this); ListResponse response = new ListResponse(); List clusterResponses = new ArrayList(); diff --git a/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index 058c7ebc952..95728dd184d 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -54,6 +54,13 @@ public class UpdateClusterCmd extends BaseCmd { @Parameter(name=ApiConstants.MANAGED_STATE, type=CommandType.STRING, description="whether this cluster is managed by cloudstack") private String managedState; + @Parameter(name=ApiConstants.CPU_OVERCOMMIT_RATIO, type = CommandType.STRING, 
description = "Value of cpu overcommit ratio") + private String cpuovercommitratio; + + @Parameter(name=ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, description = "Value of ram overcommit ratio") + private String memoryovercommitratio; + + public String getClusterName() { return clusterName; } @@ -100,6 +107,20 @@ public class UpdateClusterCmd extends BaseCmd { this.managedState = managedstate; } + public Float getCpuOvercommitRatio (){ + if(cpuovercommitratio != null){ + return Float.parseFloat(cpuovercommitratio); + } + return 1.0f; + } + + public Float getMemoryOvercommitRaito (){ + if (memoryovercommitratio != null){ + return Float.parseFloat(memoryovercommitratio); + } + return 1.0f; + } + @Override public void execute(){ Cluster cluster = _resourceService.getCluster(getId()); @@ -107,7 +128,11 @@ public class UpdateClusterCmd extends BaseCmd { throw new InvalidParameterValueException("Unable to find the cluster by id=" + getId()); } - Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate()); + if ((getMemoryOvercommitRaito().compareTo(1f) < 0) | (getCpuOvercommitRatio().compareTo(1f) < 0)) { + throw new InvalidParameterValueException("Cpu and ram overcommit ratios should be greater than one"); + } + + Cluster result = _resourceService.updateCluster(cluster, getClusterType(), getHypervisor(), getAllocationState(), getManagedstate(), getMemoryOvercommitRaito(), getCpuOvercommitRatio()); if (result != null) { ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, false); clusterResponse.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java b/api/src/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java index e0ba69359ad..5eae4865732 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/admin/domain/CreateDomainCmd.java @@ -49,12 +49,6 @@ public class CreateDomainCmd extends BaseCmd { @Parameter(name=ApiConstants.NETWORK_DOMAIN, type=CommandType.STRING, description="Network domain for networks in the domain") private String networkDomain; - @Parameter(name=ApiConstants.DOMAIN_ID, type=CommandType.STRING, description="Domain UUID, required for adding domain from another Region") - private String domainUUID; - - @Parameter(name=ApiConstants.REGION_ID, type=CommandType.INTEGER, description="Id of the Region creating the Domain") - private Integer regionId; - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -71,14 +65,6 @@ public class CreateDomainCmd extends BaseCmd { return networkDomain; } - public String getDomainUUID() { - return domainUUID; - } - - public Integer getRegionId() { - return regionId; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -96,7 +82,7 @@ public class CreateDomainCmd extends BaseCmd { @Override public void execute(){ UserContext.current().setEventDetails("Domain Name: "+getDomainName()+((getParentDomainId()!=null)?", Parent DomainId :"+getParentDomainId():"")); - Domain domain = _domainService.createDomain(getDomainName(), getParentDomainId(), getNetworkDomain(), getDomainUUID(), getRegionId()); + Domain domain = _domainService.createDomain(getDomainName(), getParentDomainId(), getNetworkDomain()); if (domain != null) { DomainResponse response = _responseGenerator.createDomainResponse(domain); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java b/api/src/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java index eae393da81e..488a758caec 100644 --- 
a/api/src/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/domain/DeleteDomainCmd.java @@ -50,11 +50,8 @@ public class DeleteDomainCmd extends BaseAsyncCmd { @Parameter(name=ApiConstants.CLEANUP, type=CommandType.BOOLEAN, description="true if all domain resources (child domains, accounts) have to be cleaned up, false otherwise") private Boolean cleanup; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean propagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -67,10 +64,6 @@ public class DeleteDomainCmd extends BaseAsyncCmd { return cleanup; } - public Boolean isPropagate() { - return propagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java b/api/src/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java index c217f16c93a..e658f49ada1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/domain/UpdateDomainCmd.java @@ -52,11 +52,8 @@ public class UpdateDomainCmd extends BaseCmd { @Parameter(name=ApiConstants.NETWORK_DOMAIN, type=CommandType.STRING, description="Network domain for the domain's networks; empty string will update domainName with NULL value") private String networkDomain; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + 
///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -73,10 +70,6 @@ public class UpdateDomainCmd extends BaseCmd { return networkDomain; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -95,7 +88,7 @@ public class UpdateDomainCmd extends BaseCmd { public void execute(){ UserContext.current().setEventDetails("Domain Id: "+getId()); Domain domain = _regionService.updateDomain(this); - + if (domain != null) { DomainResponse response = _responseGenerator.createDomainResponse(domain); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java b/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java index fbe8ab000e6..2976de4bf28 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/ldap/LDAPConfigCmd.java @@ -31,6 +31,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; @APICommand(name = "ldapConfig", description="Configure the LDAP context for this site.", responseObject=LDAPConfigResponse.class, since="3.0.0") @@ -43,7 +44,10 @@ public class LDAPConfigCmd extends BaseCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.HOST_NAME, type=CommandType.STRING, required=true, description="Hostname or ip address of the ldap server eg: my.ldap.com") + 
@Parameter(name=ApiConstants.LIST_ALL, type=CommandType.STRING, description="Hostname or ip address of the ldap server eg: my.ldap.com") + private String listall; + + @Parameter(name=ApiConstants.HOST_NAME, type=CommandType.STRING, description="Hostname or ip address of the ldap server eg: my.ldap.com") private String hostname; @Parameter(name=ApiConstants.PORT, type=CommandType.INTEGER, description="Specify the LDAP port if required, default is 389.") @@ -52,10 +56,10 @@ public class LDAPConfigCmd extends BaseCmd { @Parameter(name=ApiConstants.USE_SSL, type=CommandType.BOOLEAN, description="Check Use SSL if the external LDAP server is configured for LDAP over SSL.") private Boolean useSSL; - @Parameter(name=ApiConstants.SEARCH_BASE, type=CommandType.STRING, required=true, description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com.") + @Parameter(name=ApiConstants.SEARCH_BASE, type=CommandType.STRING, description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com.") private String searchBase; - @Parameter(name=ApiConstants.QUERY_FILTER, type=CommandType.STRING, required=true, description="You specify a query filter here, which narrows down the users, who can be part of this domain.") + @Parameter(name=ApiConstants.QUERY_FILTER, type=CommandType.STRING, description="You specify a query filter here, which narrows down the users, who can be part of this domain.") private String queryFilter; @Parameter(name=ApiConstants.BIND_DN, type=CommandType.STRING, description="Specify the distinguished name of a user with the search permission on the directory.") @@ -74,6 +78,10 @@ public class LDAPConfigCmd extends BaseCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// + public String getListAll() { + return listall == null ? 
"false" : listall; + } + public String getBindPassword() { return bindPassword; } @@ -82,30 +90,56 @@ public class LDAPConfigCmd extends BaseCmd { return bindDN; } + public void setBindDN(String bdn) { + this.bindDN=bdn; + } + public String getQueryFilter() { return queryFilter; } + public void setQueryFilter(String queryFilter) { + this.queryFilter=queryFilter; + } public String getSearchBase() { return searchBase; } + public void setSearchBase(String searchBase) { + this.searchBase=searchBase; + } + public Boolean getUseSSL() { - return useSSL == null ? Boolean.FALSE : Boolean.TRUE; + return useSSL == null ? Boolean.FALSE : useSSL; + } + + public void setUseSSL(Boolean useSSL) { + this.useSSL=useSSL; } public String getHostname() { return hostname; } + public void setHostname(String hostname) { + this.hostname=hostname; + } + public Integer getPort() { return port <= 0 ? 389 : port; } + public void setPort(Integer port) { + this.port=port; + } + public String getTrustStore() { return trustStore; } + public void setTrustStore(String trustStore) { + this.trustStore=trustStore; + } public String getTrustStorePassword() { return trustStorePassword; @@ -122,12 +156,25 @@ public class LDAPConfigCmd extends BaseCmd { InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException { try { - boolean result = _configService.updateLDAP(this); - if (result){ - LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(getHostname(), getPort(), getUseSSL(), getQueryFilter(), getSearchBase(), getBindDN()); + if ("true".equalsIgnoreCase(getListAll())){ + // return the existing conf + LDAPConfigCmd cmd = _configService.listLDAPConfig(this); + LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(cmd.getHostname(), cmd.getPort(), cmd.getUseSSL(), + cmd.getQueryFilter(), cmd.getSearchBase(), cmd.getBindDN()); lr.setResponseName(getCommandName()); this.setResponseObject(lr); } + else if (getHostname()==null || 
getSearchBase() == null || getQueryFilter() == null) { + throw new InvalidParameterValueException("You need to provide hostname, serachbase and queryfilter to configure your LDAP server"); + } + else { + boolean result = _configService.updateLDAP(this); + if (result){ + LDAPConfigResponse lr = _responseGenerator.createLDAPConfigResponse(getHostname(), getPort(), getUseSSL(), getQueryFilter(), getSearchBase(), getBindDN()); + lr.setResponseName(getCommandName()); + this.setResponseObject(lr); + } + } } catch (NamingException ne){ ne.printStackTrace(); diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index ee1e1b20bfc..e915c48e9b6 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -59,6 +59,9 @@ public class CreateServiceOfferingCmd extends BaseCmd { @Parameter(name=ApiConstants.LIMIT_CPU_USE, type=CommandType.BOOLEAN, description="restrict the CPU usage to committed service offering") private Boolean limitCpuUse; + @Parameter(name=ApiConstants.IS_VOLATILE, type=CommandType.BOOLEAN, description="true if the virtual machine needs to be volatile so that on every reboot of VM, original root disk is dettached then destroyed and a fresh root disk is created and attached to VM") + private Boolean isVolatile; + @Parameter(name=ApiConstants.STORAGE_TYPE, type=CommandType.STRING, description="the storage type of the service offering. Values are local and shared.") private String storageType; @@ -106,11 +109,15 @@ public class CreateServiceOfferingCmd extends BaseCmd { } public Boolean getOfferHa() { - return offerHa; + return offerHa == null ? false : offerHa; } public Boolean GetLimitCpuUse() { - return limitCpuUse; + return limitCpuUse == null ? 
false : limitCpuUse; + } + + public Boolean getVolatileVm() { + return isVolatile == null ? false : isVolatile; } public String getStorageType() { diff --git a/api/src/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java new file mode 100644 index 00000000000..2a1a47a13ed --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/resource/ArchiveAlertsCmd.java @@ -0,0 +1,100 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.resource; + +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.AlertResponse; +import org.apache.cloudstack.api.response.ConditionResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; + +@APICommand(name = "archiveAlerts", description = "Archive one or more alerts.", responseObject = SuccessResponse.class) +public class ArchiveAlertsCmd extends BaseCmd { + + public static final Logger s_logger = Logger.getLogger(ArchiveAlertsCmd.class.getName()); + + private static final String s_name = "archivealertsresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = AlertResponse.class, + description = "the IDs of the alerts") + private List ids; + + @Parameter(name=ApiConstants.OLDER_THAN, type=CommandType.DATE, description="archive alerts older than this date (use format \"yyyy-MM-dd\")") + private Date olderThan; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "archive by alert type") + private String type; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public List getIds() { + return ids; + } + + public Date getOlderThan() { + return olderThan; + } + + public String 
getType() { + return type; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() { + if(ids == null && type == null && olderThan == null) { + throw new InvalidParameterValueException("either ids, type or olderthan must be specified"); + } + boolean result = _mgr.archiveAlerts(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Unable to archive Alerts, one or more parameters has invalid values"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java new file mode 100644 index 00000000000..f03793c4f6d --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/resource/DeleteAlertsCmd.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.resource; + +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.AlertResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; + +@APICommand(name = "deleteAlerts", description = "Delete one or more alerts.", responseObject = SuccessResponse.class) +public class DeleteAlertsCmd extends BaseCmd { + + public static final Logger s_logger = Logger.getLogger(DeleteAlertsCmd.class.getName()); + + private static final String s_name = "deletealertsresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = AlertResponse.class, + description = "the IDs of the alerts") + private List ids; + + @Parameter(name=ApiConstants.OLDER_THAN, type=CommandType.DATE, description="delete alerts older than (including) this date (use format \"yyyy-MM-dd\")") + private Date olderThan; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "delete by alert type") + private String type; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public List getIds() { + return ids; + } + + public Date 
getOlderThan() { + return olderThan; + } + + public String getType() { + return type; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() { + if(ids == null && type == null && olderThan == null) { + throw new InvalidParameterValueException("either ids, type or olderthan must be specified"); + } + boolean result = _mgr.deleteAlerts(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Unable to delete Alerts, one or more parameters has invalid values"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java b/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java index c2cde163eba..b049f66f648 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/router/UpgradeRouterCmd.java @@ -22,8 +22,8 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.cloudstack.api.response.DomainRouterResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.log4j.Logger; import com.cloud.network.router.VirtualRouter; @@ -42,7 +42,7 @@ public class UpgradeRouterCmd extends BaseCmd { required=true, description="The ID of the router") private Long id; - @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, 
entityType = DiskOfferingResponse.class, + @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType = ServiceOfferingResponse.class, required=true, description="the service offering ID to apply to the domain router") private Long serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java index a3497a89f98..b86784ed0b0 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateStoragePoolCmd.java @@ -36,6 +36,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.StoragePool; import com.cloud.user.Account; + @SuppressWarnings("rawtypes") @APICommand(name = "createStoragePool", description="Creates a storage pool.", responseObject=StoragePoolResponse.class) public class CreateStoragePoolCmd extends BaseCmd { @@ -48,7 +49,7 @@ public class CreateStoragePoolCmd extends BaseCmd { ///////////////////////////////////////////////////// @Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType = ClusterResponse.class, - required=true, description="the cluster ID for the storage pool") + description="the cluster ID for the storage pool") private Long clusterId; @Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, description="the details for the storage pool") @@ -58,7 +59,7 @@ public class CreateStoragePoolCmd extends BaseCmd { private String storagePoolName; @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType = PodResponse.class, - required=true, description="the Pod ID for the storage pool") + description="the Pod ID for the storage pool") private Long podId; @Parameter(name=ApiConstants.TAGS, type=CommandType.STRING, description="the tags for the storage pool") @@ -70,6 +71,14 @@ public class CreateStoragePoolCmd extends BaseCmd { 
@Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, required=true, description="the Zone ID for the storage pool") private Long zoneId; + + @Parameter(name=ApiConstants.PROVIDER, type=CommandType.STRING, + required=false, description="the storage provider uuid") + private String storageProviderUuid; + + @Parameter(name=ApiConstants.SCOPE, type=CommandType.STRING, + required=false, description="the scope of the storage: cluster or zone") + private String scope; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -102,6 +111,14 @@ public class CreateStoragePoolCmd extends BaseCmd { public Long getZoneId() { return zoneId; } + + public String getStorageProviderUuid() { + return this.storageProviderUuid; + } + + public String getScope() { + return this.scope; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java index a70d927f020..e91d0053c64 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/systemvm/UpgradeSystemVMCmd.java @@ -23,7 +23,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; -import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.log4j.Logger; @@ -48,7 +48,7 @@ public class UpgradeSystemVMCmd extends BaseCmd { required=true, description="The ID of the system vm") private Long id; - 
@Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=DiskOfferingResponse.class, + @Parameter(name=ApiConstants.SERVICE_OFFERING_ID, type=CommandType.UUID, entityType=ServiceOfferingResponse.class, required=true, description="the service offering ID to apply to the system vm") private Long serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java index d1f72c45dd7..6ea8d9b20cb 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/CreateUserCmd.java @@ -65,12 +65,6 @@ public class CreateUserCmd extends BaseCmd { @Parameter(name=ApiConstants.USERNAME, type=CommandType.STRING, required=true, description="Unique username.") private String username; - @Parameter(name=ApiConstants.USER_ID, type=CommandType.STRING, description="User UUID, required for adding account from another Region") - private String userUUID; - - @Parameter(name=ApiConstants.REGION_ID, type=CommandType.INTEGER, description="Id of the Region creating the User") - private Integer regionId; - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -107,14 +101,6 @@ public class CreateUserCmd extends BaseCmd { return username; } - public String getUserUUID() { - return userUUID; - } - - public Integer getRegionId() { - return regionId; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -146,7 +132,7 @@ public class CreateUserCmd extends BaseCmd { @Override public void execute(){ UserContext.current().setEventDetails("UserName: "+getUserName()+", FirstName :"+getFirstName()+", LastName: "+getLastName()); - User user = 
_accountService.createUser(getUserName(), getPassword(), getFirstName(), getLastName(), getEmail(), getTimezone(), getAccountName(), getDomainId(), getUserUUID(), getRegionId()); + User user = _accountService.createUser(getUserName(), getPassword(), getFirstName(), getLastName(), getEmail(), getTimezone(), getAccountName(), getDomainId()); if (user != null) { UserResponse response = _responseGenerator.createUserResponse(user); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java index e8f671de1b9..5683d58ab30 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java @@ -33,7 +33,7 @@ import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.user.UserContext; -@APICommand(name = "deleteUser", description="Creates a user for an account", responseObject=UserResponse.class) +@APICommand(name = "deleteUser", description="Deletes a user for an account", responseObject=SuccessResponse.class) public class DeleteUserCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(DeleteUserCmd.class.getName()); @@ -42,14 +42,11 @@ public class DeleteUserCmd extends BaseCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=UserResponse.class, required=true, description="Deletes a user") + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=UserResponse.class, required=true, description="id of the user to be deleted") private Long id; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject 
RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -58,10 +55,6 @@ public class DeleteUserCmd extends BaseCmd { return id; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java index 95013ec30bc..6eaa46bed30 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java @@ -49,11 +49,8 @@ public class DisableUserCmd extends BaseAsyncCmd { required=true, description="Disables user by user ID.") private Long id; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -62,10 +59,6 @@ public class DisableUserCmd extends BaseAsyncCmd { return id; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -100,7 +93,7 @@ public class DisableUserCmd extends BaseAsyncCmd { public void execute(){ UserContext.current().setEventDetails("UserId: "+getId()); UserAccount user = _regionService.disableUser(this); - + if (user != null){ UserResponse response = _responseGenerator.createUserResponse(user); 
response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java index c1ba9003b05..382f67c98de 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/EnableUserCmd.java @@ -46,11 +46,8 @@ public class EnableUserCmd extends BaseCmd { required=true, description="Enables user by user ID.") private Long id; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -59,10 +56,6 @@ public class EnableUserCmd extends BaseCmd { return id; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -86,7 +79,7 @@ public class EnableUserCmd extends BaseCmd { public void execute(){ UserContext.current().setEventDetails("UserId: "+getId()); UserAccount user = _regionService.enableUser(this); - + if (user != null){ UserResponse response = _responseGenerator.createUserResponse(user); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java index ee59d07cb79..1f31662e8ca 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java @@ -71,11 +71,8 @@ public class UpdateUserCmd extends BaseCmd { @Parameter(name=ApiConstants.USERNAME, 
type=CommandType.STRING, description="Unique username") private String username; - @Parameter(name=ApiConstants.IS_PROPAGATE, type=CommandType.BOOLEAN, description="True if command is sent from another Region") - private Boolean isPropagate; - @Inject RegionService _regionService; - + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -116,10 +113,6 @@ public class UpdateUserCmd extends BaseCmd { return username; } - public Boolean getIsPropagate() { - return isPropagate; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -143,7 +136,7 @@ public class UpdateUserCmd extends BaseCmd { public void execute(){ UserContext.current().setEventDetails("UserId: "+getId()); UserAccount user = _regionService.updateUser(this); - + if (user != null){ UserResponse response = _responseGenerator.createUserResponse(user); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java index f08cb16b23d..3c7956b7d7e 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java @@ -16,7 +16,7 @@ // under the License. package org.apache.cloudstack.api.command.admin.vpc; -import java.util.List; +import java.util.*; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -52,6 +52,10 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{ description="services supported by the vpc offering") private List supportedServices; + @Parameter(name = ApiConstants.SERVICE_PROVIDER_LIST, type = CommandType.MAP, description = "provider to service mapping. 
" + + "If not specified, the provider for the service will be mapped to the default provider on the physical network") + private Map serviceProviderList; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -68,10 +72,33 @@ public class CreateVPCOfferingCmd extends BaseAsyncCreateCmd{ return supportedServices; } + public Map> getServiceProviders() { + Map> serviceProviderMap = null; + if (serviceProviderList != null && !serviceProviderList.isEmpty()) { + serviceProviderMap = new HashMap>(); + Collection servicesCollection = serviceProviderList.values(); + Iterator iter = servicesCollection.iterator(); + while (iter.hasNext()) { + HashMap services = (HashMap) iter.next(); + String service = services.get("service"); + String provider = services.get("provider"); + List providerList = null; + if (serviceProviderMap.containsKey(service)) { + providerList = serviceProviderMap.get(service); + } else { + providerList = new ArrayList(); + } + providerList.add(provider); + serviceProviderMap.put(service, providerList); + } + } + + return serviceProviderMap; + } @Override public void create() throws ResourceAllocationException { - VpcOffering vpcOff = _vpcService.createVpcOffering(getVpcOfferingName(), getDisplayText(), getSupportedServices()); + VpcOffering vpcOff = _vpcService.createVpcOffering(getVpcOfferingName(), getDisplayText(), getSupportedServices(), getServiceProviders()); if (vpcOff != null) { this.setEntityId(vpcOff.getId()); this.setEntityUuid(vpcOff.getUuid()); diff --git a/api/src/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java b/api/src/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java index 1aa620ccd20..5cc905227ab 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/zone/CreateZoneCmd.java @@ -46,6 +46,12 @@ public class 
CreateZoneCmd extends BaseCmd { @Parameter(name=ApiConstants.DNS2, type=CommandType.STRING, description="the second DNS for the Zone") private String dns2; + @Parameter(name=ApiConstants.IP6_DNS1, type=CommandType.STRING, description="the first DNS for IPv6 network in the Zone") + private String ip6Dns1; + + @Parameter(name=ApiConstants.IP6_DNS2, type=CommandType.STRING, description="the second DNS for IPv6 network in the Zone") + private String ip6Dns2; + @Parameter(name=ApiConstants.GUEST_CIDR_ADDRESS, type=CommandType.STRING, description="the guest CIDR address for the Zone") private String guestCidrAddress; @@ -89,6 +95,14 @@ public class CreateZoneCmd extends BaseCmd { return dns2; } + public String getIp6Dns1() { + return ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + public String getGuestCidrAddress() { return guestCidrAddress; } diff --git a/api/src/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java b/api/src/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java index 81bdead221e..16c334eebc8 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/zone/UpdateZoneCmd.java @@ -48,6 +48,12 @@ public class UpdateZoneCmd extends BaseCmd { @Parameter(name=ApiConstants.DNS2, type=CommandType.STRING, description="the second DNS for the Zone") private String dns2; + @Parameter(name=ApiConstants.IP6_DNS1, type=CommandType.STRING, description="the first DNS for IPv6 network in the Zone") + private String ip6Dns1; + + @Parameter(name=ApiConstants.IP6_DNS2, type=CommandType.STRING, description="the second DNS for IPv6 network in the Zone") + private String ip6Dns2; + @Parameter(name=ApiConstants.GUEST_CIDR_ADDRESS, type=CommandType.STRING, description="the guest CIDR address for the Zone") private String guestCidrAddress; @@ -105,6 +111,14 @@ public class UpdateZoneCmd extends BaseCmd { return id; } + public String getIp6Dns1() { + return 
ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + public String getInternalDns1() { return internalDns1; } diff --git a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index ecfd8df0ceb..87d4466e79a 100644 --- a/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -26,7 +26,7 @@ import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.AutoScaleVmProfileResponse; -import org.apache.cloudstack.api.response.DiskOfferingResponse; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.ZoneResponse; @@ -56,7 +56,7 @@ public class CreateAutoScaleVmProfileCmd extends BaseAsyncCreateCmd { required = true, description = "availability zone for the auto deployed virtual machine") private Long zoneId; - @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = DiskOfferingResponse.class, + @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, required = true, description = "the service offering of the auto deployed virtual machine") private Long serviceOfferingId; diff --git a/api/src/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index eb862e62f47..a30e26cfd8b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ 
b/api/src/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -52,8 +52,12 @@ public class ListCapabilitiesCmd extends BaseCmd { response.setProjectInviteRequired((Boolean)capabilities.get("projectInviteRequired")); response.setAllowUsersCreateProjects((Boolean)capabilities.get("allowusercreateprojects")); response.setDiskOffMaxSize((Long)capabilities.get("customDiskOffMaxSize")); - response.setApiLimitInterval((Integer)capabilities.get("apiLimitInterval")); - response.setApiLimitMax((Integer)capabilities.get("apiLimitMax")); + if (capabilities.containsKey("apiLimitInterval")) { + response.setApiLimitInterval((Integer) capabilities.get("apiLimitInterval")); + } + if (capabilities.containsKey("apiLimitMax")) { + response.setApiLimitMax((Integer) capabilities.get("apiLimitMax")); + } response.setObjectName("capability"); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java b/api/src/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java new file mode 100644 index 00000000000..481607c9f0b --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/event/ArchiveEventsCmd.java @@ -0,0 +1,105 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. 
See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.api.command.user.event; + +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.AlertResponse; +import org.apache.cloudstack.api.response.EventResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "archiveEvents", description = "Archive one or more events.", responseObject = SuccessResponse.class) +public class ArchiveEventsCmd extends BaseCmd { + + public static final Logger s_logger = Logger.getLogger(ArchiveEventsCmd.class.getName()); + + private static final String s_name = "archiveeventsresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = EventResponse.class, + description = "the IDs of the events") + private List ids; + + @Parameter(name=ApiConstants.OLDER_THAN, type=CommandType.DATE, description="archive events older than (including) this date (use format \"yyyy-MM-dd\" or the new format \"yyyy-MM-dd HH:mm:ss\")") + private Date olderThan; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "archive by event type") + private String type; + + ///////////////////////////////////////////////////// + /////////////////// Accessors 
/////////////////////// + ///////////////////////////////////////////////////// + + public List getIds() { + return ids; + } + + public Date getOlderThan() { + return olderThan; + } + + public String getType() { + return type; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() { + if(ids == null && type == null && olderThan == null) { + throw new InvalidParameterValueException("either ids, type or olderthan must be specified"); + } + boolean result = _mgr.archiveEvents(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Unable to archive Events, one or more parameters has invalid values"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java b/api/src/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java new file mode 100644 index 00000000000..55ca92a5dfe --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/event/DeleteEventsCmd.java @@ -0,0 +1,105 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. 
You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.api.command.user.event; + +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.AlertResponse; +import org.apache.cloudstack.api.response.EventResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.user.Account; +import com.cloud.user.UserContext; + +@APICommand(name = "deleteEvents", description = "Delete one or more events.", responseObject = SuccessResponse.class) +public class DeleteEventsCmd extends BaseCmd { + + public static final Logger s_logger = Logger.getLogger(DeleteEventsCmd.class.getName()); + + private static final String s_name = "deleteeventsresponse"; + + // /////////////////////////////////////////////////// + // ////////////// API parameters ///////////////////// + // /////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.IDS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = EventResponse.class, + description = "the IDs of the events") + private List ids; + + @Parameter(name=ApiConstants.OLDER_THAN, type=CommandType.DATE, description="delete events older than (including) this date (use format \"yyyy-MM-dd\" 
or the new format \"yyyy-MM-dd HH:mm:ss\")") + private Date olderThan; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "delete by event type") + private String type; + + // /////////////////////////////////////////////////// + // ///////////////// Accessors /////////////////////// + // /////////////////////////////////////////////////// + + public List getIds() { + return ids; + } + + public Date getOlderThan() { + return olderThan; + } + + public String getType() { + return type; + } + + // /////////////////////////////////////////////////// + // ///////////// API Implementation/////////////////// + // /////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Account account = UserContext.current().getCaller(); + if (account != null) { + return account.getId(); + } + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute() { + if(ids == null && type == null && olderThan == null) { + throw new InvalidParameterValueException("either ids, type or olderthan must be specified"); + } + boolean result = _mgr.deleteEvents(this); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Unable to delete Events, one or more parameters has invalid values"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java index 39ab812909d..40128526ce0 100644 --- a/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/firewall/CreatePortForwardingRuleCmd.java @@ -94,6 +94,9 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P
description="The network of the vm the Port Forwarding rule will be created for. " + "Required when public Ip address is not associated with any Guest network yet (VPC case)") private Long networkId; + @Parameter(name = ApiConstants.VM_GUEST_IP, type = CommandType.STRING, required = false, + description = "VM guest nic Secondary ip address for the port forwarding rule") + private String vmSecondaryIp; // /////////////////////////////////////////////////// // ///////////////// Accessors /////////////////////// @@ -104,6 +107,13 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P return ipAddressId; } + public Ip getVmSecondaryIp() { + if (vmSecondaryIp == null) { + return null; + } + return new Ip(vmSecondaryIp); + } + @Override public String getProtocol() { return protocol.trim(); @@ -300,8 +310,15 @@ public class CreatePortForwardingRuleCmd extends BaseAsyncCreateCmd implements P throw new InvalidParameterValueException("Parameter cidrList is deprecated; if you need to open firewall rule for the specific cidr, please refer to createFirewallRule command"); } + Ip privateIp = getVmSecondaryIp(); + if (privateIp != null) { + if ( !privateIp.isIp4()) { + throw new InvalidParameterValueException("Invalid vm ip address"); + } + } + try { - PortForwardingRule result = _rulesService.createPortForwardingRule(this, virtualMachineId, getOpenFirewall()); + PortForwardingRule result = _rulesService.createPortForwardingRule(this, virtualMachineId, privateIp, getOpenFirewall()); setEntityId(result.getId()); setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { diff --git a/api/src/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java b/api/src/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java index 1824612ebb0..3219601156e 100644 --- a/api/src/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java @@ -52,7 +52,7 @@ public class 
ListIsosCmd extends BaseListTaggedResourcesCmd { private String hypervisor; @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = TemplateResponse.class, - description="list all isos by id") + description="list ISO by id") private Long id; @Parameter(name=ApiConstants.IS_PUBLIC, type=CommandType.BOOLEAN, description="true if the ISO is publicly available to all users, false otherwise.") @@ -61,11 +61,14 @@ public class ListIsosCmd extends BaseListTaggedResourcesCmd { @Parameter(name=ApiConstants.IS_READY, type=CommandType.BOOLEAN, description="true if this ISO is ready to be deployed") private Boolean ready; - @Parameter(name=ApiConstants.ISO_FILTER, type=CommandType.STRING, description="possible values are \"featured\", \"self\", \"self-executable\",\"executable\", and \"community\". " + - "* featured-ISOs that are featured and are publicself-ISOs that have been registered/created by the owner. " + - "* selfexecutable-ISOs that have been registered/created by the owner that can be used to deploy a new VM. " + - "* executable-all ISOs that can be used to deploy a new VM " + - "* community-ISOs that are public.") + @Parameter(name=ApiConstants.ISO_FILTER, type=CommandType.STRING, description="possible values are \"featured\", \"self\", \"selfexecutable\",\"sharedexecutable\",\"executable\", and \"community\". " + + "* featured : templates that have been marked as featured and public. " + + "* self : templates that have been registered or created by the calling user. " + + "* selfexecutable : same as self, but only returns templates that can be used to deploy a new VM. " + + "* sharedexecutable : templates ready to be deployed that have been granted to the calling user by another user. " + + "* executable : templates that are owned by the calling user, or public templates, that can be used to deploy a VM. " + + "* community : templates that have been marked as public but not featured. 
" + + "* all : all templates (only usable by admins).") private String isoFilter = TemplateFilter.selfexecutable.toString(); @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="list all isos by name") diff --git a/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index a4a37c8fd58..284d5530846 100644 --- a/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -88,6 +88,10 @@ public class RegisterIsoCmd extends BaseCmd { @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, description="Register iso for the project") private Long projectId; + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.STRING, + description="Image store uuid") + private String imageStoreUuid; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -140,6 +144,10 @@ public class RegisterIsoCmd extends BaseCmd { public String getChecksum() { return checksum; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java b/api/src/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java index ce6ea1663b9..a0ec68ef5dd 100644 --- a/api/src/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/nat/EnableStaticNatCmd.java @@ -59,6 +59,9 @@ public class EnableStaticNatCmd extends BaseCmd{ description="The network of the vm the static nat will be enabled for." 
+ " Required when public Ip address is not associated with any Guest network yet (VPC case)") private Long networkId; + @Parameter(name = ApiConstants.VM_GUEST_IP, type = CommandType.STRING, required = false, + description = "VM guest nic Secondary ip address for the port forwarding rule") + private String vmSecondaryIp; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -72,6 +75,13 @@ public class EnableStaticNatCmd extends BaseCmd{ return virtualMachineId; } + public String getVmSecondaryIp() { + if (vmSecondaryIp == null) { + return null; + } + return vmSecondaryIp; + } + public long getNetworkId() { IpAddress ip = _entityMgr.findById(IpAddress.class, getIpAddressId()); Long ntwkId = null; @@ -110,7 +120,7 @@ public class EnableStaticNatCmd extends BaseCmd{ @Override public void execute() throws ResourceUnavailableException{ try { - boolean result = _rulesService.enableStaticNat(ipAddressId, virtualMachineId, getNetworkId(), false); + boolean result = _rulesService.enableStaticNat(ipAddressId, virtualMachineId, getNetworkId(), false, getVmSecondaryIp()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java index 67774075774..41aaaaada12 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java @@ -64,6 +64,9 @@ public class UpdateNetworkCmd extends BaseAsyncCmd { description="network offering ID") private Long networkOfferingId; + @Parameter(name=ApiConstants.GUEST_VM_CIDR, type=CommandType.STRING, description="CIDR for Guest VMs,Cloudstack allocates IPs to Guest VMs only from this CIDR") + private String guestVmCidr; + 
///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -94,6 +97,10 @@ public class UpdateNetworkCmd extends BaseAsyncCmd { } return false; } + + private String getGuestVmCidr() { + return guestVmCidr; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -125,10 +132,10 @@ public class UpdateNetworkCmd extends BaseAsyncCmd { Network result = null; if (network.getVpcId() != null) { result = _vpcService.updateVpcGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, - callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr()); + callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr()); } else { result = _networkService.updateGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, - callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr()); + callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr()); } if (result != null) { diff --git a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index 84fa197d12c..ba1f924fe02 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -240,7 +240,7 @@ import com.cloud.user.UserContext; @Override public void create() throws ResourceAllocationException { VirtualMachineTemplate template = null; - template = _userVmService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); + template = this._templateService.createPrivateTemplateRecord(this, _accountService.getAccount(getEntityOwnerId())); if (template != null) { 
this.setEntityId(template.getId()); this.setEntityUuid(template.getUuid()); @@ -255,7 +255,7 @@ import com.cloud.user.UserContext; public void execute() { UserContext.current().setEventDetails("Template Id: "+getEntityId()+((getSnapshotId() == null) ? " from volume Id: " + getVolumeId() : " from snapshot Id: " + getSnapshotId())); VirtualMachineTemplate template = null; - template = _userVmService.createPrivateTemplate(this); + template = this._templateService.createPrivateTemplate(this); if (template != null){ List templateResponses; diff --git a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index 3e98ca624ab..c9da0c28cd6 100644 --- a/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -110,7 +110,11 @@ public class RegisterTemplateCmd extends BaseCmd { @Parameter(name=ApiConstants.PROJECT_ID, type=CommandType.UUID, entityType = ProjectResponse.class, description="Register template for the project") private Long projectId; - + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.STRING, + description="Image store uuid") + private String imageStoreUuid; + @Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, description="Template details in key/value pairs.") protected Map details; @@ -189,6 +193,10 @@ public class RegisterTemplateCmd extends BaseCmd { public String getTemplateTag() { return templateTag; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } public Map getDetails() { if (details == null || details.isEmpty()) { diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java new file mode 100644 index 00000000000..df6b3999dba --- /dev/null +++ 
b/api/src/org/apache/cloudstack/api/command/user/vm/AddIpToVmNicCmd.java @@ -0,0 +1,197 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.vm; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.api.response.NicSecondaryIpResponse; + +import com.cloud.async.AsyncJob; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.Network; +import com.cloud.user.Account; +import 
com.cloud.user.UserContext; +import com.cloud.utils.net.NetUtils; +import com.cloud.vm.Nic; + +@APICommand(name = "addIpToNic", description = "Assigns secondary IP to NIC", responseObject = NicSecondaryIpResponse.class) +public class AddIpToVmNicCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(AddIpToVmNicCmd.class.getName()); + private static final String s_name = "addiptovmnicresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name=ApiConstants.NIC_ID, type=CommandType.UUID, entityType = NicResponse.class, required = true, + description="the ID of the nic to which you want to assign private IP") + private Long nicId; + + @Parameter(name = ApiConstants.IP_ADDRESS, type = CommandType.STRING, required = false, + description = "Secondary IP Address") + private String ipAddr; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getEntityTable() { + return "nic_secondary_ips"; + } + + public String getAccountName() { + return UserContext.current().getCaller().getAccountName(); + } + + public long getDomainId() { + return UserContext.current().getCaller().getDomainId(); + } + + private long getZoneId() { + Network ntwk = _entityMgr.findById(Network.class, getNetworkId()); + if (ntwk == null) { + throw new InvalidParameterValueException("Can't find zone id for specified"); + } + return ntwk.getDataCenterId(); + } + + public Long getNetworkId() { + Nic nic = _entityMgr.findById(Nic.class, nicId); + if (nic == null) { + throw new InvalidParameterValueException("Can't find network id for specified nic"); + } + Long networkId = nic.getNetworkId(); + return networkId; + } + + public Long getNicId() { + return nicId; + } + + public String getIpaddress () { + if (ipAddr 
!= null) { + return ipAddr; + } else { + return null; + } + } + + public NetworkType getNetworkType() { + Network ntwk = _entityMgr.findById(Network.class, getNetworkId()); + DataCenter dc = _entityMgr.findById(DataCenter.class, ntwk.getDataCenterId()); + return dc.getNetworkType(); + } + + @Override + public long getEntityOwnerId() { + Account caller = UserContext.current().getCaller(); + return caller.getAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_NET_IP_ASSIGN; + } + + @Override + public String getEventDescription() { + return "associating ip to nic id: " + getNetworkId() + " in zone " + getZoneId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + + @Override + public String getCommandName() { + return s_name; + } + + public static String getResultObjectName() { + return "addressinfo"; + } + + @Override + public void execute() throws ResourceUnavailableException, ResourceAllocationException, + ConcurrentOperationException, InsufficientCapacityException { + + UserContext.current().setEventDetails("Nic Id: " + getNicId() ); + String ip; + String secondaryIp = null; + if ((ip = getIpaddress()) != null) { + if (!NetUtils.isValidIp(ip)) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Invalid ip address " + ip); + } + } + + try { + secondaryIp = _networkService.allocateSecondaryGuestIP(_accountService.getAccount(getEntityOwnerId()), getZoneId(), getNicId(), getNetworkId(), getIpaddress()); + } catch (InsufficientAddressCapacityException e) { + throw new InvalidParameterValueException("Allocating guest ip for nic failed"); + } + + if (secondaryIp != null) { + if (getNetworkType() == NetworkType.Basic) { + // add security group rules for the secondary ip addresses + boolean success = false; + success = _securityGroupService.securityGroupRulesForVmSecIp(getNicId(), getNetworkId(), 
secondaryIp, (boolean) true); + if (success == false) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to set security group rules for the secondary ip"); + } + } + + s_logger.info("Associated ip address to NIC : " + secondaryIp); + NicSecondaryIpResponse response = new NicSecondaryIpResponse(); + response = _responseGenerator.createSecondaryIPToNicResponse(secondaryIp, getNicId(), getNetworkId()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to assign secondary ip to nic"); + } + } + + @Override + public String getSyncObjType() { + return BaseAsyncCmd.networkSyncObject; + } + + @Override + public Long getSyncObjId() { + return getNetworkId(); + } + + @Override + public AsyncJob.Type getInstanceType() { + return AsyncJob.Type.IpAddress; + } + +} diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java old mode 100644 new mode 100755 index 70a263d06d2..21a45f8cc7f --- a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -24,6 +24,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -53,7 +54,6 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.IpAddress; import com.cloud.network.Network; import com.cloud.network.Network.IpAddresses; import com.cloud.offering.DiskOffering; @@ -103,7 +103,7 @@ public class DeployVMCmd extends 
BaseAsyncCreateCmd { private Long domainId; //Network information - @ACL + @ACL(accessType = AccessType.UseNetwork) @Parameter(name=ApiConstants.NETWORK_IDS, type=CommandType.LIST, collectionType=CommandType.UUID, entityType=NetworkResponse.class, description="list of network ids used by virtual machine. Can't be specified with ipToNetworkList parameter") private List networkIds; @@ -306,9 +306,6 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { if (requestedIpv6 != null) { requestedIpv6 = requestedIpv6.toLowerCase(); } - if (requestedIpv6 != null) { - throw new InvalidParameterValueException("Cannot support specified IPv6 address!"); - } IpAddresses addrs = new IpAddresses(requestedIp, requestedIpv6); ipToNetworkMap.put(networkId, addrs); } @@ -388,7 +385,7 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { s_logger.info(ex); - s_logger.trace(ex); + s_logger.info(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } else { @@ -407,10 +404,6 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { @Override public void create() throws ResourceAllocationException{ try { - if (getIp6Address() != null) { - throw new InvalidParameterValueException("Cannot support specified IPv6 address!"); - } - //Verify that all objects exist before passing them to the service Account owner = _accountService.getActiveAccountById(getEntityOwnerId()); @@ -477,7 +470,7 @@ public class DeployVMCmd extends BaseAsyncCreateCmd { } } catch (InsufficientCapacityException ex) { s_logger.info(ex); - s_logger.trace(ex); + s_logger.trace(ex.getMessage(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } catch (ResourceUnavailableException ex) { s_logger.warn("Exception: ", ex); diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java 
b/api/src/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java new file mode 100644 index 00000000000..9af044ebb70 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/vm/ListNicsCmd.java @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.vm; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NicResponse; +import org.apache.cloudstack.api.response.UserVmResponse; + +import com.cloud.async.AsyncJob; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.user.UserContext; +import com.cloud.vm.Nic; +import com.cloud.vm.NicSecondaryIp; + +@APICommand(name = "listNics", description = "list the vm nics and their secondary IP addresses", responseObject = NicResponse.class) +public class ListNicsCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListNicsCmd.class.getName()); + private static final String s_name = "listnics"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.NIC_ID, type=CommandType.UUID, entityType = NicResponse.class, required = false, + description="the ID of the nic to list IPs") + private Long nicId; + + @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType = UserVmResponse.class, required = true, + description="the ID of the vm") + private Long vmId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + 
///////////////////////////////////////////////////// + + public String getEntityTable() { + return "nics"; + } + + public String getAccountName() { + return UserContext.current().getCaller().getAccountName(); + } + + public long getDomainId() { + return UserContext.current().getCaller().getDomainId(); + } + + public Long getNicId() { + return nicId; + } + + public Long getVmId() { + return vmId; + } + + @Override + public long getEntityOwnerId() { + Account caller = UserContext.current().getCaller(); + return caller.getAccountId(); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + + @Override + public String getCommandName() { + return s_name; + } + + public static String getResultObjectName() { + return "addressinfo"; + } + + @Override + public void execute() throws ResourceUnavailableException, ResourceAllocationException, + ConcurrentOperationException, InsufficientCapacityException { + + try { + List results = _networkService.listNics(this); + ListResponse response = new ListResponse(); + List resList = new ArrayList(results.size()); + for (Nic r : results) { + NicResponse resp = _responseGenerator.createNicResponse(r); + resp.setObjectName("nic"); + resList.add(resp); + } + response.setResponses(resList); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + + } catch (Exception e) { + s_logger.warn("Failed to list secondary ip address per nic "); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public AsyncJob.Type getInstanceType() { + return AsyncJob.Type.IpAddress; + } + +} diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java new file mode 100644 index 00000000000..21a9a0c96b4 --- /dev/null +++ 
b/api/src/org/apache/cloudstack/api/command/user/vm/RemoveIpFromVmNicCmd.java @@ -0,0 +1,172 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.vm; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.NicSecondaryIpResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import com.cloud.async.AsyncJob; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.event.EventTypes; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.network.Network; +import com.cloud.user.Account; +import com.cloud.user.UserContext; +import com.cloud.vm.Nic; +import com.cloud.vm.NicSecondaryIp; + +@APICommand(name = "removeIpFromNic", description="Assigns secondary IP to NIC.", responseObject=SuccessResponse.class) +public class 
RemoveIpFromVmNicCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(RemoveIpFromVmNicCmd.class.getName()); + private static final String s_name = "removeipfromnicresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, required = true, entityType = NicSecondaryIpResponse.class, + description="the ID of the secondary ip address to nic") + private Long id; + + // unexposed parameter needed for events logging + @Parameter(name=ApiConstants.ACCOUNT_ID, type=CommandType.UUID, expose=false) + private Long ownerId; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getEntityTable() { + return "nic_secondary_ips"; + } + + public Long getIpAddressId() { + return id; + } + + public String getAccountName() { + return UserContext.current().getCaller().getAccountName(); + } + + public long getDomainId() { + return UserContext.current().getCaller().getDomainId(); + } + + @Override + public long getEntityOwnerId() { + Account caller = UserContext.current().getCaller(); + return caller.getAccountId(); + } + + @Override + public String getEventType() { + return EventTypes.EVENT_NET_IP_ASSIGN; + } + + public NicSecondaryIp getIpEntry() { + NicSecondaryIp nicSecIp = _entityMgr.findById(NicSecondaryIp.class, getIpAddressId()); + return nicSecIp; + } + + @Override + public String getEventDescription() { + return ("Disassociating ip address with id=" + id); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + public static String 
getResultObjectName() { + return "addressinfo"; + } + + public Long getNetworkId() { + NicSecondaryIp nicSecIp = _entityMgr.findById(NicSecondaryIp.class, getIpAddressId()); + if (nicSecIp != null) { + Long networkId = nicSecIp.getNetworkId(); + return networkId; + } else { + return null; + } + } + + public NetworkType getNetworkType() { + Network ntwk = _entityMgr.findById(Network.class, getNetworkId()); + if (ntwk != null) { + DataCenter dc = _entityMgr.findById(DataCenter.class, ntwk.getDataCenterId()); + return dc.getNetworkType(); + } + return null; + } + + @Override + public void execute() throws InvalidParameterValueException { + UserContext.current().setEventDetails("Ip Id: " + id); + NicSecondaryIp nicSecIp = getIpEntry(); + + if (nicSecIp == null) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Invalid IP id is passed"); + } + + if (getNetworkType() == NetworkType.Basic) { + //remove the security group rules for this secondary ip + boolean success = false; + success = _securityGroupService.securityGroupRulesForVmSecIp(nicSecIp.getNicId(), nicSecIp.getNetworkId(),nicSecIp.getIp4Address(), false); + if (success == false) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to set security group rules for the secondary ip"); + } + } + + try { + boolean result = _networkService.releaseSecondaryIpFromNic(id); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to remove secondary ip address for the nic"); + } + } catch (InvalidParameterValueException e) { + throw new InvalidParameterValueException("Removing guest ip from nic failed"); + } + } + + @Override + public String getSyncObjType() { + return BaseAsyncCmd.networkSyncObject; + } + + @Override + public AsyncJob.Type getInstanceType() { + return AsyncJob.Type.IpAddress; + } + +} diff --git 
a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java index e98c2f2eddc..9c33f97c317 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/RestoreVMCmd.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.TemplateResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.log4j.Logger; @@ -34,7 +35,7 @@ import com.cloud.user.Account; import com.cloud.user.UserContext; import com.cloud.uservm.UserVm; -@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template or specific snapshot", responseObject=UserVmResponse.class, since="3.0.0") +@APICommand(name = "restoreVirtualMachine", description="Restore a VM to original template or new template", responseObject=UserVmResponse.class, since="3.0.0") public class RestoreVMCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(RestoreVMCmd.class); private static final String s_name = "restorevmresponse"; @@ -43,6 +44,9 @@ public class RestoreVMCmd extends BaseAsyncCmd { required=true, description="Virtual Machine ID") private Long vmId; + @Parameter(name=ApiConstants.TEMPLATE_ID, type=CommandType.UUID, entityType = TemplateResponse.class, description="an optional template Id to restore vm from the new template") + private Long templateId; + @Override public String getEventType() { return EventTypes.EVENT_VM_RESTORE; @@ -85,4 +89,8 @@ public class RestoreVMCmd extends BaseAsyncCmd { public long getVmId() { return vmId; } + + public Long getTemplateId() { + return templateId; + } } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java 
b/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java index 4d82534c2b2..e577e35795e 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/AttachVolumeCmd.java @@ -119,7 +119,7 @@ public class AttachVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: "+getId()+" VmId: "+getVirtualMachineId()); - Volume result = _userVmService.attachVolumeToVM(this); + Volume result = _volumeService.attachVolumeToVM(this); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 2f77862b3b9..5db06bcd47f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -153,7 +153,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd { @Override public void create() throws ResourceAllocationException{ - Volume volume = _storageService.allocVolume(this); + Volume volume = this._volumeService.allocVolume(this); if (volume != null) { this.setEntityId(volume.getId()); this.setEntityUuid(volume.getUuid()); @@ -165,7 +165,7 @@ public class CreateVolumeCmd extends BaseAsyncCreateCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: "+getEntityId()+((getSnapshotId() == null) ? 
"" : " from snapshot: " + getSnapshotId())); - Volume volume = _storageService.createVolume(this); + Volume volume = _volumeService.createVolume(this); if (volume != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(volume); //FIXME - have to be moved to ApiResponseHelper diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java index 39c3de3fac9..394b0092123 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/DeleteVolumeCmd.java @@ -80,7 +80,7 @@ public class DeleteVolumeCmd extends BaseCmd { @Override public void execute() throws ConcurrentOperationException { UserContext.current().setEventDetails("Volume Id: "+getId()); - boolean result = _storageService.deleteVolume(id, UserContext.current().getCaller()); + boolean result = this._volumeService.deleteVolume(id, UserContext.current().getCaller()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java index 6153e17448b..9a5929eccca 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/DetachVolumeCmd.java @@ -130,7 +130,7 @@ public class DetachVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: "+getId()+" VmId: "+getVirtualMachineId()); - Volume result = _userVmService.detachVolumeFromVM(this); + Volume result = _volumeService.detachVolumeFromVM(this); if (result != null){ VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName("volume"); diff --git 
a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java index d43ad5500e1..287241a8d90 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java @@ -92,7 +92,7 @@ public class MigrateVolumeCmd extends BaseAsyncCmd { public void execute(){ Volume result; try { - result = _storageService.migrateVolume(getVolumeId(), getStoragePoolId()); + result = _volumeService.migrateVolume(this); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java index 52863444507..955727a7d82 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/ResizeVolumeCmd.java @@ -133,7 +133,7 @@ public class ResizeVolumeCmd extends BaseAsyncCmd { @Override public void execute(){ UserContext.current().setEventDetails("Volume Id: " + getEntityId() + " to size " + getSize() + "G"); - Volume volume = _storageService.resizeVolume(this); + Volume volume = _volumeService.resizeVolume(this); if (volume != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(volume); //FIXME - have to be moved to ApiResponseHelper diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java index 107d938b106..3b00ba0d4bb 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java @@ -67,6 +67,10 @@ public class UploadVolumeCmd extends BaseAsyncCmd { 
@Parameter(name=ApiConstants.CHECKSUM, type=CommandType.STRING, description="the MD5 checksum value of this volume") private String checksum; + + @Parameter(name=ApiConstants.IMAGE_STORE_UUID, type=CommandType.STRING, + description="Image store uuid") + private String imageStoreUuid; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -99,6 +103,10 @@ public class UploadVolumeCmd extends BaseAsyncCmd { public String getChecksum() { return checksum; } + + public String getImageStoreUuid() { + return this.imageStoreUuid; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -110,7 +118,7 @@ public class UploadVolumeCmd extends BaseAsyncCmd { ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - Volume volume = _storageService.uploadVolume(this); + Volume volume = _volumeService.uploadVolume(this); if (volume != null){ VolumeResponse response = _responseGenerator.createVolumeResponse(volume); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java index 71d134bb149..38b40b54c5f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java @@ -34,7 +34,7 @@ import com.cloud.user.UserContext; public class CreateVpnCustomerGatewayCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(CreateVpnCustomerGatewayCmd.class.getName()); - private static final String s_name = "createcustomergatewayresponse"; + private static final String s_name = "createvpncustomergatewayresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git 
a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java index 97fe2ffeb90..bbfb598b1db 100644 --- a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java @@ -53,6 +53,9 @@ public class ListZonesByCmd extends BaseListCmd { description="the ID of the domain associated with the zone") private Long domainId; + @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="the name of the zone") + private String name; + @Parameter(name=ApiConstants.SHOW_CAPACITIES, type=CommandType.BOOLEAN, description="flag to display the capacity of the zones") private Boolean showCapacities; @@ -72,6 +75,10 @@ public class ListZonesByCmd extends BaseListCmd { return domainId; } + public String getName(){ + return name; + } + public Boolean getShowCapacities() { return showCapacities; } diff --git a/api/src/org/apache/cloudstack/api/response/AddIpToVmNicResponse.java b/api/src/org/apache/cloudstack/api/response/AddIpToVmNicResponse.java new file mode 100644 index 00000000000..9af20b23871 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/AddIpToVmNicResponse.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +@SuppressWarnings("unused") +public class AddIpToVmNicResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) @Param(description="the ID of the secondary private IP addr") + private Long id; + + @SerializedName(ApiConstants.IP_ADDRESS) @Param(description="Secondary IP address") + private String ipAddr; + + @SerializedName(ApiConstants.NIC_ID) @Param(description="the ID of the nic") + private Long nicId; + + @SerializedName(ApiConstants.NETWORK_ID) @Param(description="the ID of the network") + private Long nwId; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) @Param(description="the ID of the vm") + private Long vmId; + + public Long getId() { + return id; + } + + public String getIpAddr() { + return ipAddr; + } + + public void setIpAddr(String ipAddr) { + this.ipAddr = ipAddr; + } + + public Long getNicId() { + return nicId; + } + + public void setNicId(Long nicId) { + this.nicId = nicId; + } + + public Long getNwId() { + return nwId; + } + + public void setNwId(Long nwId) { + this.nwId = nwId; + } + + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + public Long setId(Long id) { + return id; + } + + +} diff --git a/api/src/org/apache/cloudstack/api/response/ClusterResponse.java b/api/src/org/apache/cloudstack/api/response/ClusterResponse.java index 551e530cc38..a90acde6145 100644 --- a/api/src/org/apache/cloudstack/api/response/ClusterResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ClusterResponse.java @@ -62,6 +62,12 @@ public class ClusterResponse extends BaseResponse { 
@SerializedName("capacity") @Param(description="the capacity of the Cluster", responseObject = CapacityResponse.class) private List capacitites; + @SerializedName("cpuovercommitratio") @Param(description = "The cpu overcommit ratio of the cluster") + private String cpuovercommitratio; + + @SerializedName("memoryovercommitratio") @Param (description = "The ram overcommit ratio of the cluster") + private String memoryovercommitratio; + public String getId() { return id; } @@ -149,4 +155,18 @@ public class ClusterResponse extends BaseResponse { public void setCapacitites(ArrayList arrayList) { this.capacitites = arrayList; } + public void setCpuovercommitratio(String cpuovercommitratio){ + this.cpuovercommitratio= cpuovercommitratio; + } + public void setRamovercommitratio (String memoryOvercommitRatio){ + this.memoryovercommitratio= memoryOvercommitRatio; + } + + public String getCpuovercommitratio (){ + return cpuovercommitratio; + } + + public String getRamovercommitratio (){ + return memoryovercommitratio; + } } diff --git a/api/src/org/apache/cloudstack/api/response/DomainRouterResponse.java b/api/src/org/apache/cloudstack/api/response/DomainRouterResponse.java index c9aa19755e4..274e7a5becb 100644 --- a/api/src/org/apache/cloudstack/api/response/DomainRouterResponse.java +++ b/api/src/org/apache/cloudstack/api/response/DomainRouterResponse.java @@ -47,6 +47,12 @@ public class DomainRouterResponse extends BaseResponse implements ControlledView @SerializedName(ApiConstants.DNS2) @Param(description="the second DNS for the router") private String dns2; + @SerializedName(ApiConstants.IP6_DNS1) @Param(description="the first IPv6 DNS for the router") + private String ip6Dns1; + + @SerializedName(ApiConstants.IP6_DNS2) @Param(description="the second IPv6 DNS for the router") + private String ip6Dns2; + @SerializedName("networkdomain") @Param(description="the network domain for the router") private String networkDomain; @@ -338,4 +344,20 @@ public class 
DomainRouterResponse extends BaseResponse implements ControlledView public void addNic(NicResponse nic) { this.nics.add(nic); } + + public String getIp6Dns1() { + return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } } diff --git a/api/src/org/apache/cloudstack/api/response/IPAddressResponse.java b/api/src/org/apache/cloudstack/api/response/IPAddressResponse.java index 251b2dd09e8..cede84f931e 100644 --- a/api/src/org/apache/cloudstack/api/response/IPAddressResponse.java +++ b/api/src/org/apache/cloudstack/api/response/IPAddressResponse.java @@ -82,6 +82,10 @@ public class IPAddressResponse extends BaseResponse implements ControlledEntityR @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) @Param(description="virutal machine id the ip address is assigned to (not null only for static nat Ip)") private String virtualMachineId; + @SerializedName("vmipaddress") @Param(description="virutal machine (dnat) ip address (not null only for static nat Ip)") + private String virtualMachineIp; + + @SerializedName("virtualmachinename") @Param(description="virutal machine name the ip address is assigned to (not null only for static nat Ip)") private String virtualMachineName; @@ -185,6 +189,10 @@ public class IPAddressResponse extends BaseResponse implements ControlledEntityR this.virtualMachineId = virtualMachineId; } + public void setVirtualMachineIp(String virtualMachineIp) { + this.virtualMachineIp = virtualMachineIp; + } + public void setVirtualMachineName(String virtualMachineName) { this.virtualMachineName = virtualMachineName; } diff --git a/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java b/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java index aa10229f2bd..bbeec630d81 100644 --- a/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java +++ 
b/api/src/org/apache/cloudstack/api/response/LDAPConfigResponse.java @@ -30,7 +30,7 @@ public class LDAPConfigResponse extends BaseResponse { @SerializedName(ApiConstants.PORT) @Param(description="Specify the LDAP port if required, default is 389") private String port; - @SerializedName(ApiConstants.PORT) @Param(description="Check Use SSL if the external LDAP server is configured for LDAP over SSL") + @SerializedName(ApiConstants.USE_SSL) @Param(description="Check Use SSL if the external LDAP server is configured for LDAP over SSL") private String useSSL; @SerializedName(ApiConstants.SEARCH_BASE) @Param(description="The search base defines the starting point for the search in the directory tree Example: dc=cloud,dc=com") diff --git a/api/src/org/apache/cloudstack/api/response/NetworkResponse.java b/api/src/org/apache/cloudstack/api/response/NetworkResponse.java index 7b29efbf4d9..cd32dede3c8 100644 --- a/api/src/org/apache/cloudstack/api/response/NetworkResponse.java +++ b/api/src/org/apache/cloudstack/api/response/NetworkResponse.java @@ -52,9 +52,15 @@ public class NetworkResponse extends BaseResponse implements ControlledEntityRes @SerializedName(ApiConstants.NETMASK) @Param(description="the network's netmask") private String netmask; - @SerializedName(ApiConstants.CIDR) @Param(description="the cidr the network") + @SerializedName(ApiConstants.CIDR) @Param(description="Cloudstack managed address space, all CloudStack managed VMs get IP address from CIDR") private String cidr; + @SerializedName(ApiConstants.NETWORK_CIDR) @Param(description="the network CIDR of the guest network configured with IP reservation. 
It is the summation of CIDR and RESERVED_IP_RANGE") + private String networkCidr; + + @SerializedName(ApiConstants.RESERVED_IP_RANGE) @Param(description="the network's IP range not to be used by CloudStack guest VMs and can be used for non CloudStack purposes") + private String reservedIpRange; + @SerializedName(ApiConstants.ZONE_ID) @Param(description="zone id of the network") private String zoneId; @@ -289,6 +295,14 @@ public class NetworkResponse extends BaseResponse implements ControlledEntityRes this.cidr = cidr; } + public void setNetworkCidr(String networkCidr) { + this.networkCidr = networkCidr; + } + + public void setReservedIpRange(String reservedIpRange) { + this.reservedIpRange = reservedIpRange; + } + public void setRestartRequired(Boolean restartRequired) { this.restartRequired = restartRequired; } diff --git a/api/src/org/apache/cloudstack/api/response/NicResponse.java b/api/src/org/apache/cloudstack/api/response/NicResponse.java index a7d1a0d068e..a1ceaf63798 100644 --- a/api/src/org/apache/cloudstack/api/response/NicResponse.java +++ b/api/src/org/apache/cloudstack/api/response/NicResponse.java @@ -16,6 +16,8 @@ // under the License. 
package org.apache.cloudstack.api.response; +import java.util.List; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; @@ -75,7 +77,10 @@ public class NicResponse extends BaseResponse { @SerializedName(ApiConstants.IP6_ADDRESS) @Param(description="the IPv6 address of network") private String ip6Address; - + + @SerializedName("secondaryip") @Param(description="the Secondary ipv4 addr of nic") + private List secondaryIps; + public String getId() { return id; } @@ -167,4 +172,9 @@ public class NicResponse extends BaseResponse { return false; return true; } + + public void setSecondaryIps(List ipList) { + this.secondaryIps = ipList; + } + } diff --git a/api/src/org/apache/cloudstack/api/response/NicSecondaryIpResponse.java b/api/src/org/apache/cloudstack/api/response/NicSecondaryIpResponse.java new file mode 100644 index 00000000000..695468faef2 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/NicSecondaryIpResponse.java @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; +import java.util.Date; +import java.util.List; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.network.rules.FirewallRule; +import com.cloud.serializer.Param; +import com.cloud.vm.NicSecondaryIp; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value=NicSecondaryIp.class) +@SuppressWarnings("unused") +public class NicSecondaryIpResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) @Param(description="the ID of the secondary private IP addr") + private String id; + + @SerializedName(ApiConstants.IP_ADDRESS) @Param(description="Secondary IP address") + private String ipAddr; + + @SerializedName(ApiConstants.NIC_ID) @Param(description="the ID of the nic") + private String nicId; + + @SerializedName(ApiConstants.NETWORK_ID) @Param(description="the ID of the network") + private String nwId; + + @SerializedName(ApiConstants.VIRTUAL_MACHINE_ID) @Param(description="the ID of the vm") + private String vmId; + + @Override + public String getObjectId() { + return this.getId(); + } + + public String getId() { + return id; + } + + public String getIpAddr() { + return ipAddr; + } + + public void setIpAddr(String ipAddr) { + this.ipAddr = ipAddr; + } + + public String getNicId() { + return nicId; + } + + public void setNicId(String string) { + this.nicId = string; + } + + public String getNwId() { + return nwId; + } + + public void setNwId(String nwId) { + this.nwId = nwId; + } + + public String getVmId() { + return vmId; + } + + public void setVmId(String vmId) { + this.vmId = vmId; + } + + public void setId(String id) { + this.id = id; + } + + +} diff --git a/api/src/org/apache/cloudstack/api/response/ZoneResponse.java b/api/src/org/apache/cloudstack/api/response/ZoneResponse.java index ab99e2d1e5f..2ebb15a1ecf 100644 --- 
a/api/src/org/apache/cloudstack/api/response/ZoneResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ZoneResponse.java @@ -44,6 +44,12 @@ public class ZoneResponse extends BaseResponse { @SerializedName(ApiConstants.DNS2) @Param(description="the second DNS for the Zone") private String dns2; + @SerializedName(ApiConstants.IP6_DNS1) @Param(description="the first IPv6 DNS for the Zone") + private String ip6Dns1; + + @SerializedName(ApiConstants.IP6_DNS2) @Param(description="the second IPv6 DNS for the Zone") + private String ip6Dns2; + @SerializedName(ApiConstants.INTERNAL_DNS1) @Param(description="the first internal DNS for the Zone") private String internalDns1; @@ -176,4 +182,20 @@ public class ZoneResponse extends BaseResponse { public void setLocalStorageEnabled(boolean localStorageEnabled) { this.localStorageEnabled = localStorageEnabled; } + + public String getIp6Dns1() { + return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } } diff --git a/api/src/org/apache/cloudstack/query/QueryService.java b/api/src/org/apache/cloudstack/query/QueryService.java index bfe7b855c81..c3f86aabb7f 100644 --- a/api/src/org/apache/cloudstack/query/QueryService.java +++ b/api/src/org/apache/cloudstack/query/QueryService.java @@ -58,7 +58,6 @@ import com.cloud.exception.PermissionDeniedException; /** * Service used for list api query. 
- * @author minc * */ public interface QueryService { diff --git a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index 7836b6d6e8e..71004977d89 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -133,15 +133,15 @@ public class BackupSnapshotCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } @Override - public String getStorageType() { + public boolean isInMaintenance() { // TODO Auto-generated method stub - return null; + return false; }; }; diff --git a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java index 3545d0f1c29..767d7c37c5e 100644 --- a/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java +++ b/api/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java @@ -115,16 +115,16 @@ public class SnapshotCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } - @Override - public String getStorageType() { - // TODO Auto-generated method stub - return null; - }; + @Override + public boolean isInMaintenance() { + // TODO Auto-generated method stub + return false; + }; }; SnapshotCommand ssc = new SnapshotCommand(pool, diff --git a/api/test/org/apache/cloudstack/api/command/test/AddIpToVmNicTest.java b/api/test/org/apache/cloudstack/api/command/test/AddIpToVmNicTest.java new file mode 100644 index 00000000000..106589d10cc --- /dev/null +++ b/api/test/org/apache/cloudstack/api/command/test/AddIpToVmNicTest.java @@ -0,0 +1,132 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.test; + +import junit.framework.Assert; +import junit.framework.TestCase; + +import org.apache.cloudstack.api.ResponseGenerator; +import org.apache.cloudstack.api.command.user.vm.AddIpToVmNicCmd; +import org.apache.cloudstack.api.command.user.vm.RemoveIpFromVmNicCmd; +import org.apache.cloudstack.api.response.NicSecondaryIpResponse; +import org.apache.cloudstack.api.response.SuccessResponse; + + + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.Mockito; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.NetworkService; +import com.cloud.user.Account; + +public class AddIpToVmNicTest extends TestCase { + + private AddIpToVmNicCmd addIpToVmNicCmd; + private RemoveIpFromVmNicCmd removeIpFromVmNicCmd; + private ResponseGenerator responseGenerator; + private SuccessResponse successResponseGenerator; + 
+ @Rule + public ExpectedException expectedException = ExpectedException.none(); + + @Before + public void setUp() { + addIpToVmNicCmd = new AddIpToVmNicCmd() { + }; + removeIpFromVmNicCmd = new RemoveIpFromVmNicCmd(); + } + + @Test + public void testCreateSuccess() throws ResourceAllocationException, ResourceUnavailableException, ConcurrentOperationException, InsufficientCapacityException { + + NetworkService networkService = Mockito.mock(NetworkService.class); + AddIpToVmNicCmd ipTonicCmd = Mockito.mock(AddIpToVmNicCmd.class); + + Mockito.when( + networkService.allocateSecondaryGuestIP(Mockito.any(Account.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString())).thenReturn("10.1.1.2"); + + ipTonicCmd._networkService = networkService; + responseGenerator = Mockito.mock(ResponseGenerator.class); + + NicSecondaryIpResponse ipres = Mockito.mock(NicSecondaryIpResponse.class); + Mockito.when(responseGenerator.createSecondaryIPToNicResponse(Mockito.anyString(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(ipres); + + ipTonicCmd._responseGenerator = responseGenerator; + ipTonicCmd.execute(); + } + + @Test + public void testCreateFailure() throws ResourceAllocationException, ResourceUnavailableException, ConcurrentOperationException, InsufficientCapacityException { + + NetworkService networkService = Mockito.mock(NetworkService.class); + AddIpToVmNicCmd ipTonicCmd = Mockito.mock(AddIpToVmNicCmd.class); + + Mockito.when( + networkService.allocateSecondaryGuestIP(Mockito.any(Account.class), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString())).thenReturn(null); + + ipTonicCmd._networkService = networkService; + + try { + ipTonicCmd.execute(); + } catch (InsufficientAddressCapacityException e) { + throw new InvalidParameterValueException("Allocating guest ip for nic failed"); + } + } + + @Test + public void testRemoveIpFromVmNicSuccess() throws ResourceAllocationException, ResourceUnavailableException, 
ConcurrentOperationException, InsufficientCapacityException { + + NetworkService networkService = Mockito.mock(NetworkService.class); + RemoveIpFromVmNicCmd removeIpFromNic = Mockito.mock(RemoveIpFromVmNicCmd.class); + + Mockito.when( + networkService.releaseSecondaryIpFromNic(Mockito.anyInt())).thenReturn(true); + + removeIpFromNic._networkService = networkService; + successResponseGenerator = Mockito.mock(SuccessResponse.class); + + removeIpFromNic.execute(); + } + + @Test + public void testRemoveIpFromVmNicFailure() throws InsufficientAddressCapacityException { + NetworkService networkService = Mockito.mock(NetworkService.class); + RemoveIpFromVmNicCmd removeIpFromNic = Mockito.mock(RemoveIpFromVmNicCmd.class); + + Mockito.when( + networkService.releaseSecondaryIpFromNic(Mockito.anyInt())).thenReturn(false); + + removeIpFromNic._networkService = networkService; + successResponseGenerator = Mockito.mock(SuccessResponse.class); + + try { + removeIpFromNic.execute(); + } catch (InvalidParameterValueException exception) { + Assert.assertEquals("Failed to remove secondary ip address for the nic", + exception.getLocalizedMessage()); + } + } +} diff --git a/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java b/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java new file mode 100644 index 00000000000..1f218f47e2a --- /dev/null +++ b/api/test/org/apache/cloudstack/api/command/test/UsageCmdTest.java @@ -0,0 +1,69 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.test; + +import junit.framework.TestCase; +import org.apache.cloudstack.api.command.admin.usage.GetUsageRecordsCmd; +import org.apache.cloudstack.usage.Usage; +import org.apache.cloudstack.usage.UsageService; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.List; + +public class UsageCmdTest extends TestCase { + + private GetUsageRecordsCmd getUsageRecordsCmd; + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + @Before + public void setUp() { + + getUsageRecordsCmd = new GetUsageRecordsCmd() { + + }; + } + + @Test + public void testExecuteSuccess() { + UsageService usageService = Mockito.mock(UsageService.class); + getUsageRecordsCmd._usageService = usageService; + getUsageRecordsCmd.execute(); + } + + @Test + public void testExecuteEmptyResult() { + + UsageService usageService = Mockito.mock(UsageService.class); + + List usageRecords = new ArrayList(); + + Mockito.when(usageService.getUsageRecords(getUsageRecordsCmd)).thenReturn( + usageRecords); + + getUsageRecordsCmd._usageService = usageService; + getUsageRecordsCmd.execute(); + + } + +} diff --git a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java index 7f5540fa4d3..852e52b1b86 100644 --- a/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java +++ 
b/api/test/src/com/cloud/agent/api/test/ResizeVolumeCommandTest.java @@ -134,15 +134,15 @@ public class ResizeVolumeCommandTest { } @Override - public String getStorageProvider() { + public Long getStorageProviderId() { // TODO Auto-generated method stub return null; } @Override - public String getStorageType() { + public boolean isInMaintenance() { // TODO Auto-generated method stub - return null; + return false; }; }; diff --git a/awsapi/pom.xml b/awsapi/pom.xml index 5a0ad7b0cb4..f19a71381d3 100644 --- a/awsapi/pom.xml +++ b/awsapi/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -354,7 +354,7 @@ + value="${cs.replace.properties}" /> diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java index 061351930da..f108a20e5b4 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java @@ -20,6 +20,7 @@ import javax.ejb.Local; import org.apache.log4j.Logger; import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; +import org.springframework.stereotype.Component; import com.cloud.bridge.model.CloudStackUserVO; import com.cloud.bridge.util.EncryptionSecretKeyCheckerUtil; @@ -28,6 +29,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +@Component @Local(value={CloudStackUserDao.class}) public class CloudStackUserDaoImpl extends GenericDaoBase implements CloudStackUserDao { public static final Logger logger = Logger.getLogger(CloudStackUserDaoImpl.class); diff --git a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java index 9fc581be86f..cebac0b159e 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java +++ b/awsapi/src/com/cloud/bridge/service/EC2SoapServiceImpl.java @@ -1905,7 
+1905,10 @@ public class EC2SoapServiceImpl implements AmazonEC2SkeletonInterface { param3.setStartTime( cal ); param3.setOwnerId(ownerId); - param3.setVolumeSize( snap.getVolumeSize().toString()); + if ( snap.getVolumeSize() == null ) + param3.setVolumeSize("0"); + else + param3.setVolumeSize( snap.getVolumeSize().toString() ); param3.setDescription( snap.getName()); param3.setOwnerAlias( snap.getAccountName() ); diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java index a835d8a258b..e92f845f2b1 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2Engine.java @@ -25,9 +25,12 @@ import java.security.SignatureException; import java.sql.SQLException; import java.text.ParseException; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.UUID; import javax.inject.Inject; @@ -443,25 +446,35 @@ public class EC2Engine extends ManagerBase { */ public EC2DescribeSnapshotsResponse handleRequest( EC2DescribeSnapshots request ) { - EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); EC2SnapshotFilterSet sfs = request.getFilterSet(); EC2TagKeyValue[] tagKeyValueSet = request.getResourceTagSet(); try { - // -> query to get the volume size for each snapshot EC2DescribeSnapshotsResponse response = listSnapshots( request.getSnapshotSet(), getResourceTags(tagKeyValueSet)); if (response == null) { return new EC2DescribeSnapshotsResponse(); } EC2Snapshot[] snapshots = response.getSnapshotSet(); - for (EC2Snapshot snap : snapshots) { - volumes = listVolumes(snap.getVolumeId(), null, volumes, null); - EC2Volume[] volSet = volumes.getVolumeSet(); - if (0 < volSet.length) snap.setVolumeSize(volSet[0].getSize()); - volumes.reset(); + // -> query to get the volume size for each 
snapshot + HashMap volumeIdSize = new HashMap(); + for( EC2Snapshot snap : snapshots ) { + Boolean duplicateVolume = false; + Long size = null; + if ( volumeIdSize.containsKey(snap.getVolumeId()) ) { + size = volumeIdSize.get(snap.getVolumeId()); + duplicateVolume = true; + break; + } + if ( !duplicateVolume ) { + EC2DescribeVolumesResponse volumes = new EC2DescribeVolumesResponse(); + volumes = listVolumes(snap.getVolumeId(), null, volumes, null); + EC2Volume[] volumeSet = volumes.getVolumeSet(); + if (volumeSet.length > 0) size = volumeSet[0].getSize(); + volumeIdSize.put(snap.getVolumeId(), size); + } + snap.setVolumeSize(size); } - if ( null == sfs ) return response; else return sfs.evaluate( response ); @@ -1410,47 +1423,53 @@ public class EC2Engine extends ManagerBase { // now actually deploy the vms for( int i=0; i < createInstances; i++ ) { - CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), - request.getTemplateId(), zoneId, null, null, null, null, - null, null, null, request.getKeyName(), null, (network != null ? network.getId() : null), - null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); - EC2Instance vm = new EC2Instance(); - vm.setId(resp.getId().toString()); - vm.setName(resp.getName()); - vm.setZoneName(resp.getZoneName()); - vm.setTemplateId(resp.getTemplateId().toString()); - if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { - List securityGroupList = resp.getSecurityGroupList(); - for (CloudStackSecurityGroup securityGroup : securityGroupList) { - EC2SecurityGroup param = new EC2SecurityGroup(); - param.setId(securityGroup.getId()); - param.setName(securityGroup.getName()); - vm.addGroupName(param); + try{ + CloudStackUserVm resp = getApi().deployVirtualMachine(svcOffering.getId(), + request.getTemplateId(), zoneId, null, null, null, null, + null, null, null, request.getKeyName(), null, (network != null ? 
network.getId() : null), + null, constructList(request.getGroupSet()), request.getSize().longValue(), request.getUserData()); + EC2Instance vm = new EC2Instance(); + vm.setId(resp.getId().toString()); + vm.setName(resp.getName()); + vm.setZoneName(resp.getZoneName()); + vm.setTemplateId(resp.getTemplateId().toString()); + if (resp.getSecurityGroupList() != null && resp.getSecurityGroupList().size() > 0) { + List securityGroupList = resp.getSecurityGroupList(); + for (CloudStackSecurityGroup securityGroup : securityGroupList) { + EC2SecurityGroup param = new EC2SecurityGroup(); + param.setId(securityGroup.getId()); + param.setName(securityGroup.getName()); + vm.addGroupName(param); + } } - } - vm.setState(resp.getState()); - vm.setCreated(resp.getCreated()); - List nicList = resp.getNics(); - for (CloudStackNic nic : nicList) { - if (nic.getIsDefault()) { - vm.setPrivateIpAddress(nic.getIpaddress()); - break; + vm.setState(resp.getState()); + vm.setCreated(resp.getCreated()); + List nicList = resp.getNics(); + for (CloudStackNic nic : nicList) { + if (nic.getIsDefault()) { + vm.setPrivateIpAddress(nic.getIpaddress()); + break; + } } + vm.setIpAddress(resp.getIpAddress()); + vm.setAccountName(resp.getAccountName()); + vm.setDomainId(resp.getDomainId()); + vm.setHypervisor(resp.getHypervisor()); + vm.setServiceOffering( svcOffering.getName()); + vm.setKeyPairName(resp.getKeyPairName()); + instances.addInstance(vm); + countCreated++; + }catch(Exception e){ + logger.error("Failed to deploy VM number: "+ (i+1) +" due to error: "+e.getMessage()); + break; } - vm.setIpAddress(resp.getIpAddress()); - vm.setAccountName(resp.getAccountName()); - vm.setDomainId(resp.getDomainId()); - vm.setHypervisor(resp.getHypervisor()); - vm.setServiceOffering( svcOffering.getName()); - vm.setKeyPairName(resp.getKeyPairName()); - instances.addInstance(vm); - countCreated++; } if (0 == countCreated) { // TODO, we actually need to destroy left-over VMs when the exception is thrown - throw new 
EC2ServiceException(ServerError.InsufficientInstanceCapacity, "Insufficient Instance Capacity" ); + throw new EC2ServiceException(ServerError.InternalError, "Failed to deploy instances" ); } + logger.debug("Could deploy "+ countCreated + " VM's successfully"); return instances; } catch( Exception e ) { diff --git a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java index e0aae7364d9..b5b7c7840df 100644 --- a/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java +++ b/awsapi/src/com/cloud/bridge/service/core/ec2/EC2InstanceFilterSet.java @@ -50,6 +50,7 @@ public class EC2InstanceFilterSet { filterTypes.put( "group-id", "string" ); filterTypes.put( "tag-key", "string" ); filterTypes.put( "tag-value", "string" ); + filterTypes.put( "dns-name", "string" ); } @@ -184,6 +185,8 @@ public class EC2InstanceFilterSet { } } return false; + }else if (filterName.equalsIgnoreCase( "dns-name" )){ + return containsString( vm.getName(), valueSet ); } else return false; } diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index a0ed7c9a277..d167a5c0a4f 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -17,6 +17,13 @@ #new labels (begin) ********************************************************************************************** +label.menu.regions=Regions +label.region=Region +label.add.region=Add Region +label.remove.region=Remove Region +message.remove.region=Are you sure you want to remove this region from this management server? +message.add.region=Please specify the required information to add a new region. 
+label.endpoint=Endpoint label.plugins=Plugins label.plugin.details=Plugin details label.author.name=Author name diff --git a/client/WEB-INF/classes/resources/messages_fr_FR.properties b/client/WEB-INF/classes/resources/messages_fr_FR.properties index e29d61b51ea..267baec0270 100644 --- a/client/WEB-INF/classes/resources/messages_fr_FR.properties +++ b/client/WEB-INF/classes/resources/messages_fr_FR.properties @@ -15,1505 +15,1453 @@ # specific language governing permissions and limitations # under the License. - -#new labels (begin) ********************************************************************************************** -# label.isolation.uri=Isolation URI -# label.broadcast.uri=Broadcast URI -#new labels (end) ************************************************************************************************ - - -#modified labels (begin) ***************************************************************************************** -# message.zoneWizard.enable.local.storage=WARNING\: If you enable local storage for this zone, you must do the following, depending on where you would like your system VMs to launch\:

1. If system VMs need to be launched in primary storage, primary storage needs to be added to the zone after creation. You must also start the zone in a disabled state.

2. If system VMs need to be launched in local storage, system.vm.use.local.storage needs to be set to true before you enable the zone.


Would you like to continue? -#modified labels (end) ******************************************************************************************* - -# label.configure.network.ACLs=Configure Network ACLs -# label.network.ACLs=Network ACLs -# label.add.network.ACL=Add network ACL -# label.private.Gateway=Private Gateway -# label.VPC.router.details=VPC router details -# label.VMs.in.tier=VMs in tier -# label.local.storage.enabled=Local storage enabled -# label.tier.details=Tier details -# label.edit.tags=Edit tags -label.action.enable.physical.network=Activer le réseau physique -label.action.disable.physical.network=Désactiver le réseau physique -message.action.enable.physical.network=Confirmer l\'activation de ce réseau physique. -message.action.disable.physical.network=Confirmer la désactivation de ce réseau physique. - -# label.select.tier=Select Tier -# label.add.ACL=Add ACL -# label.remove.ACL=Remove ACL -# label.tier=Tier -# label.network.ACL=Network ACL -# label.network.ACL.total=Network ACL Total -# label.add.new.gateway=Add new gateway -# message.add.new.gateway.to.vpc=Please specify the information to add a new gateway to this VPC. 
-# label.delete.gateway=delete gateway -# message.delete.gateway=Please confirm you want to delete the gateway -# label.CIDR.of.destination.network=CIDR of destination network -# label.add.route=Add route -# label.add.static.route=Add static route -# label.remove.static.route=Remove static route -# label.site.to.site.VPN=site-to-site VPN -# label.add.VPN.gateway=Add VPN Gateway -# message.add.VPN.gateway=Please confirm that you want to add a VPN Gateway -# label.VPN.gateway=VPN Gateway -# label.delete.VPN.gateway=delete VPN Gateway -# message.delete.VPN.gateway=Please confirm that you want to delete this VPN Gateway -# label.VPN.connection=VPN Connection -# label.IPsec.preshared.key=IPsec Preshared-Key -# label.IKE.policy=IKE policy -# label.ESP.policy=ESP policy -# label.create.VPN.connection=Create VPN Connection -# label.VPN.customer.gateway=VPN Customer Gateway -# label.CIDR.list=CIDR list -# label.IKE.lifetime=IKE Lifetime (second) -# label.ESP.lifetime=ESP Lifetime(second) -# label.dead.peer.detection=Dead Peer Detection -# label.reset.VPN.connection=Reset VPN connection -# message.reset.VPN.connection=Please confirm that you want to reset VPN connection -# label.delete.VPN.connection=delete VPN connection -# message.delete.VPN.connection=Please confirm that you want to delete VPN connection -# label.add.new.tier=Add new tier -# label.add.VM.to.tier=Add VM to tier -# label.remove.tier=Remove tier - -# label.local.storage.enabled=Local storage enabled -# label.associated.network=Associated Network -# label.add.port.forwarding.rule=Add port forwarding rule -# label.dns=DNS - -# label.vpc=VPC -# label.vpc.id=VPC ID -# label.tier=Tier -# label.add.vpc=Add VPC -# label.super.cidr.for.guest.networks=Super CIDR for Guest Networks -# label.DNS.domain.for.guest.networks=DNS domain for Guest Networks -# label.configure.vpc=Configure VPC -# label.edit.vpc=Edit VPC -# label.restart.vpc=restart VPC -# message.restart.vpc=Please confirm that you want to restart the VPC -# 
label.remove.vpc=remove VPC -# message.remove.vpc=Please confirm that you want to remove the VPC -# label.vpn.customer.gateway=VPN Customer Gateway -# label.add.vpn.customer.gateway=Add VPN Customer Gateway -# label.IKE.encryption=IKE Encryption -# label.IKE.hash=IKE Hash -# label.IKE.DH=IKE DH -# label.ESP.encryption=ESP Encryption -# label.ESP.hash=ESP Hash -# label.perfect.forward.secrecy=Perfect Forward Secrecy -# label.IKE.lifetime=IKE Lifetime (second) -# label.ESP.lifetime=ESP Lifetime(second) -# label.dead.peer.detection=Dead Peer Detection -# label.delete.VPN.customer.gateway=delete VPN Customer Gateway -# message.delete.VPN.customer.gateway=Please confirm that you want to delete this VPN Customer Gateway - -label.network.domain.text=Texte du domaine réseau -label.memory.mb=Mémoire -label.cpu.mhz=CPU (en MHz) - -message.action.remove.host=Supprimer le dernier/seul hôte dans le cluster et le réinstaller va supprimer l\'environnement/la base de données sur l\'hôte et rendre les VMs invitées inutilisables. - -message.action.reboot.router=Confirmez que vous souhaitez redémarrer ce routeur. -message.action.stop.router=Confirmez que vous souhaitez arrêter ce routeur. -message.restart.network=Confirmer le redémarrage du réseau - - -label.ipaddress=Adresse IP -label.vcdcname=Nom du DC vCenter -label.vcipaddress=Adresse IP vCenter -label.vsmctrlvlanid=VLAN ID Controle -label.vsmpktvlanid=VLAN ID Paquet -label.vsmstoragevlanid=VLAN ID Stockage -label.nexusVswitch=Nexus Vswitch -label.action.delete.nexusVswitch=Supprimer le NexusVswitch -label.action.enable.nexusVswitch=Activer le NexusVswitch -label.action.disable.nexusVswitch=Désactiver le NexusVswitch -label.action.list.nexusVswitch=Lister les NexusVswitch -message.action.delete.nexusVswitch=Confirmer la suppression de ce NexusVswitch. -message.action.enable.nexusVswitch=Confirmer l\'activation de ce NexusVswitch. -message.action.disable.nexusVswitch=Confirmer la désactivation de ce NexusVswitch. 
-message.specify.url=Renseigner l\'URL -label.select.instance.to.attach.volume.to=Sélectionner l\'instance à laquelle rattacher ce volume -label.upload=Charger -label.upload.volume=Charger un volume -label.virtual.routers=Routeurs virtuels -label.primary.storage.count=Groupes de stockage primaire -label.secondary.storage.count=Groupes de stokage secondaire -label.number.of.system.vms=Nombre de VM système -label.number.of.virtual.routers=Nombre de routeurs virtuels -label.action.register.iso=Enregistrer ISO -label.isolation.method=Méthode de séparation -label.action.register.template=Enregister image -label.checksum=Checksum MD5 -label.vpn=VPN -label.vlan=VLAN - - -label.management.ips=Adresses IP de management -label.devices=Machines -label.rules=Règles -label.traffic.label=Label trafic -label.vm.state=Etat VM -# message.setup.physical.network.during.zone.creation.basic=When adding a basic zone, you can set up one physical network, which corresponds to a NIC on the hypervisor. The network carries several types of traffic.

You may also drag and drop other traffic types onto the physical network. -label.domain.router=Routeur du domaine -label.console.proxy=Console proxy -label.secondary.storage.vm=VM stockage secondaire -label.add.netScaler.device=Ajouter un Netscaler -label.add.F5.device=Ajouter un F5 -label.add.SRX.device=Ajouter un SRX -label.account.and.security.group=Compte, groupe de sécurité -label.fetch.latest=Raffraichir -label.system.offering=Offre système -message.validate.instance.name=le nom de l\'instance de l\'instance ne peut dépasser 63 caractères. Seuls les lettres de a à z, les chiffres de 0 à 9 et les tirets sont acceptés. Le nom doit commencer par une lettre et se terminer par une lettre ou un chiffre. - - -label.isolated.networks=Réseaux isolés -label.latest.events=Derniers évenements -state.Enabled=Actifs -label.system.wide.capacity=Capacité globale -label.network.service.providers=Provider de réseau -message.launch.zone=La zone est prête à démarrer; Continuer. -error.unable.to.reach.management.server=Impossible de joindre le serveur de management -label.internal.name=Nom interne -message.configure.all.traffic.types=Vous avez de multiples réseaux physiques; veuillez configurer les labels pour chaque type de trafic en cliquant sur le bouton Modifier. -message.edit.traffic.type=Spécifier le label de trafic associé avec ce type de trafic -label.edit.traffic.type=Modifer le type de trafic -label.label=Label -label.max.networks=Réseaux Max. -error.invalid.username.password=Utilisateur ou mot de passe invalide -message.enabling.security.group.provider=Activation du groupe de sécurité pour le provider -message.adding.Netscaler.provider=Ajouter un Netscaler provider -message.creating.guest.network=Création du réseau pour les instances -label.action.delete.physical.network=Supprimer le réseau physique -message.action.delete.physical.network=Confirmer la suppression du réseau physique -# message.installWizard.copy.whatIsAHost=A host is a single computer. 
Hosts provide the computing resources that run the guest virtual machines. Each host has hypervisor software installed on it to manage the guest VMs (except for bare metal hosts, which are a special case discussed in the Advanced Installation Guide). For example, a Linux KVM-enabled server, a Citrix XenServer server, and an ESXi server are hosts. In a Basic Installation, we use a single host running XenServer or KVM.

The host is the smallest organizational unit within a CloudStack&\#8482; deployment. Hosts are contained within clusters, clusters are contained within pods, and pods are contained within zones. - - -label.add.compute.offering=Ajouter une offre de calcul -label.compute.offering=Offre de calcul -label.compute.offerings=Offres de calcul -label.select.offering=Choisir une offre -label.menu.infrastructure=Infrastructure -label.sticky.tablesize=Taille du tableau -label.sticky.expire=Expiration -label.sticky.cookie-name=Nom du cookie -label.sticky.mode=Mode -label.sticky.length=Longueur -label.sticky.holdtime=Temps de pause -label.sticky.request-learn=Apprendre la requête -label.sticky.prefix=Préfixe -label.sticky.nocache=Pas de cache -label.sticky.indirect=Indirect -label.sticky.postonly=Après seulement -label.sticky.domain=Domaine -state.Allocating=Allocation en cours -state.Migrating=Migration en cours -error.please.specify.physical.network.tags=L\'offre réseau ne sera pas disponible tant que des label n\'auront pas été renseigner pour ce réseau physique. - - -state.Stopping=Arrêt en cours -message.add.load.balancer.under.ip=La règle de load balancer a été ajoutée sous l\'adresse IP \: -message.select.instance=Sélectionner une instance. -label.select=Selectionner -label.select.vm.for.static.nat=Sélectionner une VM pour le NAT statique -label.select.instance=Sélectionner une instance. 
-label.nat.port.range=Plage de port NAT -label.static.nat.vm.details=NAT statique, détails par VM -label.edit.lb.rule=Modifier la règle LB -message.migrate.instance.to.host=Merci de confirmer la migration de l\'instance vers un autre serveur -label.migrate.instance.to.host=Migration de l\'instance sur un autre serveur -message.migrate.instance.to.ps=Merci de confirmer la migration de l\'instance vers un autre stockage primaire -label.migrate.instance.to.ps=Migration de l\'instance sur un autre stockage primaire -label.corrections.saved=Modifications enregistrées -# message.installWizard.copy.whatIsSecondaryStorage=Secondary storage is associated with a zone, and it stores the following\:
  • Templates - OS images that can be used to boot VMs and can include additional configuration information, such as installed applications
  • ISO images - OS images that can be bootable or non-bootable
  • Disk volume snapshots - saved copies of VM data which can be used for data recovery or to create new templates
-# message.installWizard.copy.whatIsPrimaryStorage=A CloudStack&\#8482; cloud infrastructure makes use of two types of storage\: primary storage and secondary storage. Both of these can be iSCSI or NFS servers, or localdisk.

Primary storage is associated with a cluster, and it stores the disk volumes of each guest VM for all the VMs running on hosts in that cluster. The primary storage server is typically located close to the hosts. -# message.installWizard.copy.whatIsACluster=A cluster provides a way to group hosts. The hosts in a cluster all have identical hardware, run the same hypervisor, are on the same subnet, and access the same shared storage. Virtual machine instances (VMs) can be live-migrated from one host to another within the same cluster, without interrupting service to the user. A cluster is the third-largest organizational unit within a CloudStack&\#8482; deployment. Clusters are contained within pods, and pods are contained within zones.

CloudStack&\#8482; allows multiple clusters in a cloud deployment, but for a Basic Installation, we only need one cluster. -# message.installWizard.copy.whatIsAPod=A pod often represents a single rack. Hosts in the same pod are in the same subnet.

A pod is the second-largest organizational unit within a CloudStack&\#8482; deployment. Pods are contained within zones. Each zone can contain one or more pods; in the Basic Installation, you will have just one pod in your zone. -# message.installWizard.copy.whatIsAZone=A zone is the largest organizational unit within a CloudStack&\#8482; deployment. A zone typically corresponds to a single datacenter, although it is permissible to have multiple zones in a datacenter. The benefit of organizing infrastructure into zones is to provide physical isolation and redundancy. For example, each zone can have its own power supply and network uplink, and the zones can be widely separated geographically (though this is not required). -# message.installWizard.copy.whatIsCloudStack=CloudStack&\#8482 is a software platform that pools computing resources to build public, private, and hybrid Infrastructure as a Service (IaaS) clouds. CloudStack&\#8482 manages the network, storage, and compute nodes that make up a cloud infrastructure. Use CloudStack&\#8482 to deploy, manage, and configure cloud computing environments.

Extending beyond individual virtual machine images running on commodity hardware, CloudStack&\#8482 provides a turnkey cloud infrastructure software stack for delivering virtual datacenters as a service - delivering all of the essential components to build, deploy, and manage multi-tier and multi-tenant cloud applications. Both open-source and Premium versions are available, with the open-source version offering nearly identical features. -message.installWizard.tooltip.addSecondaryStorage.path=Le chemin exporté, situé sur le serveur spécifié précédement -message.installWizard.tooltip.addSecondaryStorage.nfsServer=Adresse IP du server NFS supportant le stockage secondaire -# message.installWizard.tooltip.addPrimaryStorage.path=(for NFS) In NFS this is the exported path from the server. Path (for SharedMountPoint). With KVM this is the path on each host that is where this primary storage is mounted. For example, "/mnt/primary". -message.installWizard.tooltip.addPrimaryStorage.server=(pour NFS, iSCSI ou PreSetup) Adresse IP ou nom DNS du stockage -message.installWizard.tooltip.addPrimaryStorage.name=Nom pour ce stockage -message.installWizard.tooltip.addHost.password=Le mot de passe pour l\'utilisateur indiqué précédement (issu de l\'installation XenServer). -message.installWizard.tooltip.addHost.username=Habituellement root. -message.installWizard.tooltip.addHost.hostname=Le nom DNS ou adresse IP du serveur. -message.installWizard.tooltip.addCluster.name=Un nom pour le cluster. Ce choix est libre et n\'est pas utilisé par CloudStack. -# message.installWizard.tooltip.addPod.reservedSystemEndIp=This is the IP range in the private network that the CloudStack uses to manage Secondary Storage VMs and Console Proxy VMs. These IP addresses are taken from the same subnet as computing servers. -# message.installWizard.tooltip.addPod.reservedSystemStartIp=This is the IP range in the private network that the CloudStack uses to manage Secondary Storage VMs and Console Proxy VMs. 
These IP addresses are taken from the same subnet as computing servers. -message.installWizard.tooltip.addPod.reservedSystemNetmask=Le masque réseau que les instances utiliseront sur le réseau -message.installWizard.tooltip.addPod.reservedSystemGateway=Passerelle pour les serveurs dans ce pod -message.installWizard.tooltip.addPod.name=Nom pour le pod -# message.installWizard.tooltip.configureGuestTraffic.guestEndIp=The range of IP addresses that will be available for allocation to guests in this zone. If one NIC is used, these IPs should be in the same CIDR as the pod CIDR. -# message.installWizard.tooltip.configureGuestTraffic.guestStartIp=The range of IP addresses that will be available for allocation to guests in this zone. If one NIC is used, these IPs should be in the same CIDR as the pod CIDR. -message.installWizard.tooltip.configureGuestTraffic.guestNetmask=Le masque réseau que les instances devrait utiliser sur le réseau -message.installWizard.tooltip.configureGuestTraffic.guestGateway=La passerelle que les instances clientes doivent utiliser -message.installWizard.tooltip.configureGuestTraffic.description=Desctiption pour ce réseau -message.installWizard.tooltip.configureGuestTraffic.name=Nom pour ce réseau -# message.installWizard.tooltip.addZone.internaldns2=These are DNS servers for use by system VMs in the zone. These DNS servers will be accessed via the private network interface of the System VMs. The private IP address you provide for the pods must have a route to the DNS server named here. -# message.installWizard.tooltip.addZone.internaldns1=These are DNS servers for use by system VMs in the zone. These DNS servers will be accessed via the private network interface of the System VMs. The private IP address you provide for the pods must have a route to the DNS server named here. -# message.installWizard.tooltip.addZone.dns2=These are DNS servers for use by guest VMs in the zone. 
These DNS servers will be accessed via the public network you will add later. The public IP addresses for the zone must have a route to the DNS server named here. -message.installWizard.tooltip.addZone.name=Nom pour la zone -# message.installWizard.tooltip.addZone.dns1=These are DNS servers for use by guest VMs in the zone. These DNS servers will be accessed via the public network you will add later. The public IP addresses for the zone must have a route to the DNS server named here. -message.setup.successful=Installation Cloud réussie \! -label.may.continue=Vous pouvez continuer. -error.installWizard.message=Une erreur s\'est produite; vous pouvez retourner en arrière et corriger les erreurs -message.installWizard.now.building=Construction de votre Cloud en cours -message.installWizard.click.retry=Appuyer sur le bouton pour essayer à nouveau le démarrage. -label.launch=Démarrer -label.installWizard.click.launch=Appuyer sur le bouton démarrer. -label.congratulations=Félicitations \! -label.installWizard.addSecondaryStorageIntro.subtitle=Qu\'est ce que le stockage secondaire ? -label.installWizard.addSecondaryStorageIntro.title=Ajoutons du stockage secondaire -label.installWizard.addPrimaryStorageIntro.subtitle=Qu\'est ce que le stockage primaire ? -label.installWizard.addPrimaryStorageIntro.title=Ajoutons du stockage primaire -label.installWizard.addHostIntro.subtitle=Qu\'est ce qu\'un serveur ? -label.installWizard.addHostIntro.title=Ajoutons un serveur -label.installWizard.addClusterIntro.subtitle=Qu\'est ce qu\'un cluster ? -label.installWizard.addClusterIntro.title=Ajoutons un cluster -label.installWizard.addPodIntro.subtitle=Qu\'est ce qu\'un pod ? -label.installWizard.addPodIntro.title=Ajoutons un pod -label.installWizard.addZone.title=Ajouter une zone -label.installWizard.addZoneIntro.subtitle=Qu\'est ce qu\'une zone ? 
-label.installWizard.addZoneIntro.title=Ajoutons une zone -error.password.not.match=Les mot de passe ne correspondent pas -label.confirm.password=Confirmer le mot de passe -message.change.password=Merci de modifier votre mot de passe. -label.save.and.continue=Enregistrer et continuer -label.skip.guide=J\'ai déjà utilisé CloudStack avant, passer ce tutorial -label.continue.basic.install=Continuer avec l\'installation basique -label.introduction.to.cloudstack=Introduction à CloudStack -label.what.is.cloudstack=Qu\'est ce que CloudStack ? -label.hints=Astuces -label.installWizard.subtitle=Ce tutorial vous aidera à configurer votre installation CloudStack -label.continue=Continuer -label.installWizard.title=Bonjour et bienvenue dans CloudStack -label.agree=Accepter -label.manage.resources=Gérer les ressources -label.port.forwarding.policies=Règles de transfert de port -label.load.balancing.policies=Règles de répartition de charge -label.networking.and.security=Réseau et sécurité -label.bandwidth=Bande passante -label.virtual.machines=Machines virtuelles -label.compute.and.storage=Processeur et Stockage -label.task.completed=Tâche terminée -label.update.project.resources=Mettre à jour les ressources du projet -label.remove.project.account=Supprimer le compte projet -label.item.listing=Liste des éléments -message.select.item=Merci de sélectionner un élément. -label.removing=Suppression -label.invite=Inviter -label.add.by=Ajouté par -label.max.vms=Max VMs utilisateur -label.max.public.ips=Max IP publiques -label.max.volumes=Max volumes -label.max.snapshots=Max snapshots -label.max.templates=Max images -# label.max.vpcs=Max. 
VPCs -label.project.dashboard=Tableau de bord projet -label.remind.later=Rappeler moi plus tard -label.invited.accounts=Comptes invités -label.invite.to=Inviter sur -label.add.accounts.to=Ajouter des comptes sur -label.add.accounts=Ajouter des comptes -label.project.name=Nom du projet -label.create.project=Créer un projet -label.networks=Réseaux -label.launch.vm=Démarrer VM -label.new.vm=Nouvelle VM -label.previous=Retour -label.add.to.group=Ajouter au groupe -message.vm.review.launch=Merci de vérifier les informations suivantes et de confirmer que votre instance virtuelle est correcte avant de la démarrer. -message.select.security.groups=Merci de sélectionner un(des) groupe(s) de sécurité pour la nouvelle VM -label.new=Nouveau -message.please.select.networks=Selectionner les réseaux pour votre instance. -message.please.proceed=Continuer vers la prochaine étape. -message.zone.no.network.selection=La zone sélectionnée ne propose pas le réseau choisi -label.no.thanks=Non merci -label.my.templates=Mes images -message.select.template=Sélectionner une image pour votre nouvelle instance virtuelle. -message.select.iso=Sélectionner une ISO pour votre nouvelle instance virtuelle. -message.template.desc=Image OS pouvant être utilisée pour démarrer une VM -message.iso.desc=Image ISO contenant des données ou des binaires de démarrage -label.select.iso.or.template=Sélectionner une ISO ou une image -message.select.a.zone=Une zone corespond en général à un seul datacenter. Des zones multiples peuvent permettre de rendre votre cloud plus fiable en apportant une isolation physique et de la redondance. 
-label.select.a.zone=Sélectionner une zone -label.review=Revoir -label.select.a.template=Sélectionner une image -label.setup=Configuration -state.Allocated=Alloué -changed.item.properties=Propriétés de l\'élément modifiées -label.apply=Appliquer -label.default=Par défaut -label.viewing=Consultation en cours -label.move.to.top=Placer au dessus -label.move.up.row=Monter d\'un cran -label.move.down.row=Descendre d\'un cran -# label.move.to.bottom=Move to bottom -label.drag.new.position=Déplacer sur une autre position -label.order=Ordre -label.no.data=Aucune donnée -label.change.value=Modifier la valeur -label.clear.list=Purger la liste -label.full.path=Chemin complet -message.add.domain=Spécifier le sous domaine que vous souhaitez créer sous ce domaine -message.delete.user=Confirmer la suppression de l\'utilisateur. -message.enable.user=Confirmer l\'activation de cet utilisateur. -message.disable.user=Confirmer la désactivation de l\'utilisateur. -message.generate.keys=Confirmer la génération de nouvelles clefs pour cet utilisateur. -message.update.resource.count=Confirmer la mise à jour des ressources pour ce compte. 
-message.edit.account=Modifier ("-1" signifie pas de limite de ressources) -label.total.of.vm=Total VM -label.total.of.ip=Total adresses IP -state.enabled=Actif -message.action.download.iso=Confirmer le téléchargement de l\'ISO -message.action.download.template=Confirmer le téléchargement de l\'image -label.destination.zone=Zone destination -label.keyboard.type=Type de clavier -label.nic.adapter.type=Type de carte réseau -label.root.disk.controller=Controlleur disque principal -label.community=Communauté -label.remove.egress.rule=Supprimer la règle sortante -label.add.egress.rule=Ajouter la règle sortante -label.egress.rule=Règle sortante -label.remove.ingress.rule=Supprimer la règle entrante -label.delete.vpn.user=Supprimer l\'utilisateur VPN -label.add.vpn.user=Ajouter un utilisateur VPN -label.remove.pf=Supprimer la règle de transfert de port -label.remove.vm.from.lb=Supprimer la VM de la règle de répartition de charge -label.add.vms.to.lb=Ajouter une/des VM(s) à la règle de répartition de charge -label.add.vm=Ajouter VM -label.remove.static.nat.rule=Supprimer le NAT statique -label.remove.rule=Supprimer la règle -label.add.static.nat.rule=Ajouter une règle de NAT statique -label.add.rule=Ajouter règle -label.configuration=Configuration -message.disable.vpn=Etes vous sûr de vouloir désactiver le VPN ? -label.disable.vpn=Désactiver le VPN -message.enable.vpn=Confirmer l\'activation de l\'accès VPN pour cette adresse IP. -label.enable.vpn=Activer VPN -message.acquire.new.ip=Confirmer l\'ajout d\'une nouvelle adresse IP pour ce réseau. 
-label.elastic=Elastique -label.my.network=Mon réseau -label.add.vms=Ajouter VMs -label.configure=Configurer -label.stickiness=Fidélité -label.source=Origine -label.least.connections=Le moins de connexions -label.round.robin=Aléatoire -label.restart.required=Rédémarrage nécessaire -label.clean.up=Nettoyage -label.restart.network=Redémarrage du réseau -label.edit.network.details=Modifier les paramètres réseau -label.add.guest.network=Ajout d\'un réseau pour les VM -label.guest.networks=Réseau pour les VM -message.ip.address.changed=Vos adresses IP ont peut être changées; Voulez vous raffraichir la liste ? Dans ce cas, le panneau de détail se fermera. -state.BackingUp=Sauvegarde en cours -state.BackedUp=Sauvegardé -label.done=Terminé -label.vm.name=Nom de la VM -message.migrate.volume=Confirmer la migration du volume vers un autre stockage primaire. -label.migrate.volume=Migration du volume vers un autre stockage primaire -message.create.template=Voulez vous créer l\'image ? -label.create.template=Création d\'image -message.download.volume.confirm=Confirmer le téléchargement du volume -message.detach.disk=Voulez vous détacher ce disque ? -state.ready=Prêt -state.Ready=Prêt -label.vm.display.name=Nom d\'affichage de la VM -label.select-view=Sélectionner la vue -label.local.storage=Stockage local -label.direct.ips=IP directes -label.view.all=Voir tout -label.zone.details=Détails de la zone -message.alert.state.detected=Etat d\'alerte détecté -state.Starting=Démarrage en cours -state.Expunging=Purge en cours -state.Creating=Création en cours -message.decline.invitation=Voulez vous refuser cette invitation au projet ? -label.decline.invitation=Refuser l\'invitation -message.confirm.join.project=Confirmer l\'invitation au projet -message.join.project=Vous avez rejoint un projet. Sélectionnez la vue Projet pour le voir. 
-label.accept.project.invitation=Accepter l\'invitation au projet -label.token=Jeton unique -label.project.id=ID projet -message.enter.token=Entrer le jeton unique reçu dans le message d\'invitation. -label.enter.token=Entrez le jeton unique -state.Accepted=Accepté -state.Pending=En attente -state.Completed=Terminé -state.Declined=Refusé -label.project=Projet -label.invitations=Invitations -label.delete.project=Supprimer projet -message.delete.project=Confirmer la suppression du projet -message.activate.project=Confirmer l\'activation de ce projet -label.activate.project=Activer projet -label.suspend.project=Suspendre projet -message.suspend.project=Confirmer la suspension de ce projet -state.Suspended=Suspendu -label.edit.project.details=Modifier les détails du projet -label.new.project=Nouveau projet -state.Active=Actif -state.Disabled=Désactivé -label.projects=Projets -label.make.project.owner=Devenir propriétaire du projet -label.remove.project.account=Supprimer le compte projet -message.project.invite.sent=Invitation envoyée; les utilisateurs seront ajoutés après acceptation de l\'invitation -label.add.account.to.project=Ajouter un compte au projet -label.revoke.project.invite=Revoquer l\'invitation -label.project.invite=Inviter sur le projet -label.select.project=Sélectionner un projet -message.no.projects=Vous n\'avez pas de projet.
Vous pouvez en créer un depuis la section projets. -message.no.projects.adminOnly=Vous n\'avez pas de projet.
Contacter votre administrateur pour ajouter un projet. -message.pending.projects.1=Vous avez des invitations projet en attente \: -message.pending.projects.2=Pour les visualiser, aller dans la section projets, puis sélectionner invitation dans la liste déroulante. -message.instanceWizard.noTemplates=Vous n\'avez pas de image disponible; Ajouter une image compatible puis relancer l\'assistant de création d\'instance. -label.view=Voir -instances.actions.reboot.label=Redémarrer l\'instance -label.filterBy=Filtrer par -label.ok=OK -notification.reboot.instance=Redémarrer l\'instance -notification.start.instance=Démarrer l\'instance -notification.stop.instance=Stopper l\'instance -label.display.name=Nom d\'affichage -label.zone.name=Nom de la zone -ui.listView.filters.all=Tous -ui.listView.filters.mine=Mon -state.Running=Démarrée -state.Stopped=Arrétée -state.Destroyed=Supprimée -state.Error=Erreur -message.reset.password.warning.notPasswordEnabled=L\'image de cette instance a été créée sans la gestion de mot de passe -message.reset.password.warning.notStopped=Votre instance doit être arretée avant de changer son mot de passe -label.notifications=Messages -label.default.view=Vue par défaut -label.project.view=Vue projet - -message.add.system.service.offering=Ajouter les informations suivantes pour créer une nouvelle offre système. -message.action.delete.system.service.offering=Confirmer la suppression de l\'offre système. 
-label.action.delete.system.service.offering=Supprimer l\'offre système -label.hypervisor.capabilities=Fonctions hyperviseur -label.hypervisor.version=Version hyperviseur -label.max.guest.limit=Nombre maximum d\'instances -label.add.network.offering=Ajouter une offre réseau -label.supported.services=Services supportés -label.service.capabilities=Fonctions disponibles -label.guest.type=Type d\'instance -label.specify.IP.ranges=Plages IP -label.conserve.mode=Conserver le mode -label.created.by.system=Créé par le system -label.menu.system.service.offerings=Offres système -label.add.system.service.offering=Ajouter une offre de service système -label.redundant.router.capability=Router redondant -label.supported.source.NAT.type=Type de NAT supporté -label.elastic.LB=Répartition de charge extensible -label.LB.isolation=Répartition de charge isolée -label.elastic.IP=IP extensible -label.network.label.display.for.blank.value=Utiliser la passerelle par défaut -label.xen.traffic.label=Label pour le trafic Xenserver -label.kvm.traffic.label=Label pour le trafic KVM -label.vmware.traffic.label=Label pour le trafic VMware -label.start.IP=Démarrer l\'IP -label.end.IP=Résilier l\'IP -label.remove.ip.range=Supprimer la plage IP -label.ip.ranges=Plages IP -label.start.vlan=VLAN de départ -label.end.vlan=VLAN de fin -label.broadcast.domain.range=Plage de brodcast domaine -label.compute=Processeur -message.add.guest.network=Confirmer l\'ajout du réseau -label.subdomain.access=Accès au sous domaine -label.guest.start.ip=Adresse IP de début pour les instances -label.guest.end.ip=Adresse IP de fin pour les instances -label.virtual.router=Routeur Virtuel -label.physical.network.ID=ID du réseau physique -label.destination.physical.network.id=ID du réseau physique de destination -label.dhcp=DHCP -label.destroy.router=Supprimer le routeur -message.confirm.destroy.router=Confirmer la suppression du routeur -label.change.service.offering=Modifier l\'offre de service -label.view.console=Voir la 
console -label.redundant.state=Etat redondant -label.enable.provider=Activer le provider -message.confirm.enable.provider=Confirmer l\'activation de ce provider -label.disable.provider=Désactiver ce provider -message.confirm.disable.provider=Confirmer la désactivation de ce provider -label.shutdown.provider=Eteindre le provider -message.confirm.shutdown.provider=Confirmer l\'extinction de ce provider -label.netScaler=Netscaler -label.add.new.NetScaler=Ajouter Netscaler -label.capacity=Capacité -label.dedicated=Dédié -label.f5=F5 -label.add.new.F5=Ajouter un F5 -label.srx=SRX -label.providers=Fournisseurs -label.add.new.SRX=Ajouter un SRX -label.timeout=Expiration -label.public.network=Réseau public -label.private.network=Réseau privé -label.enable.swift=Activer Swift -confirm.enable.swift=Remplir les informations suivantes pour activer Swift -message.after.enable.swift=Swift configuré. Remarque \: une fois quitter cette page, il ne sera plus possible de reconfiguré Swift une nouvelle fois. -label.key=Clef -label.delete.NetScaler=Supprimer Netscaler -message.confirm.delete.NetScaler=Confirmer la suppression du Netscaler -label.delete.F5=Supprimer F5 -message.confirm.delete.F5=Confirmer la suppression du F5 -label.delete.SRX=Supprimer SRX -message.confirm.delete.SRX=Confirmer la suppression du SRX -label.pods=Pods -label.pod.name=Nom du pod -label.reserved.system.gateway=Gateway système réservée -label.reserved.system.netmask=Masque réseau système réservé -label.start.reserved.system.IP=Adresse IP de démarrage réservée pour le système -label.end.reserved.system.IP=Adresse IP de fin réservée pour le système -label.clusters=Clusters -label.cluster.name=Nom du cluster -label.host.MAC=MAC serveur -label.agent.username=Compte de l\'agent -label.agent.password=Mot de passe de l\'agent -message.confirm.action.force.reconnect=Confirmer que reconnection forcée de ce serveur. 
-label.resource.state=Etat des ressources -label.LUN.number=N° LUN -message.confirm.remove.IP.range=Confirmer la suppression de cette plage d\'adresses IP -message.tooltip.zone.name=Nom pour la zone. -message.tooltip.dns.1=Nom d\'un serveur DNS utilisé par les VM de la zone. Les adresses IP publiques de cette zones doivent avoir une route vers ce serveur. -message.tooltip.dns.2=Nom d\'un serveur DNS secondaire utilisé par les VM de la zone. Les adresses IP publiques de cette zones doivent avoir une route vers ce serveur. -message.tooltip.internal.dns.1=Nom d\'un serveur DNS que CloudStack peut utiliser pour les VM système dans cette zone. Les adresses IP privées des pods doivent avoir une route vers ce serveur. -message.tooltip.internal.dns.2=Nom d\'un serveur DNS que CloudStack peut utiliser pour les VM système dans cette zone. Les adresses IP privées des pods doivent avoir une route vers ce serveur. -message.tooltip.network.domain=Suffixe DNS pour les noms de domaine personnalisés accèdé par les intances. -message.tooltip.pod.name=Nom pour le pod. -message.tooltip.reserved.system.gateway=La passerelle pour les serveur du pod. -message.tooltip.reserved.system.netmask=Le préfixe réseau utilisé par le sous-réseau du pod. Au format CIDR. 
-message.creating.zone=Création de la zone -message.creating.physical.networks=Création des réseaux physiques -message.configuring.physical.networks=Configuration des réseaux physiques -message.adding.Netscaler.device=Ajout du Netscaler -message.creating.pod=Création d\'un pod -message.configuring.public.traffic=Configuration du réseau public -message.configuring.storage.traffic=Configuration du réseau de stockage -message.configuring.guest.traffic=Configuration du réseau VM -message.creating.cluster=Création du cluster -message.adding.host=Ajout du serveur -message.creating.primary.storage=Création du stockage primaire -message.creating.secondary.storage=Création du stockage secondaire -message.Zone.creation.complete=Création de la zone terminée -message.enabling.zone=Activation de la zone -error.something.went.wrong.please.correct.the.following=Erreur; corriger le point suivant -error.could.not.enable.zone=Impossible d\'activer la zone -message.zone.creation.complete.would.you.like.to.enable.this.zone=Création de la zone terminée. Voulez vous l\'activer ? -message.please.add.at.lease.one.traffic.range=Ajouter au moins une plage réseau -message.you.must.have.at.least.one.physical.network=Vous devez avoir au moins un réseau physique -message.please.select.a.different.public.and.management.network.before.removing=Sélectionner un réseau publique et management différent avant de supprimer - -label.zone.type=Type de zone -label.setup.zone=Configurer la zone -label.setup.network=Configurer le réseau -label.add.resources=Ajouter des resssources -label.launch=Démarrer -label.set.up.zone.type=configurer le type de zone -message.please.select.a.configuration.for.your.zone=Sélectionner une configuration pour la zone. -# message.desc.basic.zone=Provide a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). 
-label.basic=Basic -# message.desc.advanced.zone=For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support. -label.advanced=Avancé -# message.desc.zone=A zone is the largest organizational unit in CloudStack, and it typically corresponds to a single datacenter. Zones provide physical isolation and redundancy. A zone consists of one or more pods (each of which contains hosts and primary storage servers) and a secondary storage server which is shared by all pods in the zone. -label.physical.network=Réseau physique -label.public.traffic=Trafic public -label.guest.traffic=Trafic invité -label.storage.traffic=Trafic stockage -# message.setup.physical.network.during.zone.creation=When adding an advanced zone, you need to set up one or more physical networks. Each network corresponds to a NIC on the hypervisor. Each physical network can carry one or more types of traffic, with certain restrictions on how they may be combined.

Drag and drop one or more traffic types onto each physical network. -label.add.physical.network=Ajouter un réseau physique -label.traffic.types=Type de trafic -label.management=Management -label.guest=Invité -label.please.specify.netscaler.info=Renseigner les informations sur le Netscaler -# message.public.traffic.in.advanced.zone=Public traffic is generated when VMs in the cloud access the internet. Publicly-accessible IPs must be allocated for this purpose. End users can use the CloudStack UI to acquire these IPs to implement NAT between their guest network and their public network.

Provide at least one range of IP addresses for internet traffic. -# message.public.traffic.in.basic.zone=Public traffic is generated when VMs in the cloud access the Internet or provide services to clients over the Internet. Publicly accessible IPs must be allocated for this purpose. When a instance is created, an IP from this set of Public IPs will be allocated to the instance in addition to the guest IP address. Static 1-1 NAT will be set up automatically between the public IP and the guest IP. End users can also use the CloudStack UI to acquire additional IPs to implement static NAT between their instances and the public IP. -# message.add.pod.during.zone.creation=Each zone must contain in one or more pods, and we will add the first pod now. A pod contains hosts and primary storage servers, which you will add in a later step. First, configure a range of reserved IP addresses for CloudStack\'s internal management traffic. The reserved IP range must be unique for each zone in the cloud. -# message.guest.traffic.in.advanced.zone=Guest network traffic is communication between end-user virtual machines. Specify a range of VLAN IDs to carry guest traffic for each physical network. -# message.guest.traffic.in.basic.zone=Guest network traffic is communication between end-user virtual machines. Specify a range of IP addresses that CloudStack can assign to guest VMs. Make sure this range does not overlap the reserved system IP range. -# message.storage.traffic=Traffic between CloudStack\'s internal resources, including any components that communicate with the Management Server, such as hosts and CloudStack system VMs. Please configure storage traffic here. -# message.desc.cluster=Each pod must contain one or more clusters, and we will add the first cluster now. A cluster provides a way to group hosts. The hosts in a cluster all have identical hardware, run the same hypervisor, are on the same subnet, and access the same shared storage. 
Each cluster consists of one or more hosts and one or more primary storage servers. -# message.desc.host=Each cluster must contain at least one host (computer) for guest VMs to run on, and we will add the first host now. For a host to function in CloudStack, you must install hypervisor software on the host, assign an IP address to the host, and ensure the host is connected to the CloudStack management server.

Give the host\'s DNS or IP address, the user name (usually root) and password, and any labels you use to categorize hosts. -# message.desc.primary.storage=Each cluster must contain one or more primary storage servers, and we will add the first one now. Primary storage contains the disk volumes for all the VMs running on hosts in the cluster. Use any standards-compliant protocol that is supported by the underlying hypervisor. -# message.desc.secondary.storage=Each zone must have at least one NFS or secondary storage server, and we will add the first one now. Secondary storage stores VM templates, ISO images, and VM disk volume snapshots. This server must be available to all hosts in the zone.

Provide the IP address and exported path. -label.launch.zone=Démarrer la zone -message.please.wait.while.zone.is.being.created=Patienter pendant la création de la zone, cela peut prendre du temps... - -label.load.balancing=Load Balancing -label.static.nat.enabled=NAT statique activé -label.zones=Zones -label.view.more=Voir plus -label.number.of.zones=Nombre de zones -label.number.of.pods=Nombre de Pods -label.number.of.clusters=Nombre de clusters -label.number.of.hosts=Nombre de serveurs -label.total.hosts=Total serveurs -label.total.CPU=Capacité Totale en CPU -label.total.memory=Total mémoire -label.total.storage=Total stockage -label.purpose=Rôle - - - - -label.action.migrate.router=Migration routeur -label.action.migrate.router.processing=Migration routeur en cours... -message.migrate.router.confirm=Confirmer la migration du routeur vers \: -label.migrate.router.to=Migrer le routeur vers - -label.action.migrate.systemvm=Migration VM système -label.action.migrate.systemvm.processing=Migration VM système en cours ... -message.migrate.systemvm.confirm=Confirmer la migration de la VM système vers \: -label.migrate.systemvm.to=Miger la VM système vers - - -mode=Mode -side.by.side=Côte à cote -inline=Aligné - -extractable=Décompressable - -label.ocfs2=OCFS2 - -label.action.edit.host=Modifier l\'hôte - -network.rate=Débit Réseau - -ICMP.type=Type ICMP +#Stored by I18NEdit, may be edited! ICMP.code=Code ICMP - -image.directory=Répertoire d\'images - -label.action.create.template.from.vm=Créer un modèle depuis la VM -label.action.create.template.from.volume=Créer un modèle depuis le volume - -message.vm.create.template.confirm=Créer un modèle va redémarrer la VM automatiquement - -label.action.manage.cluster=Gérer le Cluster -message.action.manage.cluster=Confirmez que vous voulez gérer le cluster -label.action.manage.cluster.processing=Gestion du cluster... 
- -label.action.unmanage.cluster=Ne plus gérer le Cluster -message.action.unmanage.cluster=Confirmez que vous ne voulez plus gérer le cluster -label.action.unmanage.cluster.processing=Arrêt de la gestion du Cluster - -label.allocation.state=Etat de l\'allocation -managed.state=Etat de la gestion - -label.default.use=Utilisation par défaut -label.host.tags=Labels Server - -label.cidr=CIDR -label.cidr.list=CIDR Source - -label.storage.tags=Etiquettes de stockage - -label.redundant.router=Routeur redondant -label.is.redundant.router=Redondant - +ICMP.type=Type ICMP +changed.item.properties=Propri\u00E9t\u00E9s de l\\'\u00E9l\u00E9ment modifi\u00E9es +confirm.enable.s3=Remplir les informations suivantes pour activer le support de stockage secondaire S3 +confirm.enable.swift=Remplir les informations suivantes pour activer Swift +error.could.not.enable.zone=Impossible d\\'activer la zone +error.installWizard.message=Une erreur s\\'est produite ; vous pouvez retourner en arri\u00E8re et corriger les erreurs +error.invalid.username.password=Utilisateur ou mot de passe invalide +error.login=Votre nom d\\'utilisateur / mot de passe ne correspond pas \u00E0 nos donn\u00E9es. +error.menu.select=\u00C9chec de l\\'action car il n\\'y a aucun \u00E9l\u00E9ment s\u00E9lectionn\u00E9. +error.mgmt.server.inaccessible=Le serveur de gestion est indisponible. Essayez plus tard. +error.password.not.match=Les mots de passe ne correspondent pas +error.please.specify.physical.network.tags=L\\'offre r\u00E9seau ne sera pas disponible tant que des libell\u00E9s n\\'auront pas \u00E9t\u00E9 renseign\u00E9s pour ce r\u00E9seau physique. +error.session.expired=Votre session a expir\u00E9e. +error.something.went.wrong.please.correct.the.following=Erreur; corriger le point suivant +error.unable.to.reach.management.server=Impossible de joindre le serveur d\\'administration +error.unresolved.internet.name=Votre nom Internet ne peut pas \u00EAtre r\u00E9solu. 
+extractable=D\u00E9compressable
force.delete=Forcer la suppression
-force.delete.domain.warning=Attention\: Choisir cette opion entrainera la suppression de tous les domaines issus et l\'ensemble des comptes associées, ainsi que de leur ressources
-
+force.delete.domain.warning=Attention \: Choisir cette option entra\u00EEnera la suppression de tous les domaines issus et l\\'ensemble des comptes associ\u00E9s, ainsi que de leurs ressources
force.remove=Forcer la suppression
-force.remove.host.warning=Attention\: Choisir cette option entrainera CloudStack à  arrêter l\'ensemble des machines virtuelles avant d\'enlever l\'hôte du cluster
-
-force.stop=Forcer l\'arrêt
-force.stop.instance.warning=Attention \: un arrêt forcé sur cette instance est l\'option ultime. Cela peut engendrer des pertes de données et/ou un comportement inconsitent de votre instance.
-
-label.PreSetup=PreSetup
-label.SR.name = Nom du point de montage
-label.SharedMountPoint=Point de montage partagé
-label.clvm=CLVM
-label.volgroup=Volume Group
-label.VMFS.datastore=datastore VMFS
-
-label.network.device=Equipement Réseau
-label.add.network.device=Ajouter un équipement réseau
-label.network.device.type=Type d\'équipement réseau
+force.remove.host.warning=Attention \: Choisir cette option entra\u00EEnera CloudStack \u00E0 arr\u00EAter l\\'ensemble des machines virtuelles avant d\\'enlever l\\'h\u00F4te du cluster
+force.stop=Forcer l\\'arr\u00EAt
+force.stop.instance.warning=Attention \: un arr\u00EAt forc\u00E9 sur cette instance est la derni\u00E8re option. Cela peut engendrer des pertes de donn\u00E9es et/ou un comportement inconsistant de votre instance.
+image.directory=R\u00E9pertoire d\\'images +inline=Align\u00E9 +instances.actions.reboot.label=Red\u00E9marrer l\\'instance +label.CIDR.list=Liste CIDR +label.CIDR.of.destination.network=CIDR du r\u00E9seau de destination +label.CPU.cap=Limitation CPU label.DHCP.server.type=Serveur DHCP +label.DNS.domain.for.guest.networks=Domaine DNS pour les r\u00E9seaux invit\u00E9s +label.ESP.encryption=Chiffrement ESP +label.ESP.hash=Empreinte ESP +label.ESP.lifetime=Dur\u00E9e de vie ESP (secondes) +label.ESP.policy=Mode ESP +label.IKE.DH=DH IKE +label.IKE.encryption=Chiffrement IKE +label.IKE.hash=Empreinte IKE +label.IKE.lifetime=Dur\u00E9e de vie IKE (secondes) +label.IKE.policy=Mode IKE +label.IPsec.preshared.key=Cl\u00E9 partag\u00E9e IPsec +label.LB.isolation=R\u00E9partition de charge isol\u00E9e +label.LUN.number=N\u00B0 LUN +label.PING.CIFS.password=Mot de passe CIFS PING +label.PING.CIFS.username=Identifiant CIFS PING +label.PING.dir=R\u00E9pertoire PING +label.PING.storage.IP=IP stockage PING +label.PreSetup=PreSetup label.Pxe.server.type=Serveur PXE -label.PING.storage.IP=adresse PING (stockage) -label.PING.dir=répertoire PING -label.TFTP.dir=répertoire TFTP -label.PING.CIFS.username=utilisateur CIFS PING -label.PING.CIFS.password=mot de passe CIFS PING -label.CPU.cap=Utilisation maximum du CPU - - -label.action.enable.zone=Activer la zone -label.action.enable.zone.processing=Activation de la zone... -message.action.enable.zone=Confirmez que vous voulez activer cette zone -label.action.disable.zone=Désactivation de la zone -label.action.disable.zone.processing=Désactivation de la zone... -message.action.disable.zone=Confirmez que vous voulez désactiver cette zone - -label.action.enable.pod=Activer le Pod -label.action.enable.pod.processing=Activation du Pod... -message.action.enable.pod=Confirmez que vous souhaitez activer ce Pod -label.action.disable.pod=Désactiver le Pod -label.action.disable.pod.processing=Désactivation du Pod... 
-message.action.disable.pod=Confirmez que vous voulez désactiver ce Pod - -label.action.enable.cluster=Activer le cluster -label.action.enable.cluster.processing=Activation du cluster... -message.action.enable.cluster=Confirmez que vous souhaitez activer ce cluster -label.action.disable.cluster=Désactiver le cluster -label.action.disable.cluster.processing=Désactivation du cluster... -message.action.disable.cluster=Confirmez que vous souhaitez désactiver ce cluster - +label.SR.name=Nom du point de montage +label.SharedMountPoint=Point de montage partag\u00E9 +label.TFTP.dir=R\u00E9pertoire TFTP +label.VMFS.datastore=Magasin de donn\u00E9es VMFS +label.VMs.in.tier=Machines virtuelles dans le tiers +label.VPC.router.details=D\u00E9tails routeur VPC +label.VPN.connection=Connexion VPN +label.VPN.customer.gateway=Passerelle VPN client +label.VPN.gateway=Passerelle VPN +label.accept.project.invitation=Accepter l\\'invitation au projet +label.account=Compte +label.account.and.security.group=Compte, groupe de s\u00E9curit\u00E9 label.account.id=ID du Compte label.account.name=Nom du compte -label.account.specific=Spécifique au compte -label.account=Compte +label.account.specific=Sp\u00E9cifique au compte label.accounts=Comptes -label.acquire.new.ip=Acquérir une nouvelle adresse IP -label.show.ingress.rule=Montrer la règle Ingress -label.hide.ingress.rule=Cacher la règle Ingress -label.action.attach.disk.processing=Attachement du Disque... -label.action.attach.disk=Attacher un disque -label.action.attach.iso.processing=Attachement de l\'image ISO -label.action.attach.iso=Attacher une image ISO -label.action.cancel.maintenance.mode.processing=Annulation du mode maintenance... +label.acquire.new.ip=Acqu\u00E9rir une nouvelle adresse IP +label.action.attach.disk=Rattacher un disque +label.action.attach.disk.processing=Rattachement du Disque... 
+label.action.attach.iso=Rattacher une image ISO
+label.action.attach.iso.processing=Rattachement de l\\'image ISO
label.action.cancel.maintenance.mode=Annuler le mode maintenance
+label.action.cancel.maintenance.mode.processing=Annulation du mode maintenance...
label.action.change.password=Changer le mot de passe
-label.action.change.service.processing=Changement de d\'offre de service...
-label.action.change.service=Changer d\'offre de service
-label.action.copy.ISO.processing=Copie de l\'image ISO...
+label.action.change.service=Changer d\\'offre de service
+label.action.change.service.processing=Changement d\\'offre de service...
label.action.copy.ISO=Copier une image ISO
-label.action.copy.template.processing=Copie du Modèle...
-label.action.copy.template=Copier un modèle
-label.action.create.template.processing=Création du Modèle...
-label.action.create.template=Créer un modèle
-label.action.create.vm.processing=Création de la VM..
-label.action.create.vm=Créer une VM
-label.action.create.volume.processing=Création du Volume...
-label.action.create.volume=Créer un Volume
-label.action.delete.IP.range.processing=Suppression de la plage IP...
+label.action.copy.ISO.processing=Copie de l\\'image ISO...
+label.action.copy.template=Copier un mod\u00E8le
+label.action.copy.template.processing=Copie du Mod\u00E8le...
+label.action.create.template=Cr\u00E9er un mod\u00E8le
+label.action.create.template.from.vm=Cr\u00E9er un mod\u00E8le depuis la VM
+label.action.create.template.from.volume=Cr\u00E9er un mod\u00E8le depuis le volume
+label.action.create.template.processing=Cr\u00E9ation du Mod\u00E8le...
+label.action.create.vm=Cr\u00E9er une VM
+label.action.create.vm.processing=Cr\u00E9ation de la VM...
+label.action.create.volume=Cr\u00E9er un Volume
+label.action.create.volume.processing=Cr\u00E9ation du Volume...
label.action.delete.IP.range=Supprimer la plage IP
-label.action.delete.ISO.processing=Suppression de l\'image ISO...
-label.action.delete.ISO=Supprimer l\'image ISO -label.action.delete.account.processing=Suppression du compte... +label.action.delete.IP.range.processing=Suppression de la plage IP... +label.action.delete.ISO=Supprimer l\\'image ISO +label.action.delete.ISO.processing=Suppression de l\\'image ISO... label.action.delete.account=Supprimer un compte -label.action.delete.cluster.processing=Suppression du Cluster... +label.action.delete.account.processing=Suppression du compte... label.action.delete.cluster=Supprimer le Cluster -label.action.delete.disk.offering.processing=Suppression de l\'offre Disque... -label.action.delete.disk.offering=Supprimer l\'offre Disque - -label.action.update.resource.count=Mettre à jour le compte d\'utilisation des ressources -label.action.update.resource.count.processing=Mise à jour du compteur... - +label.action.delete.cluster.processing=Suppression du Cluster... +label.action.delete.disk.offering=Supprimer l\\'offre Disque +label.action.delete.disk.offering.processing=Suppression de l\\'offre Disque... label.action.delete.domain=Supprimer le domaine label.action.delete.domain.processing=Suppression du domaine... - -label.action.delete.firewall.processing=Suppression du Parefeu... -label.action.delete.firewall=Supprimer la règle de firewall -label.action.delete.ingress.rule.processing=Suppression de la règle Ingress.. -label.action.delete.ingress.rule=Supprimer la règle Ingress -label.action.delete.load.balancer.processing=Suppression de l\'équilibreur de charge... -label.action.delete.load.balancer=Supprimer la règle de load balancer -label.action.edit.network.processing=Modification du Réseau... -label.action.edit.network=Modifier le réseau -label.action.delete.network.processing=Suppression du réseau... -label.action.delete.network=Supprimer le réseau -label.action.delete.pod.processing=Suppression du pod... 
+label.action.delete.firewall=Supprimer la r\u00E8gle de pare-feu +label.action.delete.firewall.processing=Suppression du Pare-feu... +label.action.delete.ingress.rule=Supprimer la r\u00E8gle d\\'entr\u00E9e +label.action.delete.ingress.rule.processing=Suppression de la r\u00E8gle d\\'entr\u00E9e.. +label.action.delete.load.balancer=Supprimer la r\u00E8gle de r\u00E9partition de charge +label.action.delete.load.balancer.processing=Suppression du r\u00E9partiteur de charge... +label.action.delete.network=Supprimer le r\u00E9seau +label.action.delete.network.processing=Suppression du r\u00E9seau... +label.action.delete.nexusVswitch=Supprimer le Nexus 1000v +label.action.delete.physical.network=Supprimer le r\u00E9seau physique label.action.delete.pod=Supprimer le Pod -label.action.delete.primary.storage.processing=Suppression du stockage primaire... +label.action.delete.pod.processing=Suppression du pod... label.action.delete.primary.storage=Supprimer le stockage primaire -label.action.delete.secondary.storage.processing=Suppression du stockage secondaire... +label.action.delete.primary.storage.processing=Suppression du stockage primaire... label.action.delete.secondary.storage=Supprimer le stockage secondaire -label.action.delete.security.group.processing=Suppression du groupe de sécurité -label.action.delete.security.group=Supprimer le groupe de sécurité -label.action.delete.service.offering.processing=Suppression de l\'offre de service... -label.action.delete.service.offering=Supprimer l\'offre de service -label.action.delete.snapshot.processing=Suppresison de l\'instantané... -label.action.delete.snapshot=Supprimer l\'instantané -label.action.delete.template.processing=Suppression du modèle... -label.action.delete.template=Supprimer le modèle -label.action.delete.user.processing=Suppression de l\'utilisateur... -label.action.delete.user=Supprimer l\'utilisateur -label.action.delete.volume.processing=Suppression du volume... 
+label.action.delete.secondary.storage.processing=Suppression du stockage secondaire... +label.action.delete.security.group=Supprimer le groupe de s\u00E9curit\u00E9 +label.action.delete.security.group.processing=Suppression du groupe de s\u00E9curit\u00E9 +label.action.delete.service.offering=Supprimer l\\'offre de service +label.action.delete.service.offering.processing=Suppression de l\\'offre de service... +label.action.delete.snapshot=Supprimer l\\'instantan\u00E9 +label.action.delete.snapshot.processing=Suppression de l\\'instantan\u00E9... +label.action.delete.system.service.offering=Supprimer l\\'offre syst\u00E8me +label.action.delete.template=Supprimer le mod\u00E8le +label.action.delete.template.processing=Suppression du mod\u00E8le... +label.action.delete.user=Supprimer l\\'utilisateur +label.action.delete.user.processing=Suppression de l\\'utilisateur... label.action.delete.volume=Supprimer le volume -label.action.delete.zone.processing=Suppression de la zone... +label.action.delete.volume.processing=Suppression du volume... label.action.delete.zone=Supprimer la zone -label.action.destroy.instance.processing=Suppression de l\'instance... -label.action.destroy.instance=Supprimer l\'instance -label.action.destroy.systemvm.processing=Suppression de la VM Système... -label.action.destroy.systemvm=Supprimer la VM Système -label.action.detach.disk.processing=Détachement du disque... -label.action.detach.disk=Détacher le disque -label.action.detach.iso.processing=Détachement de l\'image ISO... -label.action.detach.iso=Détacher l\'image ISO -label.action.disable.account.processing=Désactivation du compte... -label.action.disable.account=Désactiver le compte -label.action.disable.static.NAT.processing=Désactivation du NAT Statique... -label.action.disable.static.NAT=Désactiver le NAT Statique -label.action.disable.user.processing=Désactivation du l\'utilisateur... 
-label.action.disable.user=Désactiver l\'utilisateur -label.action.download.ISO=Télécharger une image ISO -label.action.download.template=Télécharger un modèle -label.action.download.volume.processing=Téléchargement du volume... -label.action.download.volume=Télécharger un volume -label.action.edit.ISO=Modifier l\'image ISO +label.action.delete.zone.processing=Suppression de la zone... +label.action.destroy.instance=Supprimer l\\'instance +label.action.destroy.instance.processing=Suppression de l\\'instance... +label.action.destroy.systemvm=Supprimer la VM Syst\u00E8me +label.action.destroy.systemvm.processing=Suppression de la VM Syst\u00E8me... +label.action.detach.disk=D\u00E9tacher le disque +label.action.detach.disk.processing=D\u00E9tachement du disque... +label.action.detach.iso=D\u00E9tacher l\\'image ISO +label.action.detach.iso.processing=D\u00E9tachement de l\\'image ISO... +label.action.disable.account=D\u00E9sactiver le compte +label.action.disable.account.processing=D\u00E9sactivation du compte... +label.action.disable.cluster=D\u00E9sactiver le cluster +label.action.disable.cluster.processing=D\u00E9sactivation du cluster... +label.action.disable.nexusVswitch=D\u00E9sactiver le Nexus 1000v +label.action.disable.physical.network=D\u00E9sactiver le r\u00E9seau physique +label.action.disable.pod=D\u00E9sactiver le Pod +label.action.disable.pod.processing=D\u00E9sactivation du Pod... +label.action.disable.static.NAT=D\u00E9sactiver le NAT Statique +label.action.disable.static.NAT.processing=D\u00E9sactivation du NAT Statique... +label.action.disable.user=D\u00E9sactiver l\\'utilisateur +label.action.disable.user.processing=D\u00E9sactivation de l\\'utilisateur... +label.action.disable.zone=D\u00E9sactivation de la zone +label.action.disable.zone.processing=D\u00E9sactivation de la zone... 
+label.action.download.ISO=T\u00E9l\u00E9charger une image ISO +label.action.download.template=T\u00E9l\u00E9charger un mod\u00E8le +label.action.download.volume=T\u00E9l\u00E9charger un volume +label.action.download.volume.processing=T\u00E9l\u00E9chargement du volume... +label.action.edit.ISO=Modifier l\\'image ISO label.action.edit.account=Modifier le Compte -label.action.edit.disk.offering=Modifier l\'offre de disque +label.action.edit.disk.offering=Modifier l\\'offre de disque label.action.edit.domain=Modifier le domaine label.action.edit.global.setting=Modifier la configuration globale -label.action.edit.instance=Modifier l\'instancer -label.action.edit.network.offering=Modifier l\'offre de service réseau +label.action.edit.host=Modifier l\\'h\u00F4te +label.action.edit.instance=Modifier l\\'instance +label.action.edit.network=Modifier le r\u00E9seau +label.action.edit.network.offering=Modifier l\\'offre de service r\u00E9seau +label.action.edit.network.processing=Modification du R\u00E9seau... label.action.edit.pod=Modifier le pod label.action.edit.primary.storage=Modifier le stockage primaire label.action.edit.resource.limits=Modifier les limites de ressources -label.action.edit.service.offering=Modifier l\'offre de service -label.action.edit.template=Modifier le modèle -label.action.edit.user=Modifier l\'utilisateur +label.action.edit.service.offering=Modifier l\\'offre de service +label.action.edit.template=Modifier le mod\u00E8le +label.action.edit.user=Modifier l\\'utilisateur label.action.edit.zone=Modifier la zone -label.action.enable.account.processing=Activation du compte... label.action.enable.account=Activer le compte -label.action.enable.maintenance.mode.processing=Activation du mode maintenance... +label.action.enable.account.processing=Activation du compte... +label.action.enable.cluster=Activer le cluster +label.action.enable.cluster.processing=Activation du cluster... 
label.action.enable.maintenance.mode=Activer le mode maintenance -label.action.enable.static.NAT.processing=Activation du NAT Statique... +label.action.enable.maintenance.mode.processing=Activation du mode maintenance... +label.action.enable.nexusVswitch=Activer le Nexus 1000v +label.action.enable.physical.network=Activer le r\u00E9seau physique +label.action.enable.pod=Activer le Pod +label.action.enable.pod.processing=Activation du Pod... label.action.enable.static.NAT=Activer le NAT Statique -label.action.enable.user.processing=Activation de l\'utilisateur... -label.action.enable.user=Activer l\'utilisateur -label.action.force.reconnect.processing=Reconnexion en cours... +label.action.enable.static.NAT.processing=Activation du NAT Statique... +label.action.enable.user=Activer l\\'utilisateur +label.action.enable.user.processing=Activation de l\\'utilisateur... +label.action.enable.zone=Activer la zone +label.action.enable.zone.processing=Activation de la zone... label.action.force.reconnect=Forcer la reconnexion -label.action.generate.keys.processing=Génération des clés... -label.action.generate.keys=Générer les clés -label.action.lock.account.processing=Blocage du compte... -label.action.lock.account=Bloquer le compte -label.action.migrate.instance=Migrer l\'instance -label.action.migrate.instance.processing=Migration de l\'instance... -label.action.reboot.instance.processing=Redémarrage de l\'instance... -label.action.reboot.instance=Redémarrer l\'instance -label.action.reboot.router.processing=Redémarrage du routeur... -label.action.reboot.router=Redémarrer le routeur -label.action.reboot.systemvm.processing=Redémarrage de la VM Système... -label.action.reboot.systemvm=Redémarre la VM Système -label.action.recurring.snapshot=Snapshots récurrents -label.action.release.ip.processing=Libération de l\'adresse IP... -label.action.release.ip=Libérer l\'adresse IP -label.action.remove.host.processing=Suppression de l\'hôte... 
-label.action.remove.host=Supprimer l\'hôte -label.action.reset.password.processing=Réinitialisation le mot de passe... -label.action.reset.password=Réinitialiser le mot de passe +label.action.force.reconnect.processing=Reconnexion en cours... +label.action.generate.keys=G\u00E9n\u00E9rer les cl\u00E9s +label.action.generate.keys.processing=G\u00E9n\u00E9ration des cl\u00E9s... +label.action.list.nexusVswitch=Liste des Nexus 1000v +label.action.lock.account=Verrouiller le compte +label.action.lock.account.processing=Verrouillage du compte... +label.action.manage.cluster=G\u00E9rer le Cluster +label.action.manage.cluster.processing=Gestion du cluster... +label.action.migrate.instance=Migrer l\\'instance +label.action.migrate.instance.processing=Migration de l\\'instance... +label.action.migrate.router=Migration routeur +label.action.migrate.router.processing=Migration routeur en cours... +label.action.migrate.systemvm=Migration VM syst\u00E8me +label.action.migrate.systemvm.processing=Migration VM syst\u00E8me en cours ... +label.action.reboot.instance=Red\u00E9marrer l\\'instance +label.action.reboot.instance.processing=Red\u00E9marrage de l\\'instance... +label.action.reboot.router=Red\u00E9marrer le routeur +label.action.reboot.router.processing=Red\u00E9marrage du routeur... +label.action.reboot.systemvm=Red\u00E9marrer la VM Syst\u00E8me +label.action.reboot.systemvm.processing=Red\u00E9marrage de la VM Syst\u00E8me... +label.action.recurring.snapshot=Instantan\u00E9s r\u00E9currents +label.action.register.iso=Enregistrer ISO +label.action.register.template=Enregistrer mod\u00E8le +label.action.release.ip=Lib\u00E9rer l\\'adresse IP +label.action.release.ip.processing=Lib\u00E9ration de l\\'adresse IP... +label.action.remove.host=Supprimer l\\'h\u00F4te +label.action.remove.host.processing=Suppression de l\\'h\u00F4te... 
+label.action.reset.password=R\u00E9-initialiser le mot de passe +label.action.reset.password.processing=R\u00E9-initialisation du mot de passe... label.action.resource.limits=Limites de ressources -label.action.restore.instance.processing=Restauration de l\'instance... -label.action.restore.instance=Restaurer l\'instance -label.action.start.instance.processing=Démarrage de l\'instance -label.action.start.instance=Démarrer l\'instance -label.action.start.router.processing=Démarrage du routeur... -label.action.start.router=Démarrer le routeur -label.action.start.systemvm.processing=Démarrage de la VM système -label.action.start.systemvm=Démarrer la VM système -label.action.stop.instance.processing=Arrêt de l\'Instance... -label.action.stop.instance=Arrêter l\'Instance -label.action.stop.router.processing=Arrêt du routeur... -label.action.stop.router=Arrêter le routeur -label.action.stop.systemvm.processing=Arrêt de la VM système... -label.action.stop.systemvm=Arrêter la VM système -label.action.take.snapshot.processing=Prise de l\'instantané -label.action.take.snapshot=Prendre un instantané -label.action.update.OS.preference.processing=Mise à jour des préférences d\'OS... -label.action.update.OS.preference=Mettre à jour les préférences d\'OS +label.action.restore.instance=Restaurer l\\'instance +label.action.restore.instance.processing=Restauration de l\\'instance... +label.action.start.instance=D\u00E9marrer l\\'instance +label.action.start.instance.processing=D\u00E9marrage de l\\'instance... +label.action.start.router=D\u00E9marrer le routeur +label.action.start.router.processing=D\u00E9marrage du routeur... +label.action.start.systemvm=D\u00E9marrer la VM syst\u00E8me +label.action.start.systemvm.processing=D\u00E9marrage de la VM syst\u00E8me... +label.action.stop.instance=Arr\u00EAter l\\'Instance +label.action.stop.instance.processing=Arr\u00EAt de l\\'Instance... 
+label.action.stop.router=Arr\u00EAter le routeur +label.action.stop.router.processing=Arr\u00EAt du routeur... +label.action.stop.systemvm=Arr\u00EAter la VM syst\u00E8me +label.action.stop.systemvm.processing=Arr\u00EAt de la VM syst\u00E8me... +label.action.take.snapshot=Prendre un instantan\u00E9 +label.action.take.snapshot.processing=Prise de l\\'instantan\u00E9... +label.action.unmanage.cluster=Ne plus g\u00E9rer le Cluster +label.action.unmanage.cluster.processing=Arr\u00EAt de la gestion du Cluster +label.action.update.OS.preference=Mettre \u00E0 jour les pr\u00E9f\u00E9rences d\\'OS +label.action.update.OS.preference.processing=Mise \u00E0 jour des pr\u00E9f\u00E9rences d\\'OS... +label.action.update.resource.count=Mettre \u00E0 jour le compteur des ressources +label.action.update.resource.count.processing=Mise \u00E0 jour du compteur... label.actions=Actions +label.activate.project=Activer projet label.active.sessions=Sessions actives +label.add=Ajouter +label.add.ACL=Ajouter une r\u00E8gle ACL +label.add.F5.device=Ajouter un F5 +label.add.NiciraNvp.device=Ajouter un contr\u00F4leur Nvp +label.add.SRX.device=Ajouter un SRX +label.add.VM.to.tier=Ajouter une machine virtuelle au tiers +label.add.VPN.gateway=Ajouter une passerelle VPN label.add.account=Ajouter un compte +label.add.account.to.project=Ajouter un compte au projet +label.add.accounts=Ajouter des comptes +label.add.accounts.to=Ajouter des comptes sur +label.add.by=Ajout\u00E9 par label.add.by.cidr=Ajouter par CIDR label.add.by.group=Ajouter par groupe label.add.cluster=Ajouter un cluster -label.add.direct.iprange=Ajouter une plage d\'adresse IP directe +label.add.compute.offering=Ajouter une offre de calcul +label.add.direct.iprange=Ajouter une plage d\\'adresse IP directe label.add.disk.offering=Ajouter une offre disque label.add.domain=Ajouter un domaine -label.add.firewall=Ajouter une règle firewall -label.add.host=Ajouter un hôte -label.add.ingress.rule=Ajouter une règle Ingress 
+label.add.egress.rule=Ajouter la r\u00E8gle sortante +label.add.firewall=Ajouter une r\u00E8gle de pare-feu +label.add.guest.network=Ajouter un r\u00E9seau d\\'invit\u00E9 +label.add.host=Ajouter un h\u00F4te +label.add.ingress.rule=Ajouter une r\u00E8gle d\\'entr\u00E9e label.add.ip.range=Ajouter une plage IP -label.add.load.balancer=Ajouter un partageur de charge +label.add.load.balancer=Ajouter un r\u00E9partiteur de charge label.add.more=Ajouter plus -label.add.network=Ajouter un réseau +label.add.netScaler.device=Ajouter un Netscaler +label.add.network=Ajouter un r\u00E9seau +label.add.network.ACL=Ajouter une r\u00E8gle d\\'acc\u00E8s r\u00E9seau ACL +label.add.network.device=Ajouter un \u00E9quipement r\u00E9seau +label.add.network.offering=Ajouter une offre r\u00E9seau +label.add.new.F5=Ajouter un F5 +label.add.new.NetScaler=Ajouter un Netscaler +label.add.new.SRX=Ajouter un SRX +label.add.new.gateway=Ajouter une nouvelle passerelle +label.add.new.tier=Ajouter un nouveau tiers +label.add.physical.network=Ajouter un r\u00E9seau physique label.add.pod=Ajouter un pod +label.add.port.forwarding.rule=Ajouter une r\u00E8gle de transfert de port label.add.primary.storage=Ajouter un stockage primaire +label.add.resources=Ajouter des ressources +label.add.route=Ajouter une route +label.add.rule=Ajouter une r\u00E8gle label.add.secondary.storage=Ajouter un stockage secondaire -label.add.security.group=Ajouter un groupe de sécurité -label.add.service.offering=Ajouter un offre de service -label.add.template=Ajouter un modèle +label.add.security.group=Ajouter un groupe de s\u00E9curit\u00E9 +label.add.service.offering=Ajouter une offre de service +label.add.static.nat.rule=Ajouter une r\u00E8gle de NAT statique +label.add.static.route=Ajouter une route statique +label.add.system.service.offering=Ajouter une offre de service syst\u00E8me +label.add.template=Ajouter un mod\u00E8le +label.add.to.group=Ajouter au groupe label.add.user=Ajouter un utilisateur 
-label.add.vlan=Ajouter un vlan +label.add.vlan=Ajouter un VLAN +label.add.vm=Ajouter VM +label.add.vms=Ajouter VMs +label.add.vms.to.lb=Ajouter une/des VM(s) \u00E0 la r\u00E8gle de r\u00E9partition de charge label.add.volume=Ajouter un volume +label.add.vpc=Ajouter un VPC +label.add.vpn.customer.gateway=Ajouter une passerelle VPN cliente +label.add.vpn.user=Ajouter un utilisateur VPN label.add.zone=Ajouter une zone -label.add=Ajouter +label.adding=Ajout label.adding.cluster=Ajout du Cluster -label.adding.failed=Echec de l\'ajout +label.adding.failed=\u00C9chec de l\\'ajout label.adding.pod=Ajout du Pod label.adding.processing=Ajout... -label.adding.succeeded=Ajout réussi -label.adding.user=Ajout de l\'utilisateur +label.adding.succeeded=Ajout r\u00E9ussi +label.adding.user=Ajout de l\\'utilisateur label.adding.zone=Ajout de la zone -label.adding=Ajout -label.additional.networks=Réseaux additionnels -label.admin.accounts=Comptes Administrateur +label.additional.networks=R\u00E9seaux additionnels label.admin=Administrateur -label.advanced.mode=Mode avancé -label.advanced.search=Recherche avancée -label.advanced=Avancé +label.admin.accounts=Comptes Administrateur +label.advanced=Avanc\u00E9 +label.advanced.mode=Mode avanc\u00E9 +label.advanced.search=Recherche avanc\u00E9e +label.agent.password=Mot de passe Agent +label.agent.username=Identifiant Agent +label.agree=Accepter label.alert=Alerte label.algorithm=Algorithme -label.allocated=Alloué -label.api.key=Clé d\'API -label.assign.to.load.balancer=Assigner l\'instance au partageur de charge +label.allocated=Allou\u00E9 +label.allocation.state=\u00C9tat de l\\'allocation +label.api.key=Cl\u00E9 d\\'API +label.apply=Appliquer label.assign=Assigner -label.associated.network.id=ID du réseau associé -label.attached.iso=Image ISO attachée -label.availability.zone=Zone de disponibilité -label.availability=Disponibilité -label.available.public.ips=Adresses IP publiques disponibles +label.assign.to.load.balancer=Assigner 
l\\'instance au r\u00E9partiteur de charge +label.associated.network=R\u00E9seau associ\u00E9 +label.associated.network.id=ID du r\u00E9seau associ\u00E9 +label.attached.iso=Image ISO attach\u00E9e +label.availability=Disponibilit\u00E9 +label.availability.zone=Zone de disponibilit\u00E9 label.available=Disponible +label.available.public.ips=Adresses IP publiques disponibles label.back=Retour +label.bandwidth=Bande passante +label.basic=Basique label.basic.mode=Mode basique -label.bootable=Bootable -label.broadcast.domain.type=Type de domaine de broadcast +label.bootable=Amor\u00E7able +label.broadcast.domain.range=Plage du domaine multi-diffusion +label.broadcast.domain.type=Type de domaine de multi-diffusion +label.broadcast.uri=URI multi-diffusion label.by.account=Par compte -label.by.availability=Par disponibilité +label.by.availability=Par disponibilit\u00E9 label.by.domain=Par domaine label.by.end.date=Par date de fin label.by.level=Par niveau label.by.pod=Par Pod -label.by.role=Par role -label.by.start.date=Par date de début -label.by.state=Par état -label.by.traffic.type=Par type de traffic -label.by.type.id=Par type d\'ID +label.by.role=Par r\u00F4le +label.by.start.date=Par date de d\u00E9but +label.by.state=Par \u00E9tat +label.by.traffic.type=Par type de trafic label.by.type=Par type +label.by.type.id=Par type d\\'ID label.by.zone=Par zone -label.bytes.received=Octets reçus -label.bytes.sent=Octets envoyés +label.bytes.received=Octets re\u00E7us +label.bytes.sent=Octets envoy\u00E9s label.cancel=Annuler +label.capacity=Capacit\u00E9 label.certificate=Certificat -label.privatekey=Clé privée PKCS\#8 -label.domain.suffix=Suffixe de domaine DNS (i.e., xyz.com) -label.character=Caractère -label.cidr.account=CIDR ou Compte/Groupe de sécurité +label.change.service.offering=Modifier l\\'offre de service +label.change.value=Modifier la valeur +label.character=Caract\u00E8re +label.checksum=Somme de contr\u00F4le MD5 +label.cidr=CIDR +label.cidr.account=CIDR ou 
Compte/Groupe de s\u00E9curit\u00E9 +label.cidr.list=CIDR Source +label.clean.up=Nettoyage +label.clear.list=Purger la liste label.close=Fermer -label.cloud.console=Console de gestion du cloud -label.cloud.managed=Géré par Cloud.com -label.cluster.type=Type de Cluster +label.cloud.console=Console d\\'Administration du Cloud +label.cloud.managed=G\u00E9r\u00E9 par Cloud.com label.cluster=Cluster +label.cluster.name=Nom du cluster +label.cluster.type=Type de Cluster +label.clusters=Clusters +label.clvm=CLVM label.code=Code +label.community=Communaut\u00E9 +label.compute=Processeur +label.compute.and.storage=Processeur et Stockage +label.compute.offering=Offre de calcul +label.compute.offerings=Offres de calcul +label.configuration=Configuration +label.configure=Configurer +label.configure.network.ACLs=Configurer les r\u00E8gles d\\'acc\u00E8s r\u00E9seau ACL +label.configure.vpc=Configurer le VPC +label.confirm.password=Confirmer le mot de passe label.confirmation=Confirmation -label.cpu.allocated.for.VMs=CPU alloué aux VMs -label.cpu.allocated=CPU alloué -label.cpu.utilized=CPU utilisé +label.congratulations=F\u00E9licitations \! 
+label.conserve.mode=Conserver le mode +label.console.proxy=Console proxy +label.continue=Continuer +label.continue.basic.install=Continuer avec l\\'installation basique +label.corrections.saved=Modifications enregistr\u00E9es label.cpu=CPU -label.created=Créé +label.cpu.allocated=CPU allou\u00E9e +label.cpu.allocated.for.VMs=CPU allou\u00E9e aux VMs +label.cpu.mhz=CPU (en MHz) +label.cpu.utilized=CPU utilis\u00E9e +label.create.VPN.connection=Cr\u00E9er une connexion VPN +label.create.project=Cr\u00E9er un projet +label.create.template=Cr\u00E9er un mod\u00E8le +label.created=Cr\u00E9\u00E9 +label.created.by.system=Cr\u00E9\u00E9 par le syst\u00E8me label.cross.zones=Multi Zones -label.custom.disk.size=Taille de disque personnalisée +label.custom.disk.size=Taille de disque personnalis\u00E9e label.daily=Quotidien -label.data.disk.offering=Offre de disque de données +label.data.disk.offering=Offre de disque de donn\u00E9es label.date=Date label.day.of.month=Jour du mois label.day.of.week=Jour de la semaine +label.dead.peer.detection=D\u00E9tection de pair mort +label.decline.invitation=Refuser l\\'invitation +label.dedicated=D\u00E9di\u00E9 +label.default=Par d\u00E9faut +label.default.use=Utilisation par d\u00E9faut +label.default.view=Vue par d\u00E9faut label.delete=Supprimer -label.deleting.failed=Suppression échouée +label.delete.F5=Supprimer F5 +label.delete.NetScaler=Supprimer Netscaler +label.delete.NiciraNvp=Supprimer un contr\u00F4leur Nvp +label.delete.SRX=Supprimer SRX +label.delete.VPN.connection=Supprimer la connexion VPN +label.delete.VPN.customer.gateway=Supprimer la passerelle VPN client +label.delete.VPN.gateway=Supprimer la passerelle VPN +label.delete.gateway=Supprimer la passerelle +label.delete.project=Supprimer projet +label.delete.vpn.user=Supprimer l\\'utilisateur VPN +label.deleting.failed=Suppression \u00E9chou\u00E9e label.deleting.processing=Suppression... 
label.description=Description -label.detaching.disk=Détacher le disque -label.details=Details -label.device.id=ID du périphérique -label.disabled=Désactivé -label.disabling.vpn.access=Désactiver l\'accès VPN -label.disk.allocated=Disque Alloué +label.destination.physical.network.id=Identifiant du r\u00E9seau physique de destination +label.destination.zone=Zone de destination +label.destroy=D\u00E9truire +label.destroy.router=Supprimer le routeur +label.detaching.disk=D\u00E9tacher le disque +label.details=D\u00E9tails +label.device.id=ID du p\u00E9riph\u00E9rique +label.devices=Machines +label.dhcp=DHCP +label.direct.ips=Adresses IP du r\u00E9seau partag\u00E9 +label.disable.provider=D\u00E9sactiver ce fournisseur +label.disable.vpn=D\u00E9sactiver le VPN +label.disabled=D\u00E9sactiv\u00E9 +label.disabling.vpn.access=D\u00E9sactiver l\\'acc\u00E8s VPN +label.disk.allocated=Disque Allou\u00E9 label.disk.offering=Offre de Disque -label.disk.size.gb=Taille du disque (en Go) label.disk.size=Taille du disque +label.disk.size.gb=Taille du disque (en Go) label.disk.total=Espace disque total label.disk.volume=Volume disque -label.display.text=Texte affiché -label.dns.1=DNS1 -label.dns.2=DNS2 +label.display.name=Nom d\\'affichage +label.display.text=Texte affich\u00E9 +label.dns=DNS +label.dns.1=DNS 1 +label.dns.2=DNS 2 +label.domain=Domaine label.domain.admin=Administrateur du domaine label.domain.id=ID du domaine label.domain.name=Nom de domaine -label.domain=Domaine -label.double.quotes.are.not.allowed=Les guillemets ne sont pas autorisés -label.download.progress=Progression du téléchargement +label.domain.router=Routeur du domaine +label.domain.suffix=Suffixe de domaine DNS (i.e., xyz.com) +label.done=Termin\u00E9 +label.double.quotes.are.not.allowed=Les guillemets ne sont pas autoris\u00E9s +label.download.progress=Progression du t\u00E9l\u00E9chargement +label.drag.new.position=D\u00E9placer sur une autre position label.edit=Modifier +label.edit.lb.rule=Modifier la 
r\u00E8gle LB +label.edit.network.details=Modifier les param\u00E8tres r\u00E9seau +label.edit.project.details=Modifier les d\u00E9tails du projet +label.edit.tags=Modifier les balises +label.edit.traffic.type=Modifier le type de trafic +label.edit.vpc=Modifier le VPC +label.egress.rule=R\u00E8gle sortante +label.egress.rules=R\u00E8gles de sortie +label.elastic=\u00C9lastique +label.elastic.IP=IP extensible +label.elastic.LB=R\u00E9partition de charge extensible label.email=Email -label.enabling.vpn.access=Activation de l\'accès VPN +label.enable.provider=Activer le fournisseur +label.enable.s3=Activer le stockage secondaire de type S3 +label.enable.swift=Activer Swift +label.enable.vpn=Activer VPN label.enabling.vpn=Activation du VPN +label.enabling.vpn.access=Activation de l\\'acc\u00E8s VPN +label.end.IP=R\u00E9silier l\\'IP label.end.port=Port de fin -label.endpoint.or.operation=Terminaison ou Opération -label.error.code=Code d\'erreur +label.end.reserved.system.IP=Adresse IP de fin r\u00E9serv\u00E9e Syst\u00E8me +label.end.vlan=VLAN de fin +label.endpoint.or.operation=Terminaison ou Op\u00E9ration +label.enter.token=Entrez le jeton unique label.error=Erreur -label.esx.host=Hôte ESX/ESXi +label.error.code=Code d\\'erreur +label.esx.host=H\u00F4te ESX/ESXi label.example=Exemple -label.failed=Echoué -label.featured=Sponsorisé -label.firewall=Parefeu -label.first.name=Prénom +label.f5=F5 +label.failed=\u00C9chou\u00E9 +label.featured=Sponsoris\u00E9 +label.fetch.latest=Rafra\u00EEchir +label.filterBy=Filtrer par +label.firewall=Pare-feu +label.first.name=Pr\u00E9nom label.format=Format label.friday=Vendredi label.full=Complet +label.full.path=Chemin complet label.gateway=Passerelle -label.general.alerts=Alertes générales -label.generating.url=Génération de l\'URL -label.go.step.2=Aller à l\'étape 2 -label.go.step.3=Aller à l\'étape 3 -label.go.step.4=Aller à l\'étape 4 -label.go.step.5=Aller à l\'étape 5 -label.group.optional=Groupe (optionnel) 
+label.general.alerts=Alertes g\u00E9n\u00E9rales +label.generating.url=G\u00E9n\u00E9ration de l\\'URL +label.go.step.2=Aller \u00E0 l\\'\u00E9tape 2 +label.go.step.3=Aller \u00E0 l\\'\u00E9tape 3 +label.go.step.4=Aller \u00E0 l\\'\u00E9tape 4 +label.go.step.5=Aller \u00E0 l\\'\u00E9tape 5 label.group=Groupe -label.guest.cidr=CIDR invités -label.guest.gateway=Passerelle pour les invités -label.guest.ip.range=Plage d\'adresses IP des invités -label.guest.ip=Adresse IP des invités -label.guest.netmask=Masque de réseau des invités -label.ha.enabled=Haute disponibilité active +label.group.optional=Groupe (optionnel) +label.guest=Invit\u00E9 +label.guest.cidr=CIDR invit\u00E9 +label.guest.end.ip=Adresse IP de fin pour les invit\u00E9s +label.guest.gateway=Passerelle pour les invit\u00E9s +label.guest.ip=Adresse IP des invit\u00E9s +label.guest.ip.range=Plage d\\'adresses IP des invit\u00E9s +label.guest.netmask=Masque de r\u00E9seau des invit\u00E9s +label.guest.networks=R\u00E9seaux d\\'invit\u00E9 +label.guest.start.ip=Adresse IP de d\u00E9but pour les invit\u00E9s +label.guest.traffic=Trafic invit\u00E9 +label.guest.type=Type d\\'invit\u00E9 +label.ha.enabled=Haute disponibilit\u00E9 activ\u00E9e label.help=Aide -label.host.alerts=Alertes des hôtes -label.host.name=Nom d\'hôte -label.host=Serveur -label.hosts=Serveurs -label.hourly=A l\'heure -label.hypervisor.type=Type d\'hyperviseur +label.hide.ingress.rule=Cacher la r\u00E8gle d\\'entr\u00E9e +label.hints=Astuces +label.host=H\u00F4te +label.host.MAC=Adresse MAC h\u00F4te +label.host.alerts=Alertes des h\u00F4tes +label.host.name=Nom d\\'h\u00F4te +label.host.tags=\u00C9tiquettes d\\'h\u00F4te +label.hosts=H\u00F4tes +label.hourly=Chaque heure label.hypervisor=Hyperviseur +label.hypervisor.capabilities=Fonctions hyperviseur +label.hypervisor.type=Type d\\'hyperviseur +label.hypervisor.version=Version hyperviseur label.id=ID label.info=Information -label.ingress.rule=Règle Ingress -label.initiated.by=Initié par 
-label.instance.limits=Limites des instances -label.instance.name=Nom de lìnstance +label.ingress.rule=R\u00E8gle d\\'entr\u00E9e +label.initiated.by=Initi\u00E9 par +label.installWizard.addClusterIntro.subtitle=Qu\\'est ce qu\\'un cluster ? +label.installWizard.addClusterIntro.title=Ajoutons un cluster +label.installWizard.addHostIntro.subtitle=Qu\\'est ce qu\\'un h\u00F4te ? +label.installWizard.addHostIntro.title=Ajoutons un h\u00F4te +label.installWizard.addPodIntro.subtitle=Qu\\'est ce qu\\'un pod ? +label.installWizard.addPodIntro.title=Ajoutons un pod +label.installWizard.addPrimaryStorageIntro.subtitle=Qu\\'est ce que le stockage primaire ? +label.installWizard.addPrimaryStorageIntro.title=Ajoutons du stockage primaire +label.installWizard.addSecondaryStorageIntro.subtitle=Qu\\'est ce que le stockage secondaire ? +label.installWizard.addSecondaryStorageIntro.title=Ajoutons du stockage secondaire +label.installWizard.addZone.title=Ajouter une zone +label.installWizard.addZoneIntro.subtitle=Qu\\'est ce qu\\'une zone ? +label.installWizard.addZoneIntro.title=Ajoutons une zone +label.installWizard.click.launch=Appuyer sur le bouton d\u00E9marrer. 
+label.installWizard.subtitle=Ce tutoriel vous aidera \u00E0 configurer votre installation CloudStack&\#8482; +label.installWizard.title=Bonjour et bienvenue dans CloudStack&\#8482; label.instance=Instance +label.instance.limits=Limites des instances +label.instance.name=Nom de l\\'instance label.instances=Instances label.internal.dns.1=DNS interne 1 label.internal.dns.2=DNS interne 2 -label.interval.type=Type d\'ìntervalle +label.internal.name=Nom interne +label.interval.type=Type d\\'intervalle +label.introduction.to.cloudstack=Introduction \u00E0 CloudStack&\#8482; label.invalid.integer=Nombre entier invalide label.invalid.number=Nombre invalide +label.invitations=Invitations +label.invite=Inviter +label.invite.to=Inviter sur +label.invited.accounts=Comptes invit\u00E9s +label.ip=IP label.ip.address=Adresse IP label.ip.allocations=Allocations de IPs label.ip.limits=Limite de IPs publiques label.ip.or.fqdn=IP ou FQDN label.ip.range=Plage IP -label.ip=IP +label.ip.ranges=Plages IP +label.ipaddress=Adresse IP label.ips=IPs -label.is.default=Est par défaut -label.is.shared=Est partagé -label.is.system=Type système +label.is.default=Est par d\u00E9faut +label.is.redundant.router=Redondant +label.is.shared=Est partag\u00E9 +label.is.system=Est Syst\u00E8me label.iscsi=iSCSI -label.iso.boot=Démarrage par ISO label.iso=ISO -label.isolation.mode=Mode d\'isolation +label.iso.boot=D\u00E9marrage par ISO +label.isolated.networks=R\u00E9seaux isol\u00E9s +label.isolation.method=M\u00E9thode de s\u00E9paration +label.isolation.mode=Mode d\\'isolation +label.isolation.uri=URI d\\'isolation +label.item.listing=Liste des \u00E9l\u00E9ments label.keep=Conserver -label.lang.chinese=Chinois (simplifié) +label.key=Clef +label.keyboard.type=Type de clavier +label.kvm.traffic.label=Libell\u00E9 pour le trafic KVM +label.label=Libell\u00E9 +label.lang.brportugese=Portugais Br\u00E9sil +label.lang.chinese=Chinois (simplifi\u00E9) label.lang.english=Anglais 
+label.lang.french=Fran\u00E7ais label.lang.japanese=Japonais -label.lang.korean=Coréen +label.lang.russian=Russe label.lang.spanish=Espagnol -label.last.disconnected=Dernière Déconnexion -label.last.name=Nom de famille +label.last.disconnected=Derni\u00E8re D\u00E9connexion +label.last.name=Nom +label.latest.events=Derniers \u00E9v\u00E9nements +label.launch=D\u00E9marrer +label.launch.vm=D\u00E9marrer VM +label.launch.zone=D\u00E9marrer la zone +label.least.connections=Le moins de connexions label.level=Niveau label.linklocal.ip=Adresse IP de lien local -label.load.balancer=Partageur de charge +label.load.balancer=R\u00E9partiteur de charge +label.load.balancing=R\u00E9partition de charge +label.load.balancing.policies=R\u00E8gles de r\u00E9partition de charge label.loading=Chargement en cours label.local=Local -# label.local.storage.enabled=Local storage enabled +label.local.storage=Stockage local +label.local.storage.enabled=Stockage local activ\u00E9 label.login=Connexion -label.logout=Déconnexion +label.logout=D\u00E9connexion label.lun=LUN -label.manage=Géré +label.make.project.owner=Devenir propri\u00E9taire du projet +label.manage=G\u00E9r\u00E9 +label.manage.resources=G\u00E9rer les ressources +label.management=Administration +label.management.ips=Adresses IP de gestion +label.max.guest.limit=Nombre maximum d\\'invit\u00E9s +label.max.networks=R\u00E9seaux Max. +label.max.public.ips=Max. IP publiques +label.max.snapshots=Max. instantan\u00E9s +label.max.templates=Max. mod\u00E8les +label.max.vms=Max. VMs utilisateur +label.max.volumes=Max. volumes +label.max.vpcs=Max. VPCs label.maximum=Maximum -label.memory.allocated=Mémoire allouée -label.memory.total=Mémoire totale -label.memory.used=Mémoire utilisée -label.memory=Mémoire (en MB) +label.may.continue=Vous pouvez continuer. 
+label.memory=M\u00E9moire (en Mo) +label.memory.allocated=M\u00E9moire allou\u00E9e +label.memory.mb=M\u00E9moire (en MB) +label.memory.total=M\u00E9moire totale +label.memory.used=M\u00E9moire utilis\u00E9e label.menu.accounts=Comptes label.menu.alerts=Alertes -label.menu.all.accounts=Tout les comptes +label.menu.all.accounts=Tous les comptes label.menu.all.instances=Toutes les instances -label.menu.community.isos=ISO de la communauté -label.menu.community.templates=Modèles de la communauté +label.menu.community.isos=ISO de la communaut\u00E9 +label.menu.community.templates=Mod\u00E8les de la communaut\u00E9 label.menu.configuration=Configuration label.menu.dashboard=Tableau de bord -label.menu.destroyed.instances=Instances détruites +label.menu.destroyed.instances=Instances d\u00E9truites label.menu.disk.offerings=Offres de disque label.menu.domains=Domaines -label.menu.events=Evénements -label.menu.featured.isos=ISOs Sponsorisées -label.menu.featured.templates=Modèles sponsorisés -label.menu.global.settings=Paramètres globaux +label.menu.events=\u00C9v\u00E9nements +label.menu.featured.isos=ISOs Sponsoris\u00E9es +label.menu.featured.templates=Mod\u00E8les sponsoris\u00E9s +label.menu.global.settings=Param\u00E8tres globaux +label.menu.infrastructure=Infrastructure label.menu.instances=Instances label.menu.ipaddresses=Adresses IP label.menu.isos=ISOs label.menu.my.accounts=Mes comptes label.menu.my.instances=Mes instances label.menu.my.isos=Mes ISOs -label.menu.my.templates=Mes modèles -label.menu.network.offerings=Offres de Service Réseau -label.menu.network=Réseau +label.menu.my.templates=Mes mod\u00E8les +label.menu.network=R\u00E9seau +label.menu.network.offerings=Offres de Service R\u00E9seau label.menu.physical.resources=Ressources physiques label.menu.running.instances=Instances actives -label.menu.security.groups=Groupes de sécurité +label.menu.security.groups=Groupes de s\u00E9curit\u00E9 label.menu.service.offerings=Offres de Service 
-label.menu.snapshots=Instantanés -label.menu.stopped.instances=Instances Arrêtées +label.menu.snapshots=Instantan\u00E9s +label.menu.stopped.instances=Instances Arr\u00EAt\u00E9es label.menu.storage=Stockage -label.menu.system.vms=\ VMs Systèmes -label.menu.system=Système -label.menu.templates=Modèles +label.menu.system=Syst\u00E8me +label.menu.system.service.offerings=Offres syst\u00E8me +label.menu.system.vms=\ VMs Syst\u00E8mes +label.menu.templates=Mod\u00E8les label.menu.virtual.appliances=Appliances Virtuelles label.menu.virtual.resources=Ressources Virtuelles label.menu.volumes=Volumes -label.migrate.instance.to=Migrer l\'instance vers +label.migrate.instance.to=Migrer l\\'instance vers +label.migrate.instance.to.host=Migration de l\\'instance sur un autre h\u00F4te +label.migrate.instance.to.ps=Migration de l\\'instance sur un autre stockage primaire +label.migrate.router.to=Migrer le routeur vers +label.migrate.systemvm.to=Migrer la VM syst\u00E8me vers +label.migrate.to.host=Migrer vers un h\u00F4te +label.migrate.to.storage=Migrer vers un stockage +label.migrate.volume=Migration du volume vers un autre stockage primaire label.minimum=Minimum label.minute.past.hour=minute(s) label.monday=Lundi label.monthly=Mensuel -label.more.templates=Plus de modèles +label.more.templates=Plus de mod\u00E8les +label.move.down.row=Descendre d\\'un cran +label.move.to.bottom=D\u00E9placer en bas +label.move.to.top=Placer au dessus +label.move.up.row=Monter d\\'un cran label.my.account=Mon compte -label.name.optional=Nom (optionnel) +label.my.network=Mon r\u00E9seau +label.my.templates=Mes mod\u00E8les label.name=Nom -label.netmask=Masque de réseau -label.network.desc=Description réseau +label.name.optional=Nom (optionnel) +label.nat.port.range=Plage de port NAT +label.netScaler=NetScaler +label.netmask=Masque de r\u00E9seau +label.network=R\u00E9seau +label.network.ACL=R\u00E8gles d\\'acc\u00E8s r\u00E9seau ACL +label.network.ACL.total=Total R\u00E8gles d\\'acc\u00E8s 
r\u00E9seau +label.network.ACLs=R\u00E8gles d\\'acc\u00E8s r\u00E9seau +label.network.desc=Description r\u00E9seau +label.network.device=\u00C9quipement R\u00E9seau +label.network.device.type=Type d\\'\u00E9quipement r\u00E9seau label.network.domain=Nom de domaine -label.network.id=ID réseau -label.network.name=Nom du réseau -label.network.offering.display.text=Texte affiché d\'Offre de Réseau -label.network.offering.id=ID de l\'Offre de Service Réseau -label.network.offering.name=Nom de l\'Offre de Service Réseau -label.network.offering=Offre de Service Réseau -label.network.rate=Débit réseau -label.network.read=Lecture réseau -label.network.type=Type de réseau -label.network.write=Écriture réseau -label.network=Réseau +label.network.domain.text=Domaine r\u00E9seau +label.network.id=ID r\u00E9seau +label.network.label.display.for.blank.value=Utiliser la passerelle par d\u00E9faut +label.network.name=Nom du r\u00E9seau +label.network.offering=Offre de Service R\u00E9seau +label.network.offering.display.text=Texte affich\u00E9 d\\'Offre de R\u00E9seau +label.network.offering.id=ID de l\\'Offre de Service R\u00E9seau +label.network.offering.name=Nom de l\\'Offre de Service R\u00E9seau +label.network.rate=D\u00E9bit R\u00E9seau +label.network.rate.megabytes=D\u00E9bit r\u00E9seau (Mo/s) +label.network.read=Lecture r\u00E9seau +label.network.service.providers=Fournisseurs de service r\u00E9seau +label.network.type=Type de r\u00E9seau +label.network.write=\u00C9criture r\u00E9seau +label.networking.and.security=R\u00E9seau et s\u00E9curit\u00E9 +label.networks=R\u00E9seaux +label.new=Nouveau label.new.password=Nouveau mot de passe +label.new.project=Nouveau projet +label.new.vm=Nouvelle VM label.next=Suivant +label.nexusVswitch=Nexus 1000v +label.nfs=NFS label.nfs.server=Serveur NFS label.nfs.storage=Stockage NFS -label.nfs=NFS +label.nic.adapter.type=Type de carte r\u00E9seau +label.nicira.controller.address=Adresse du contr\u00F4leur 
+label.nicira.l3gatewayserviceuuid=Uuid du service passerelle L3 +label.nicira.transportzoneuuid=Uuid de la Zone Transport label.nics=Cartes NIC -label.no.actions=Aucune action disponibles -label.no.alerts=Aucune alerte récentes -label.no.errors=Aucune erreur récentes -label.no.isos=Aucun ISOs disponibles -label.no.items=Aucun élément disponibles -label.no.security.groups=Aucun groupe de sécurité disponibles -label.no.thanks=Non merci label.no=Non +label.no.actions=Aucune action disponible +label.no.alerts=Aucune alerte r\u00E9cente +label.no.data=Aucune donn\u00E9e +label.no.errors=Aucune erreur r\u00E9cente +label.no.isos=Aucun ISOs disponible +label.no.items=Aucun \u00E9l\u00E9ment disponible +label.no.security.groups=Aucun groupe de s\u00E9curit\u00E9 disponible +label.no.thanks=Non merci label.none=Aucun label.not.found=Introuvable -label.num.cpu.cores=Nombre de c\u0153urs de processeur +label.notifications=Messages +label.num.cpu.cores=Nombre de coeurs de processeur +label.number.of.clusters=Nombre de clusters +label.number.of.hosts=Nombre d\\'H\u00F4tes +label.number.of.pods=Nombre de Pods +label.number.of.system.vms=Nombre de VM Syst\u00E8me +label.number.of.virtual.routers=Nombre de routeurs virtuels +label.number.of.zones=Nombre de zones label.numretries=Nombre de tentatives -label.offer.ha=Offrir la haute disponibilité +label.ocfs2=OCFS2 +label.offer.ha=Offrir la haute disponibilit\u00E9 +label.ok=OK label.optional=Facultatif -label.os.preference=Préférence du OS +label.order=Ordre +label.os.preference=Pr\u00E9f\u00E9rence du OS label.os.type=Type du OS -label.owned.public.ips=Addresses IP Publique détenues -label.owner.account=Compte propriétaire -label.owner.domain=Propriétaire du domaine -label.parent.domain=Domaine Parent -label.password.enabled=Mot de passe activé +label.owned.public.ips=Adresses IP Publiques d\u00E9tenues +label.owner.account=Propri\u00E9taire du compte +label.owner.domain=Propri\u00E9taire du domaine +label.parent.domain=Parent du 
Domaine label.password=Mot de passe +label.password.enabled=Mot de passe activ\u00E9 label.path=Chemin -label.please.wait=Patientez s\'il vous plait +label.perfect.forward.secrecy=Confidentialit\u00E9 persistante +label.physical.network=R\u00E9seau physique +label.physical.network.ID=Identifiant du r\u00E9seau physique +label.please.specify.netscaler.info=Renseigner les informations sur le Netscaler +label.please.wait=Patientez s\\'il vous plait label.pod=Pod +label.pod.name=Nom du pod +label.pods=Pods label.port.forwarding=Redirection de port +label.port.forwarding.policies=R\u00E8gles de transfert de port label.port.range=Plage de ports -label.prev=Précédent -label.primary.allocated=Stockage primaire alloué -label.primary.network=Réseau primaire -label.primary.storage=Stockage primaire -label.primary.used=Stockage primaire utilisé -label.private.interface=Interface privée -label.private.ip.range=Plage d\'adresses IP Privées -label.private.ip=Adresse IP Privée -label.private.ips=Adresses IP Privées -label.private.port=Port privé -label.private.zone=Zone Privée +label.prev=Pr\u00E9c\u00E9dent +label.previous=Retour +label.primary.allocated=Stockage primaire allou\u00E9 +label.primary.network=R\u00E9seau primaire +label.primary.storage=Premier stockage +label.primary.storage.count=Groupes de stockage primaire +label.primary.used=Stockage primaire utilis\u00E9 +label.private.Gateway=Passerelle priv\u00E9e +label.private.interface=Interface priv\u00E9e +label.private.ip=Adresse IP Priv\u00E9e +label.private.ip.range=Plage d\\'adresses IP Priv\u00E9es +label.private.ips=Adresses IP Priv\u00E9es +label.private.network=R\u00E9seau priv\u00E9 +label.private.port=Port priv\u00E9 +label.private.zone=Zone Priv\u00E9e +label.privatekey=Cl\u00E9 priv\u00E9e PKCS\#8 +label.project=Projet +label.project.dashboard=Tableau de bord projet +label.project.id=ID projet +label.project.invite=Inviter sur le projet +label.project.name=Nom du projet +label.project.view=Vue projet 
+label.projects=Projets label.protocol=Protocole +label.providers=Fournisseurs +label.public=Publique label.public.interface=Interface publique label.public.ip=Adresse IP publique label.public.ips=Adresses IP publiques +label.public.network=R\u00E9seau public label.public.port=Port public +label.public.traffic=Trafic public label.public.zone=Zone publique -label.public=Publique -label.recent.errors=Erreurs récentes +label.purpose=R\u00F4le +label.quickview=Aper\u00E7u +label.reboot=Red\u00E9marrer +label.recent.errors=Erreurs r\u00E9centes +label.redundant.router=Routeur redondant +label.redundant.router.capability=Routeur redondant +label.redundant.state=\u00C9tat de la redondance label.refresh=Actualiser label.related=Connexes -label.remove.from.load.balancer=Supprimer l\'instance du partageur de charge -label.removing.user=Retrait de l\'utilisateur +label.remind.later=Me rappeler plus tard +label.remove.ACL=Supprimer une r\u00E8gle ACL +label.remove.egress.rule=Supprimer la r\u00E8gle sortante +label.remove.from.load.balancer=Supprimer l\\'instance du r\u00E9partiteur de charge +label.remove.ingress.rule=Supprimer la r\u00E8gle entrante +label.remove.ip.range=Supprimer la plage IP +label.remove.pf=Supprimer la r\u00E8gle de transfert de port +label.remove.project.account=Supprimer le compte projet +label.remove.rule=Supprimer la r\u00E8gle +label.remove.static.nat.rule=Supprimer le NAT statique +label.remove.static.route=Supprimer une route statique +label.remove.tier=Supprimer le tiers +label.remove.vm.from.lb=Supprimer la VM de la r\u00E8gle de r\u00E9partition de charge +label.remove.vpc=Supprimer le VPC +label.removing=Suppression +label.removing.user=Retrait de l\\'utilisateur label.required=Requis -label.reserved.system.ip=Adresse IP Système réservé -label.resource.limits=Limite des ressources +label.reserved.system.gateway=Passerelle r\u00E9serv\u00E9e Syst\u00E8me +label.reserved.system.ip=Adresse IP Syst\u00E8me r\u00E9serv\u00E9e 
+label.reserved.system.netmask=Masque de sous-r\u00E9seau r\u00E9serv\u00E9 Syst\u00E8me +label.reset.VPN.connection=R\u00E9-initialiser la connexion VPN label.resource=Ressource +label.resource.limits=Limite des ressources +label.resource.state=\u00C9tat des ressources label.resources=Ressources -label.role=Rôle +label.restart.network=Red\u00E9marrage du r\u00E9seau +label.restart.required=Red\u00E9marrage n\u00E9cessaire +label.restart.vpc=Red\u00E9marrer le VPC +label.restore=Restaurer +label.review=Revoir +label.revoke.project.invite=R\u00E9voquer l\\'invitation +label.role=R\u00F4le +label.root.disk.controller=Contr\u00F4leur de disque principal label.root.disk.offering=Offre de disque racine +label.round.robin=Al\u00E9atoire +label.rules=R\u00E8gles label.running.vms=VMs actives +label.s3.access_key=Cl\u00E9 d\\'Acc\u00E8s +label.s3.bucket=Seau +label.s3.connection_timeout=D\u00E9lai d\\'expiration de connexion +label.s3.endpoint=Terminaison +label.s3.max_error_retry=Nombre d\\'essai en erreur max. +label.s3.secret_key=Cl\u00E9 Priv\u00E9e +label.s3.socket_timeout=D\u00E9lai d\\'expiration de la socket +label.s3.use_https=Utiliser HTTPS label.saturday=Samedi label.save=Sauvegarder -label.saving.processing=Sauvegarde en cours.... -label.scope=Portée +label.save.and.continue=Enregistrer et continuer +label.saving.processing=Sauvegarde en cours... 
+label.scope=Port\u00E9e label.search=Rechercher label.secondary.storage=Stockage secondaire -label.secondary.used=Stockage secondaire utilisé -label.secret.key=clé privée -label.security.group.name=Nom du groupe de sécurité -label.security.group=Groupe de sécurité -label.security.groups.enabled=Groupes de sécurité Activés -label.security.groups=Groupes de sécurité -label.sent=Envoyer +label.secondary.storage.count=Groupes de stockage secondaire +label.secondary.storage.vm=VM stockage secondaire +label.secondary.used=Stockage secondaire utilis\u00E9 +label.secret.key=Cl\u00E9 priv\u00E9e +label.security.group=Groupe de s\u00E9curit\u00E9 +label.security.group.name=Nom du groupe de s\u00E9curit\u00E9 +label.security.groups=Groupes de s\u00E9curit\u00E9 +label.security.groups.enabled=Groupes de s\u00E9curit\u00E9 Activ\u00E9s +label.select=S\u00E9lectionner +label.select-view=S\u00E9lectionner la vue +label.select.a.template=S\u00E9lectionner un mod\u00E8le +label.select.a.zone=S\u00E9lectionner une zone +label.select.instance=S\u00E9lectionner une instance +label.select.instance.to.attach.volume.to=S\u00E9lectionner l\\'instance \u00E0 laquelle rattacher ce volume +label.select.iso.or.template=S\u00E9lectionner un ISO ou un mod\u00E8le +label.select.offering=S\u00E9lectionner une offre +label.select.project=S\u00E9lectionner un projet +label.select.tier=S\u00E9lectionner le tiers +label.select.vm.for.static.nat=S\u00E9lectionner une VM pour le NAT statique +label.sent=Envoy\u00E9 label.server=Serveur +label.service.capabilities=Fonctions disponibles label.service.offering=Offre de Service -label.system.service.offering=Offre de Service Système -label.session.expired=Session expiré +label.session.expired=Session expir\u00E9e +label.set.up.zone.type=Configurer le type de zone +label.setup=Configuration +label.setup.network=Configurer le r\u00E9seau +label.setup.zone=Configurer la zone label.shared=En partage +label.show.ingress.rule=Montrer la r\u00E8gle 
d\\'entr\u00E9e +label.shutdown.provider=\u00C9teindre ce fournisseur +label.site.to.site.VPN=VPN Site-\u00E0-Site label.size=Taille -label.snapshot.limits=Limite d\'instantanés -label.snapshot.name=Nom de l\'instantané -label.snapshot.s=Instantané(s) -label.snapshot.schedule=Configurer un snapshot récurrent -label.snapshot=Instantané -label.snapshots=Instantanés +label.skip.guide=J\\'ai d\u00E9j\u00E0 utilis\u00E9 CloudStack avant, passer ce tutoriel +label.snapshot=Instantan\u00E9 +label.snapshot.limits=Limites d\\'instantan\u00E9 +label.snapshot.name=Nom de l\\'instantan\u00E9 +label.snapshot.s=Instantan\u00E9(s) +label.snapshot.schedule=Configurer un instantan\u00E9 r\u00E9current +label.snapshots=Instantan\u00E9s +label.source=Origine label.source.nat=NAT Source -label.specify.vlan=Précisez le VLAN -label.start.port=Port de -label.state=État -label.static.nat.to=NAT Static vers -label.static.nat=NAT Static +label.specify.IP.ranges=Sp\u00E9cifier des plages IP +label.specify.vlan=Pr\u00E9ciser le VLAN +label.srx=SRX +label.start.IP=D\u00E9marrer l\\'IP +label.start.port=Port de d\u00E9but +label.start.reserved.system.IP=Adresse IP de d\u00E9but r\u00E9serv\u00E9e Syst\u00E8me +label.start.vlan=VLAN de d\u00E9part +label.state=\u00C9tat +label.static.nat=NAT Statique +label.static.nat.enabled=NAT statique activ\u00E9 +label.static.nat.to=NAT Statique vers +label.static.nat.vm.details=D\u00E9tails des NAT statique VM label.statistics=Statistiques label.status=Statut -label.step.1.title=Etape 1 \: Sélectionnez un modèle -label.step.1=Étape 1 -label.step.2.title=Etape 2\: Offre de Service -label.step.2=Étape 2 -label.step.3.title=Etape 3\: Sélectionnez une offre de service -label.step.3=Étape 3 -label.step.4.title=Etape 4\: Réseau -label.step.4=Étape 4 -label.step.5.title=Etape 5\: Vérification -label.step.5=Étape 5 -label.stopped.vms=VMs arrêtés -label.storage.type=Type de stockage +label.step.1=\u00C9tape 1 +label.step.1.title=\u00C9tape 1 \: S\u00E9lectionnez un 
mod\u00E8le +label.step.2=\u00C9tape 2 +label.step.2.title=\u00C9tape 2 \: Offre de Service +label.step.3=\u00C9tape 3 +label.step.3.title=\u00C9tape 3 \: S\u00E9lectionnez une offre de service +label.step.4=\u00C9tape 4 +label.step.4.title=\u00C9tape 4 \: R\u00E9seau +label.step.5=\u00C9tape 5 +label.step.5.title=\u00C9tape 5 \: V\u00E9rification +label.stickiness=Fid\u00E9lit\u00E9 +label.sticky.cookie-name=Nom du cookie +label.sticky.domain=Domaine +label.sticky.expire=Expiration +label.sticky.holdtime=Temps de pause +label.sticky.indirect=Indirect +label.sticky.length=Longueur +label.sticky.mode=Mode +label.sticky.nocache=Pas de cache +label.sticky.postonly=Apr\u00E8s seulement +label.sticky.prefix=Pr\u00E9fixe +label.sticky.request-learn=Apprendre la requ\u00EAte +label.sticky.tablesize=Taille du tableau +label.stop=Arr\u00EAter +label.stopped.vms=VMs arr\u00EAt\u00E9es label.storage=Stockage +label.storage.tags=\u00C9tiquettes de stockage +label.storage.traffic=Trafic stockage +label.storage.type=Type de stockage +label.subdomain.access=Acc\u00E8s sous-domaine label.submit=Envoyer -label.submitted.by=[Soumis par\: ] -label.succeeded=Réussi +label.submitted.by=[Soumis par \: ] +label.succeeded=R\u00E9ussi label.sunday=Dimanche -label.system.capacity=Capacité système -label.system.vm.type=Type de VM système -label.system.vm=VM Système -label.system.vms=\ VMs systèmes -label.tagged=Taggé -label.tags=Tags -label.target.iqn=IQN de la Cible -label.template.limits=Limites des modèles -label.template=Modèle -label.theme.default=Thème par défaut -label.theme.grey=Personnalisé - Gris -label.theme.lightblue=Personnalisé - Blue clair +label.super.cidr.for.guest.networks=Super CIDR pour les r\u00E9seaux invit\u00E9s +label.supported.services=Services support\u00E9s +label.supported.source.NAT.type=Type de NAT support\u00E9 +label.suspend.project=Suspendre projet +label.system.capacity=Capacit\u00E9 syst\u00E8me +label.system.offering=Offre de syst\u00E8me 
+label.system.service.offering=Offre de Service Syst\u00E8me +label.system.vm=VM Syst\u00E8me +label.system.vm.type=Type de VM syst\u00E8me +label.system.vms=\ VMs Syst\u00E8mes +label.system.wide.capacity=Capacit\u00E9 globale +label.tagged=\u00C9tiquet\u00E9 +label.tags=\u00C9tiquette +label.target.iqn=Cible IQN +label.task.completed=T\u00E2che termin\u00E9e +label.template=Mod\u00E8le +label.template.limits=Limites de mod\u00E8le +label.theme.default=Th\u00E8me par d\u00E9faut +label.theme.grey=Personnalis\u00E9 - Gris +label.theme.lightblue=Personnalis\u00E9 - Bleu clair label.thursday=Jeudi -label.time.zone=Fuseau horaire +label.tier=Tiers +label.tier.details=D\u00E9tails du tiers label.time=Temps -label.timeout.in.second = Timeout (secondes) +label.time.zone=Fuseau horaire +label.timeout=D\u00E9lai d\\'expiration +label.timeout.in.second=D\u00E9lai d\\'expiration (secondes) label.timezone=Fuseau horaire -label.total.cpu=Capacité Totale en CPU +label.token=Jeton unique +label.total.CPU=Capacit\u00E9 totale en CPU +label.total.cpu=Capacit\u00E9 Totale en CPU +label.total.hosts=Total H\u00F4tes +label.total.memory=Total m\u00E9moire +label.total.of.ip=Total adresses IP +label.total.of.vm=Total VM +label.total.storage=Total stockage label.total.vms=Nombre total de VMs -label.traffic.type=Type de Traffic +label.traffic.label=Libell\u00E9 de trafic +label.traffic.type=Type de Trafic +label.traffic.types=Types de trafic label.tuesday=Mardi -label.type.id=ID du Type label.type=Type +label.type.id=ID du Type label.unavailable=Indisponible -label.unlimited=Illimité -label.untagged=Non Taggé -label.update.ssl.cert=Mettre à jour le certificate SSL -label.update.ssl=Mettre à jour le certificate SSL -label.updating=Mise à jour +label.unlimited=Illimit\u00E9 +label.untagged=Non Tagg\u00E9 +label.update.project.resources=Mettre \u00E0 jour les ressources du projet +label.update.ssl=Certificat SSL +label.update.ssl.cert=Certificat SSL +label.updating=Mise \u00E0 jour 
+label.upload=Charger +label.upload.volume=Charger un volume label.url=URL -label.usage.interface=Interface d\'Usage -label.used=Utilisé +label.usage.interface=Interface Utilisation +label.used=Utilis\u00E9 label.user=Utilisateur -label.username=Nom d\'Utilisateur +label.username=Nom d\\'Utilisateur label.users=Utilisateurs label.value=Valeur +label.vcdcname=Nom du DC vCenter label.vcenter.cluster=Cluster vCenter label.vcenter.datacenter=Datacenter vCenter label.vcenter.datastore=Datastore vCenter -label.vcenter.host=Hôte Vcenter +label.vcenter.host=H\u00F4te vCenter label.vcenter.password=Mot de passe vCenter -label.vcenter.username=Nom d\'utilisateur vCenter +label.vcenter.username=Nom d\\'utilisateur vCenter +label.vcipaddress=Adresse IP vCenter label.version=Version +label.view=Voir +label.view.all=Voir tout +label.view.console=Voir la console +label.view.more=Voir plus +label.viewing=Consultation en cours label.virtual.appliance=Appliance Virtuelle label.virtual.appliances=Appliances Virtuelles -label.virtual.network=Réseau virtuel +label.virtual.machines=Machines virtuelles +label.virtual.network=R\u00E9seau virtuel +label.virtual.router=Routeur Virtuel +label.virtual.routers=Routeurs virtuels +label.vlan=VLAN label.vlan.id=ID du VLAN label.vlan.range=Plage du VLAN label.vm.add=Ajouter une instance -label.vm.destroy=Détruire -label.vm.reboot=Redémarrer -label.vm.start=Démarrer -label.vm.stop=Arrêtez +label.vm.destroy=D\u00E9truire +label.vm.display.name=Nom d\\'affichage de la VM +label.vm.name=Nom de la VM +label.vm.reboot=Red\u00E9marrer +label.vm.start=D\u00E9marrer +label.vm.state=\u00C9tat VM +label.vm.stop=Arr\u00EAter label.vmfs=VMFS label.vms=VMs +label.vmware.traffic.label=Libell\u00E9 pour le trafic VMware +label.volgroup=Groupe de Volume +label.volume=Volume label.volume.limits=Limites des volumes label.volume.name=Nom du volume -label.volume=Volume label.volumes=Volumes -label.vsphere.managed=Gérée par vSphere +label.vpc=VPC +label.vpc.id=ID VPC 
+label.vpn=VPN +label.vpn.customer.gateway=Passerelle VPN client +label.vsmctrlvlanid=\ ID VLAN Contr\u00F4le +label.vsmpktvlanid=ID VLAN Paquet +label.vsmstoragevlanid=VLAN ID Stockage +label.vsphere.managed=G\u00E9r\u00E9e par vSphere label.waiting=En attente -label.warn=Avertir +label.warn=Avertissement label.wednesday=Mercredi label.weekly=Hebdomadaire -label.welcome.cloud.console=Bienvenue dans la console de gestion label.welcome=Bienvenue +label.welcome.cloud.console=Bienvenue dans la Console d\\'Administration +label.what.is.cloudstack=Qu\\'est-ce-que CloudStack&\#8482; ? +label.xen.traffic.label=Libell\u00E9 pour le trafic XenServer label.yes=Oui -label.zone.id=ID de la zone -label.zone.step.1.title=Etape 1\: Sélectionnez un réseau -label.zone.step.2.title=Etape 2\: Ajoutez une zone -label.zone.step.3.title=Etape 3\: Ajoutez un Pod -label.zone.step.4.title=Etape 4\: Ajoutez une plage d\'adresses IP -label.zone.wide=Transverse à la zone label.zone=Zone - -#Messages -message.acquire.public.ip=Sélectionnez la zone dans laquelle vous voulez acquérir votre nouvelle adresse IP. -message.action.cancel.maintenance.mode=Confirmer que vous souhaitez annuler cette maintenance. -message.action.cancel.maintenance=Votre hôte a été annulée de la maintenance. Ce processus peut prendre jusqu\'à plusieurs minutes. -message.action.delete.ISO.for.all.zones=L\'ISO est utilisé par toutes les zones. S\'il vous plaît confirmer que vous voulez le supprimer de toutes les zones. 
+label.zone.details=D\u00E9tails de la zone +label.zone.id=ID de la zone +label.zone.name=Nom de la zone +label.zone.step.1.title=\u00C9tape 1 \: S\u00E9lectionnez un r\u00E9seau +label.zone.step.2.title=\u00C9tape 2 \: Ajoutez une zone +label.zone.step.3.title=\u00C9tape 3 \: Ajoutez un Pod +label.zone.step.4.title=\u00C9tape 4 \: Ajoutez une plage d\\'adresses IP +label.zone.type=Type de zone +label.zone.wide=Transverse \u00E0 la zone +label.zoneWizard.trafficType.guest=Invit\u00E9 \: Trafic entre les machines virtuelles utilisateurs +label.zoneWizard.trafficType.management=Administration \: Trafic entre les ressources internes de CloudStack, incluant tous les composants qui communiquent avec le serveur d\\'administration, tels que les h\u00F4tes et les machines virtuelles Syst\u00E8mes CloudStack +label.zoneWizard.trafficType.public=Public \: Trafic entre Internet et les machines virtuelles dans le nuage +label.zoneWizard.trafficType.storage=Stockage \: Trafic entre les serveurs de stockages primaires et secondaires, tel que le transfert de machines virtuelles mod\u00E8les et des instantan\u00E9s de disques +label.zones=Zones +managed.state=\u00C9tat de la gestion +message.Zone.creation.complete=Cr\u00E9ation de la zone termin\u00E9e +message.acquire.new.ip=Confirmer l\\'acquisition d\\'une nouvelle adresse IP pour ce r\u00E9seau. +message.acquire.new.ip.vpc=Veuillez confirmer que vous voulez une nouvelle adresse IP pour ce VPC +message.acquire.public.ip=S\u00E9lectionnez la zone dans laquelle vous voulez acqu\u00E9rir votre nouvelle adresse IP. +message.action.cancel.maintenance=Votre h\u00F4te a quitt\u00E9 la maintenance. Ce processus peut prendre jusqu\\'\u00E0 plusieurs minutes. +message.action.cancel.maintenance.mode=Confirmer l\\'annulation de cette maintenance. +message.action.change.service.warning.for.instance=Votre instance doit \u00EAtre arr\u00EAt\u00E9e avant d\\'essayer de changer son offre de service. 
+message.action.change.service.warning.for.router=Votre routeur doit \u00EAtre arr\u00EAt\u00E9 avant d\\'essayer de changer son offre de service. message.action.delete.ISO=Confirmer que vous souhaitez supprimer cette ISO. +message.action.delete.ISO.for.all.zones=L\\'ISO est utilis\u00E9 par toutes les zones. S\\'il vous pla\u00EEt confirmer que vous voulez le supprimer de toutes les zones. message.action.delete.cluster=Confirmer que vous voulez supprimer ce cluster. message.action.delete.disk.offering=Confirmer que vous souhaitez supprimer cette offre de disque. message.action.delete.domain=Confirmer que vous voulez supprimer ce domaine. -message.action.delete.external.firewall=Confirmer que vous souhaitez supprimer ce pare-feu externe. Attention\: Si vous prévoyez de rajouter le même pare-feu externe de nouveau, vous devez réinitialiser les données d\'utilisation sur l\'appareil. -message.action.delete.external.load.balancer=Confirmez que vous souhaitez supprimer ce partageur de charge externe. Attention \: Si vous pensez ajouter le même partageur de charge plus tard, vous devez remettre à zéro les statistiques d\'usage de cet équipement. -message.action.delete.ingress.rule=Confirmez que vous souhaitez supprimer cette règle Ingress -message.action.delete.network=Confirmer que vous voulez supprimer ce réseau. +message.action.delete.external.firewall=Confirmer que vous souhaitez supprimer ce pare-feu externe. Attention \: Si vous pr\u00E9voyez de rajouter le m\u00EAme pare-feu externe de nouveau, vous devez r\u00E9-initialiser les donn\u00E9es d\\'utilisation sur l\\'appareil. +message.action.delete.external.load.balancer=Confirmez que vous souhaitez supprimer ce r\u00E9partiteur de charge externe. Attention \: Si vous pensez ajouter le m\u00EAme r\u00E9partiteur de charge plus tard, vous devez remettre \u00E0 z\u00E9ro les statistiques d\\'utilisation de cet \u00E9quipement. 
+message.action.delete.ingress.rule=Confirmez que vous souhaitez supprimer cette r\u00E8gle d\\'entr\u00E9e. +message.action.delete.network=Confirmer que vous voulez supprimer ce r\u00E9seau. +message.action.delete.nexusVswitch=Confirmer la suppression de ce Nexus 1000v +message.action.delete.physical.network=Confirmer la suppression du r\u00E9seau physique message.action.delete.pod=Confirmez que vous souhaitez supprimer ce pod. message.action.delete.primary.storage=Confirmer que vous voulez supprimer ce stockage primaire. message.action.delete.secondary.storage=Confirmez que vous souhaitez supprimer ce stockage secondaire. -message.action.delete.security.group=Confirmez que vous souhaitez supprimer ce groupe de sécurité. +message.action.delete.security.group=Confirmez que vous souhaitez supprimer ce groupe de s\u00E9curit\u00E9. message.action.delete.service.offering=Confirmez que vous souhaitez supprimer cette offre de service. -message.action.delete.snapshot=Confirmez que vous souhaitez supprimer cet instantané -message.action.delete.template.for.all.zones=Ce modèle est utilisé par toutes les zones. Confirmez que vous souhaitez le supprimer de toutes les zones. -message.action.delete.template=Confirmez que vous souhaitez supprimer ce modèle. +message.action.delete.snapshot=Confirmez que vous souhaitez supprimer cet instantan\u00E9 +message.action.delete.system.service.offering=Confirmer la suppression de l\\'offre syst\u00E8me. +message.action.delete.template=Confirmez que vous souhaitez supprimer ce mod\u00E8le. +message.action.delete.template.for.all.zones=Ce mod\u00E8le est utilis\u00E9 par toutes les zones. Confirmez que vous souhaitez le supprimer de toutes les zones. message.action.delete.volume=Confirmez que vous souhaitez supprimer ce volume. message.action.delete.zone=Confirmez que vous souhaitez supprimer cette zone. message.action.destroy.instance=Confirmez que vous souhaitez supprimer cette instance. 
-message.action.destroy.systemvm=Confirmez que vous souhaitez supprimer cette VM Système. -message.action.disable.static.NAT=Confirmez que vous souhaitez désactiver le NAT statique. -message.action.enable.maintenance=Votre hôte a été mis en mode maintenance avec succès. Ce processus peut durer plusieurs minutes ou plus suivant le nombre de VMs actives sur cet hôte. -message.action.force.reconnect=Votre hôte a été forcé à se reconnecter avec succès. Ce processus peut prendre jusqu\'à plusieurs minutes. -message.action.host.enable.maintenance.mode=Activer le mode maintenance va causer la migration à chaud de l\'ensemble des instances de cet hôte sur les autres hôtes disponibles. +message.action.destroy.systemvm=Confirmez que vous souhaitez supprimer cette VM Syst\u00E8me. +message.action.disable.cluster=Confirmez que vous souhaitez d\u00E9sactiver ce cluster +message.action.disable.nexusVswitch=Confirmer la d\u00E9sactivation de ce Nexus 1000v +message.action.disable.physical.network=Confirmer la d\u00E9sactivation de ce r\u00E9seau physique. +message.action.disable.pod=Confirmez que vous voulez d\u00E9sactiver ce Pod +message.action.disable.static.NAT=Confirmez que vous souhaitez d\u00E9sactiver le NAT statique. +message.action.disable.zone=Confirmez que vous voulez d\u00E9sactiver cette zone +message.action.download.iso=Confirmer le t\u00E9l\u00E9chargement de cet ISO +message.action.download.template=Confirmer le t\u00E9l\u00E9chargement de ce mod\u00E8le +message.action.enable.cluster=Confirmez que vous souhaitez activer ce cluster +message.action.enable.maintenance=Votre h\u00F4te a \u00E9t\u00E9 mis en mode maintenance avec succ\u00E8s. Ce processus peut durer plusieurs minutes ou plus, suivant le nombre de VMs actives sur cet h\u00F4te. +message.action.enable.nexusVswitch=Confirmer l\\'activation de ce Nexus 1000v +message.action.enable.physical.network=Confirmer l\\'activation de ce r\u00E9seau physique. 
+message.action.enable.pod=Confirmez que vous souhaitez activer ce Pod +message.action.enable.zone=Confirmez que vous voulez activer cette zone +message.action.force.reconnect=Votre h\u00F4te a \u00E9t\u00E9 forc\u00E9 \u00E0 se reconnecter avec succ\u00E8s. Ce processus peut prendre jusqu\\'\u00E0 plusieurs minutes. +message.action.host.enable.maintenance.mode=Activer le mode maintenance va causer la migration \u00E0 chaud de l\\'ensemble des instances de cet h\u00F4te sur les autres h\u00F4tes disponibles. message.action.instance.reset.password=Confirmer le changement du mot de passe ROOT pour cette machine virtuelle. -message.action.primarystorage.enable.maintenance.mode=Attention \: placer ce stockage primaire en mode maintenance que l\'ensemble des VMs utilisant des volumes sur ce stockage. Souhaitez vous continuer ? -message.action.reboot.instance=Confirmez que vous souhaitez redémarrer cette instance. -message.action.reboot.systemvm=Confirmez que vous souhaitez redémarrer cette VM Système -message.action.release.ip=Confirmez que vous souhaitez libérer cette IP. -message.action.remove.host=Supprimer le dernier/seul hôte dans le cluster et le réinstaller va supprimer l\'environnement/la base de données sur l\'hôte et rendre les VMs invitées inutilisables. +message.action.manage.cluster=Confirmez que vous voulez g\u00E9rer le cluster +message.action.primarystorage.enable.maintenance.mode=Attention \: placer ce stockage primaire en mode maintenance va provoquer l\\'arr\u00EAt de l\\'ensemble des VMs utilisant des volumes sur ce stockage. Souhaitez-vous continuer ? +message.action.reboot.instance=Confirmez que vous souhaitez red\u00E9marrer cette instance. +message.action.reboot.router=Tous les services fournis par ce routeur virtuel vont \u00EAtre interrompus. Confirmer le r\u00E9-amor\u00E7age de ce routeur. 
+message.action.reboot.systemvm=Confirmez que vous souhaitez red\u00E9marrer cette VM Syst\u00E8me +message.action.release.ip=Confirmez que vous souhaitez lib\u00E9rer cette IP. +message.action.remove.host=Confirmer la suppression de cet h\u00F4te. +message.action.reset.password.off=Votre instance ne supporte pas pour le moment cette fonctionnalit\u00E9. +message.action.reset.password.warning=Votre instance doit \u00EAtre arr\u00EAt\u00E9e avant d\\'essayer de changer son mot de passe. message.action.restore.instance=Confirmez que vous souhaitez restaurer cette instance. -message.action.start.instance=Confirmez que vous souhaitez démarrer cette instance. -message.action.start.router=Confirmez que vous souhaitez démarrer ce routeur. -message.action.start.systemvm=Confirmez que vous souhaitez redémarrer cette VM système. -message.action.stop.instance=Confirmez que vous souhaitez arrêter cette instance. -message.action.stop.systemvm=Confirmez que vous souhaitez arrêter cette VM. -message.action.take.snapshot=Confirmer la prise d\'un snapshot pour ce volume. -message.add.cluster.zone=Ajouter un cluster d\'hyperviseurs géré pour cette zone -message.add.cluster=Ajouter un cluster d\'hyperviseurs géré pour cette zone , pod -message.add.disk.offering=Renseignez les paramètres suivants pour ajouter un offre de service de disques -message.add.firewall=Ajouter un parefeu à cette zone -message.add.host=Renseignez les paramètres suivant pour ajouter un hôte -message.add.ip.range.direct.network=Ajouter une plage IP au réseau direct dans la zone +message.action.start.instance=Confirmez que vous souhaitez d\u00E9marrer cette instance. +message.action.start.router=Confirmez que vous souhaitez d\u00E9marrer ce routeur. +message.action.start.systemvm=Confirmez que vous souhaitez red\u00E9marrer cette VM syst\u00E8me. +message.action.stop.instance=Confirmez que vous souhaitez arr\u00EAter cette instance. 
+message.action.stop.router=Tous les services fournis par ce routeur virtuel vont \u00EAtre interrompus. Confirmer l\\'arr\u00EAt de ce routeur. +message.action.stop.systemvm=Confirmez que vous souhaitez arr\u00EAter cette VM. +message.action.take.snapshot=Confirmer la prise d\\'un instantan\u00E9 pour ce volume. +message.action.unmanage.cluster=Confirmez que vous ne voulez plus g\u00E9rer le cluster +message.activate.project=\u00CAtes-vous s\u00FBr de vouloir activer ce projet ? +message.add.VPN.gateway=Confirmer l\\'ajout d\\'une passerelle VPN +message.add.cluster=Ajouter un cluster d\\'hyperviseurs g\u00E9r\u00E9 pour cette zone , pod +message.add.cluster.zone=Ajouter un cluster d\\'hyperviseurs g\u00E9r\u00E9 pour cette zone +message.add.disk.offering=Renseignez les param\u00E8tres suivants pour ajouter une offre de service de disques +message.add.domain=Sp\u00E9cifier le sous domaine que vous souhaitez cr\u00E9er sous ce domaine +message.add.firewall=Ajouter un pare-feu \u00E0 cette zone +message.add.guest.network=Confirmer l\\'ajout du r\u00E9seau invit\u00E9 +message.add.host=Renseignez les param\u00E8tres suivants pour ajouter un h\u00F4te +message.add.ip.range=Ajouter une plage IP pour le r\u00E9seau public dans la zone +message.add.ip.range.direct.network=Ajouter une plage IP au r\u00E9seau direct dans la zone message.add.ip.range.to.pod=

Ajouter une plage IP pour le pod\:

-message.add.ip.range=Ajouter une plage IP pour le réseau publique dans la zone -message.add.load.balancer=Ajouter un partageur de charge à la zone -message.add.network=Ajouter un nouveau réseau à la zone\: -message.add.pod=Ajouter un nouveau pod à la zone -message.add.primary.storage=Ajouter un nouveau stockage primaire à la zone , pod -message.add.primary=Renseignez les paramètres suivants pour ajouter un sotckage primaire +message.add.load.balancer=Ajouter un r\u00E9partiteur de charge \u00E0 la zone +message.add.load.balancer.under.ip=La r\u00E8gle de r\u00E9partition de charge a \u00E9t\u00E9 ajout\u00E9e sous l\\'adresse IP \: +message.add.network=Ajouter un nouveau r\u00E9seau \u00E0 la zone\: +message.add.new.gateway.to.vpc=Renseigner les informations suivantes pour ajouter une nouvelle passerelle pour ce VPC +message.add.pod=Ajouter un nouveau pod \u00E0 la zone +message.add.pod.during.zone.creation=Chaque zone doit contenir un ou plusieurs pods, et le premier pod sera ajout\u00E9 maintenant. Un pod contient les h\u00F4tes et les serveurs de stockage primaire, qui seront ajout\u00E9s dans une \u00E9tape ult\u00E9rieure. Configurer une plage d\\'adresses IP r\u00E9serv\u00E9es pour le trafic de gestion interne de CloudStack. La plage d\\'IP r\u00E9serv\u00E9e doit \u00EAtre unique pour chaque zone dans le nuage. +message.add.primary=Renseignez les param\u00E8tres suivants pour ajouter un stockage primaire +message.add.primary.storage=Ajouter un nouveau stockage primaire \u00E0 la zone , pod message.add.secondary.storage=Ajouter un nouveau stockage pour la zone -message.add.service.offering=Renseigner les informations suivantes pour ajouter une nouvelle offre de service computing. -message.add.template=Renseignez les informations suivantes pour créer votre nouveau modèle +message.add.service.offering=Renseigner les informations suivantes pour ajouter une nouvelle offre de service de calcul. 
+message.add.system.service.offering=Ajouter les informations suivantes pour cr\u00E9er une nouvelle offre syst\u00E8me. +message.add.template=Renseignez les informations suivantes pour cr\u00E9er votre nouveau mod\u00E8le message.add.volume=Renseignez les informations suivantes pour ajouter un nouveau volume -message.additional.networks.desc=Sélectionnez le(s) réseau(x) additionnel(s) au(x)quel(s) sera connectée votre instance. -message.advanced.mode.desc=Choisissez ce modèle de réseau si vous souhaitez bénéficier du support des VLANs. Ce mode de réseau donne le plus de flexibilité aux administrateurs pour fournir des offres de service réseau personnalisées comme fournir des parefeux, vpn, partageurs de charge ou également activer des réseaux virtuels ou directs. -message.advanced.security.group=Choisissez ceci si vous souhaitez utiliser les groupes de sécurité pour fournir l\'isolation des VMs invitées. -message.advanced.virtual=Choisissez ceci si vous souhaitez utiliser des VLANs pour fournir l\'isolation des VMs invitées. -message.allow.vpn.access=Entrez un nom d\'utilisateur et un mot de passe pour l\'utilisateur que vous souhaitez autorisé à utiliser l\'accès VPN. -message.attach.iso.confirm=Confirmez que vous souhaitez attacher l\'image ISO à cette instance. -message.attach.volume=Renseignez les données suivante pour attacher un nouveau volume. Si vous attachez un volume disque à une machine virtuelle sous Windows, vous aurez besoin de redémarrer l\'instance pour voir le nouveau disque. -message.basic.mode.desc=Choisissez ce modèle de réseau si vous *ne voulez pas* activer le support des VLANs. Toutes les instances créées avec ce modèle de réseau se verront assignées une adresse IP et les groupes de sécurité seront utilisés pour fournir l\'isolation entre les VMs.\n -message.change.offering.confirm=Confirmez que vous souhaitez changer l\'offre de service de cette instance. 
+message.adding.Netscaler.device=Ajouter un Netscaler +message.adding.Netscaler.provider=Ajouter un fournisseur Netscaler +message.adding.host=Ajout d\\'un h\u00F4te +message.additional.networks.desc=S\u00E9lectionnez le(s) r\u00E9seau(x) additionnel(s) au(x)quel(s) sera connect\u00E9e votre instance. +message.advanced.mode.desc=Choisissez ce mod\u00E8le de r\u00E9seau si vous souhaitez b\u00E9n\u00E9ficier du support des VLANs. Ce mode de r\u00E9seau donne le plus de flexibilit\u00E9 aux administrateurs pour fournir des offres de service r\u00E9seau personnalis\u00E9es comme fournir des pare-feux, VPN, r\u00E9partiteurs de charge ou \u00E9galement activer des r\u00E9seaux virtuels ou directs. +message.advanced.security.group=Choisissez ceci si vous souhaitez utiliser les groupes de s\u00E9curit\u00E9 pour fournir l\\'isolation des VMs invit\u00E9es. +message.advanced.virtual=Choisissez ceci si vous souhaitez utiliser des VLANs pour fournir l\\'isolation des VMs invit\u00E9es. +message.after.enable.s3=Le stockage secondaire S3 est configur\u00E9. Note \: Quand vous quitterez cette page, vous ne pourrez plus re-configurer le support S3. +message.after.enable.swift=Swift configur\u00E9. Remarque \: une fois que vous quitterez cette page, il ne sera plus possible de re-configurer Swift \u00E0 nouveau. +message.alert.state.detected=\u00C9tat d\\'alerte d\u00E9tect\u00E9 +message.allow.vpn.access=Entrez un nom d\\'utilisateur et un mot de passe pour l\\'utilisateur que vous souhaitez autoriser \u00E0 utiliser l\\'acc\u00E8s VPN. +message.apply.snapshot.policy=Vous avez mis \u00E0 jour votre politique d\\'instantan\u00E9s avec succ\u00E8s. +message.attach.iso.confirm=Confirmez que vous souhaitez attacher l\\'image ISO \u00E0 cette instance. +message.attach.volume=Renseignez les donn\u00E9es suivantes pour attacher un nouveau volume. 
Si vous attachez un volume disque \u00E0 une machine virtuelle sous Windows, vous aurez besoin de red\u00E9marrer l\\'instance pour voir le nouveau disque. +message.basic.mode.desc=Choisissez ce mod\u00E8le de r\u00E9seau si vous *ne voulez pas* activer le support des VLANs. Toutes les instances cr\u00E9\u00E9es avec ce mod\u00E8le de r\u00E9seau se verront assigner une adresse IP et les groupes de s\u00E9curit\u00E9 seront utilis\u00E9s pour fournir l\\'isolation entre les VMs. +message.change.offering.confirm=Confirmez que vous souhaitez changer l\\'offre de service de cette instance. +message.change.password=Merci de modifier votre mot de passe. +message.configure.all.traffic.types=Vous avez de multiples r\u00E9seaux physiques ; veuillez configurer les libell\u00E9s pour chaque type de trafic en cliquant sur le bouton Modifier. +message.configuring.guest.traffic=Configuration du r\u00E9seau VM +message.configuring.physical.networks=Configuration des r\u00E9seaux physiques +message.configuring.public.traffic=Configuration du r\u00E9seau public +message.configuring.storage.traffic=Configuration du r\u00E9seau de stockage +message.confirm.action.force.reconnect=Confirmer la re-connexion forc\u00E9e de cet h\u00F4te. +message.confirm.delete.F5=Confirmer la suppression du F5 +message.confirm.delete.NetScaler=Confirmer la suppression du Netscaler +message.confirm.delete.SRX=Confirmer la suppression du SRX +message.confirm.destroy.router=Confirmer la suppression de ce routeur +message.confirm.disable.provider=Confirmer la d\u00E9sactivation de ce fournisseur +message.confirm.enable.provider=Confirmer l\\'activation de ce fournisseur +message.confirm.join.project=Confirmer que vous souhaitez rejoindre ce projet. 
+message.confirm.remove.IP.range=Confirmer la suppression de cette plage d\\'adresses IP +message.confirm.shutdown.provider=Confirmer l\\'arr\u00EAt de ce fournisseur message.copy.iso.confirm=Confirmez que vous souhaitez copier votre image ISO vers -message.copy.template=Copier le modèle XXX de la zone vers -message.create.template.vm=Créer la VM depuis le modèle -message.create.template.volume=Renseignez les informations suivantes avec de créer un modèle à partir de votre volume de disque\:. La création du modèle peut prendre plusieurs minutes suivant la taille du volume. +message.copy.template=Copier le mod\u00E8le XXX de la zone vers +message.create.template=Voulez vous cr\u00E9er un mod\u00E8le ? +message.create.template.vm=Cr\u00E9er la VM depuis le mod\u00E8le +message.create.template.volume=Renseignez les informations suivantes avant de cr\u00E9er un mod\u00E8le \u00E0 partir de votre volume de disque\:. La cr\u00E9ation du mod\u00E8le peut prendre plusieurs minutes suivant la taille du volume. +message.creating.cluster=Cr\u00E9ation du cluster +message.creating.guest.network=Cr\u00E9ation du r\u00E9seau pour les invit\u00E9s +message.creating.physical.networks=Cr\u00E9ation des r\u00E9seaux physiques +message.creating.pod=Cr\u00E9ation d\\'un pod +message.creating.primary.storage=Cr\u00E9ation du stockage primaire +message.creating.secondary.storage=Cr\u00E9ation du stockage secondaire +message.creating.zone=Cr\u00E9ation de la zone +message.decline.invitation=Voulez-vous refuser cette invitation au projet ? +message.delete.VPN.connection=Confirmer la suppression de la connexion VPN +message.delete.VPN.customer.gateway=Confirmer la suppression de cette passerelle VPN client +message.delete.VPN.gateway=Confirmer la suppression de cette passerelle VPN message.delete.account=Confirmez que vous souhaitez supprimer ce compte. -message.detach.iso.confirm=Confirmez que vous souhaitez détacher l\'image ISO de cette instance. 
-# message.disable.account=Please confirm that you want to disable this account. By disabling the account, all users for this account will no longer have access to their cloud resources. All running virtual machines will be immediately shut down. -message.disable.vpn.access=Confirmez que vous souhaitez désactiver l\'accès VPN. -message.download.ISO=Cliquer le lien 00000 pour télécharger une image ISO -message.download.template=Cliquer le lien 00000 pour télécharger une template -message.download.volume=Cliquez sur 00000 pour télécharger le volume -message.edit.confirm=Confirmer les changement avant de cliquer sur "Enregistrer". -message.edit.limits=Renseignez les limites pour les ressources suivantes. "-1" indique qu\'il n\'y a pas de limites pour la création de ressources. +message.delete.gateway=Confirmer la suppression de cette passerelle +message.delete.project=\u00CAtes-vous s\u00FBr de vouloir supprimer ce projet ? +message.delete.user=Confirmer la suppression de cet utilisateur. +message.desc.advanced.zone=Pour des topologies de r\u00E9seau plus sophistiqu\u00E9es. Ce mod\u00E8le de r\u00E9seau permet plus de flexibilit\u00E9 dans la d\u00E9finition des r\u00E9seaux d\\'invit\u00E9s et propose des offres personnalis\u00E9es telles que le support de pare-feu, VPN ou d\\'\u00E9quilibrage de charge. +message.desc.basic.zone=Fournit un r\u00E9seau unique o\u00F9 chaque instance de machine virtuelle se voit attribuer une adresse IP directement depuis le r\u00E9seau. L\\'isolation des invit\u00E9s peut \u00EAtre assur\u00E9 au niveau de la couche r\u00E9seau-3 tels que les groupes de s\u00E9curit\u00E9 (filtrage d\\'adresse IP source). +message.desc.cluster=Chaque pod doit contenir un ou plusieurs clusters, et le premier cluster sera ajout\u00E9 tout de suite. Un cluster est un regroupement pour h\u00F4tes. 
Les h\u00F4tes d\\'un cluster ont tous un mat\u00E9riel identique, ex\u00E9cutent le m\u00EAme hyperviseur, sont dans le m\u00EAme sous-r\u00E9seau, et acc\u00E8dent au m\u00EAme stockage partag\u00E9. Chaque cluster comprend une ou plusieurs h\u00F4tes et un ou plusieurs serveurs de stockage primaire. +message.desc.host=Chaque cluster doit contenir au moins un h\u00F4te (machine) pour ex\u00E9cuter des machines virtuelles invit\u00E9es, et le premier h\u00F4te sera ajout\u00E9 tout de suite. Pour un h\u00F4te fonctionnant dans CloudStack, vous devez installer un logiciel hyperviseur sur l\\'h\u00F4te, attribuer une adresse IP \u00E0 l\\'h\u00F4te, et s\\'assurer que l\\'h\u00F4te est connect\u00E9 au serveur d\\'administration CloudStack.

Indiquer le nom de l\\'h\u00F4te ou son adresse IP, l\\'identifiant de connexion (g\u00E9n\u00E9ralement root) et le mot de passe ainsi que toutes les \u00E9tiquettes permettant de classer les h\u00F4tes. +message.desc.primary.storage=Chaque cluster doit contenir un ou plusieurs serveurs de stockage primaire, et le premier sera ajout\u00E9 tout de suite. Le stockage principal contient les volumes de disque pour les machines virtuelles s\\'ex\u00E9cutant sur les h\u00F4tes dans le cluster. Utiliser les protocoles standards pris en charge par l\\'hyperviseur sous-jacent. +message.desc.secondary.storage=Chaque zone doit avoir au moins un serveur NFS ou un serveur de stockage secondaire, et sera ajout\u00E9 en premier tout de suite. Le stockage secondaire entrepose les mod\u00E8les de machines virtuelles, les images ISO et les images disques des volumes des machines virtuelles. Ce serveur doit \u00EAtre accessible pour toutes les machines h\u00F4tes dans la zone.

Saisir l\\'adresse IP et le chemin d\\'export. +message.desc.zone=Une zone est la plus grande unit\u00E9 organisationnelle dans CloudStack, et correspond typiquement \u00E0 un centre de donn\u00E9es. Les zones fournissent un isolement physique et de la redondance. Une zone est constitu\u00E9e d\\'un ou plusieurs pods (dont chacun contient les h\u00F4tes et les serveurs de stockage primaire) et un serveur de stockage secondaire qui est partag\u00E9e par tous les pods dans la zone. +message.detach.disk=Voulez-vous d\u00E9tacher ce disque ? +message.detach.iso.confirm=Confirmez que vous souhaitez d\u00E9tacher l\\'image ISO de cette instance. +message.disable.account=Veuillez confirmer que vous voulez d\u00E9sactiver ce compte. En d\u00E9sactivant le compte, tous les utilisateurs pour ce compte n\\'auront plus acc\u00E8s \u00E0 leurs ressources sur le cloud. Toutes les machines virtuelles vont \u00EAtre arr\u00EAt\u00E9es imm\u00E9diatement. +message.disable.snapshot.policy=Vous avez d\u00E9sactiv\u00E9 votre politique d\\'instantan\u00E9 avec succ\u00E8s. +message.disable.user=Confirmer la d\u00E9sactivation de cet utilisateur. +message.disable.vpn=\u00CAtes-vous s\u00FBr de vouloir d\u00E9sactiver le VPN ? +message.disable.vpn.access=Confirmez que vous souhaitez d\u00E9sactiver l\\'acc\u00E8s VPN. +message.download.ISO=Cliquer 00000 pour t\u00E9l\u00E9charger une image ISO +message.download.template=Cliquer sur 00000 pour t\u00E9l\u00E9charger le mod\u00E8le +message.download.volume=Cliquer sur 00000 pour t\u00E9l\u00E9charger le volume +message.download.volume.confirm=Confirmer le t\u00E9l\u00E9chargement du volume +message.edit.account=Modifier ("-1" signifie pas de limite de ressources) +message.edit.confirm=Confirmer les changements avant de cliquer sur "Enregistrer". +message.edit.limits=Renseignez les limites pour les ressources suivantes. "-1" indique qu\\'il n\\'y a pas de limites pour la cr\u00E9ation de ressources. 
+message.edit.traffic.type=Sp\u00E9cifier le libell\u00E9 de trafic associ\u00E9 avec ce type de trafic. message.enable.account=Confirmez que vous souhaitez activer ce compte. -message.enable.vpn.access=Le VPN est désactivé pour cette adresse IP. Voulez vous activer l\'accès VPN ? -message.enabled.vpn.ip.sec=Votre clé partagée IPSec est -message.enabled.vpn=Votre accès VPN est activé et peut être accédé par l\'IP -message.launch.vm.on.private.network=Souhaitez vous démarrer cette instance sur votre propre réseau privé ? -message.lock.account=Confirmez que vous souhaitez verrouiller ce compte. En le verrouillant, les utilisateurs de ce compte ne seront plus capable de gérer leurs ressources. Les ressources existantes resteront toutefois accessibles. -message.migrate.instance.confirm=Confirmez l\'hôte vers lequel vous souhaitez migrer cette instance +message.enable.user=Confirmer l\\'activation de cet utilisateur. +message.enable.vpn=Confirmer l\\'activation de l\\'acc\u00E8s VPN pour cette adresse IP. +message.enable.vpn.access=Le VPN est d\u00E9sactiv\u00E9 pour cette adresse IP. Voulez vous activer l\\'acc\u00E8s VPN ? +message.enabled.vpn=Votre acc\u00E8s VPN est activ\u00E9 et peut \u00EAtre acc\u00E9d\u00E9 par l\\'IP +message.enabled.vpn.ip.sec=Votre cl\u00E9 partag\u00E9e IPSec est +message.enabling.security.group.provider=Activation du fournisseur de groupe de s\u00E9curit\u00E9 +message.enabling.zone=Activation de la zone +message.enter.token=Entrer le jeton unique re\u00E7u dans le message d\\'invitation. +message.generate.keys=Confirmer la g\u00E9n\u00E9ration de nouvelles clefs pour cet utilisateur. +message.guest.traffic.in.advanced.zone=Le trafic r\u00E9seau d\\'invit\u00E9 est la communication entre les machines virtuelles utilisateur. Sp\u00E9cifier une plage d\\'identifiant VLAN pour le trafic des invit\u00E9s pour chaque r\u00E9seau physique. 
+message.guest.traffic.in.basic.zone=Le trafic r\u00E9seau d\\'invit\u00E9 est la communication entre les machines virtuelles utilisateur. Sp\u00E9cifier une plage d\\'adresses IP que CloudStack peut assigner aux machines virtuelles Invit\u00E9. S\\'assurer que cette plage n\\'empi\u00E8te pas sur la plage r\u00E9serv\u00E9e aux adresses IP Syst\u00E8me. +message.installWizard.click.retry=Appuyer sur le bouton pour essayer \u00E0 nouveau le d\u00E9marrage. +message.installWizard.copy.whatIsACluster=Un cluster permet de grouper les h\u00F4tes. Les h\u00F4tes d\\'un cluster ont un mat\u00E9riel identique, ex\u00E9cutent le m\u00EAme hyperviseur, sont sur le m\u00EAme sous-r\u00E9seau, et acc\u00E8dent au m\u00EAme stockage partag\u00E9. Les instances de machines virtuelles (VM) peuvent \u00EAtre migr\u00E9es \u00E0 chaud d\\'un h\u00F4te \u00E0 un autre au sein du m\u00EAme groupe, sans interrompre les services utilisateur. Un cluster est la troisi\u00E8me plus large unit\u00E9 organisationnelle dans un d\u00E9ploiement CloudStack&\#8482;. Les clusters sont contenus dans les pods et les pods sont contenus dans les zones.

CloudStack&\#8482; permet d\\'avoir plusieurs clusters dans un d\u00E9ploiement en nuage, mais pour une installation basique, il n\\'y a qu\\'un seul cluster. +message.installWizard.copy.whatIsAHost=Un h\u00F4te est une machine. Les h\u00F4tes fournissent les ressources informatiques qui ex\u00E9cutent les machines virtuelles invit\u00E9es. Chaque h\u00F4te a un logiciel hyperviseur install\u00E9 pour g\u00E9rer les machines virtuelles invit\u00E9es (sauf pour les h\u00F4tes de type \\'bare-metal\\', qui sont un cas particulier d\u00E9taill\u00E9 dans le Guide d\\'installation avanc\u00E9e). Par exemple, un serveur Linux avec KVM, un serveur Citrix XenServer, et un serveur ESXi sont des h\u00F4tes. Dans une installation basique, un seul h\u00F4te ex\u00E9cutant XenServer ou KVM est utilis\u00E9.

L\\'h\u00F4te est la plus petite unit\u00E9 organisation au sein d\\'un d\u00E9ploiement CloudStack&\#8482;. Les h\u00F4tes sont contenus dans les clusters, les clusters sont contenus dans les pods et les pods sont contenus dans les zones. +message.installWizard.copy.whatIsAPod=Un pod repr\u00E9sente souvent un seul rack. Les h\u00F4tes dans le m\u00EAme pod sont dans le m\u00EAme sous-r\u00E9seau.
Un pod est la deuxi\u00E8me plus grande unit\u00E9 organisationnelle au sein d\\'un d\u00E9ploiement CloudStack&\#8482;. Les pods sont contenus dans les zones. Chaque zone peut contenir un ou plusieurs pods ; dans l\\'Installation Basique, vous aurez juste un pod dans votre zone. +message.installWizard.copy.whatIsAZone=Une zone est la plus grande unit\u00E9 organisationnelle au sein d\\'un d\u00E9ploiement CloudStack&\#8482;. Une zone correspond typiquement \u00E0 un centre de donn\u00E9es, mais il est permis d\\'avoir plusieurs zones dans un centre de donn\u00E9es. L\\'avantage d\\'organiser une infrastructure en zones est de fournir une isolation physique et de la redondance. Par exemple, chaque zone peut avoir sa propre alimentation et de liaison avec le r\u00E9seau, et les zones peuvent \u00EAtre tr\u00E8s \u00E9loign\u00E9es g\u00E9ographiquement (m\u00EAme si ce n\\'est pas une obligation). +message.installWizard.copy.whatIsCloudStack=CloudStack&\#8482; est une plate-forme logicielle de pools de ressources informatiques pour construire des infrastructures publiques, priv\u00E9es et hybrides en tant que services (IaaS) dans les nuages. CloudStack&\#8482; g\u00E8re le r\u00E9seau, le stockage et les noeuds de calcul qui composent une infrastructure dans les nuages. Utilisez CloudStack&\#8482; pour d\u00E9ployer, g\u00E9rer et configurer les environnements d\\'informatiques dans les nuages.

S\\'\u00E9tendant au-del\u00E0 des machines virtuelles individuelles fonctionnant sur du mat\u00E9riel standard, CloudStack&\#8482; offre une solution d\\'informatique en nuage cl\u00E9 en main pour fournir des centres de donn\u00E9es virtuels comme service - fournissant tous les composants essentiels pour construire, d\u00E9ployer et g\u00E9rer des applications \\'cloud\\' multi-niveaux et multi-locataire. Les versions libre et Premium sont disponibles, la version Libre offrant des caract\u00E9ristiques presque identiques. +message.installWizard.copy.whatIsPrimaryStorage=Une infrastructure CloudStack&\#8482; utilise deux types de stockage \: stockage primaire et stockage secondaire. Les deux peuvent \u00EAtre des serveurs iSCSI ou NFS, ou sur disque local.

Le stockage primaire est associ\u00E9 \u00E0 un cluster, et stocke les volumes disques de chaque machine virtuelle pour toutes les VMs s\\'ex\u00E9cutant sur les h\u00F4tes dans le cluster. Le serveur de stockage primaire est typiquement proche des h\u00F4tes. +message.installWizard.copy.whatIsSecondaryStorage=Le stockage secondaire est associ\u00E9 \u00E0 une zone, et il stocke les \u00E9l\u00E9ments suivants\:
  • Mod\u00E8les - images de syst\u00E8mes d\\'exploitation qui peuvent \u00EAtre utilis\u00E9es pour d\u00E9marrer les machines virtuelles et peuvent inclure des informations de configuration suppl\u00E9mentaires, telles que les applications pr\u00E9-install\u00E9es
  • Images ISO - images de syst\u00E8me d\\'exploitation ou d\\'installation d\\'OS qui peuvent \u00EAtre amor\u00E7able ou non-amor\u00E7able
  • Images de volume disque - capture des donn\u00E9es de machines virtuelles qui peuvent \u00EAtre utilis\u00E9es pour la r\u00E9cup\u00E9ration des donn\u00E9es ou cr\u00E9er des mod\u00E8les
+message.installWizard.now.building=Construction de votre Cloud en cours +message.installWizard.tooltip.addCluster.name=Un nom pour le cluster. Ce choix est libre et n\\'est pas utilis\u00E9 par CloudStack. +message.installWizard.tooltip.addHost.hostname=Le nom DNS ou adresse IP du serveur. +message.installWizard.tooltip.addHost.password=Le mot de passe pour l\\'utilisateur indiqu\u00E9 pr\u00E9c\u00E9demment (issu de l\\'installation XenServer). +message.installWizard.tooltip.addHost.username=Habituellement root. +message.installWizard.tooltip.addPod.name=Nom pour le pod +message.installWizard.tooltip.addPod.reservedSystemEndIp=Ceci est la plage d\\'adresses IP dans le r\u00E9seau priv\u00E9 que CloudStack utilise la gestion des VMs du stockage secondaire et les VMs Console Proxy. Ces adresses IP sont prises dans le m\u00EAme sous-r\u00E9seau que les serveurs h\u00F4tes. +message.installWizard.tooltip.addPod.reservedSystemGateway=Passerelle pour les serveurs dans ce pod +message.installWizard.tooltip.addPod.reservedSystemNetmask=Le masque r\u00E9seau que les instances utiliseront sur le r\u00E9seau +message.installWizard.tooltip.addPod.reservedSystemStartIp=Ceci est la plage d\\'adresses IP dans le r\u00E9seau priv\u00E9 que CloudStack utilise la gestion des VMs du stockage secondaire et les VMs Console Proxy. Ces adresses IP sont prises dans le m\u00EAme sous-r\u00E9seau que les serveurs h\u00F4tes. +message.installWizard.tooltip.addPrimaryStorage.name=Nom pour ce stockage +message.installWizard.tooltip.addPrimaryStorage.path=(pour NFS) Dans NFS, ceci est le chemin d\\'export depuis le serveur. (pour SharedMountPoint) Le chemin. Avec KVM, c\\'est le chemin sur chaque h\u00F4te o\u00F9 ce stockage primaire est mont\u00E9. Par exemple, "/mnt/primary". 
+message.installWizard.tooltip.addPrimaryStorage.server=(pour NFS, iSCSI ou PreSetup) Adresse IP ou nom DNS du stockage +message.installWizard.tooltip.addSecondaryStorage.nfsServer=Adresse IP du serveur NFS supportant le stockage secondaire +message.installWizard.tooltip.addSecondaryStorage.path=Le chemin export\u00E9, situ\u00E9 sur le serveur sp\u00E9cifi\u00E9 pr\u00E9c\u00E9demment +message.installWizard.tooltip.addZone.dns1=Ces serveurs DNS sont utilis\u00E9s par les machines virtuelles Invit\u00E9es dans la zone. Ces serveurs DNS seront accessibles par le r\u00E9seau public, ce dernier sera ajout\u00E9 plus tard. Les adresses IP publiques pour la zone doivent avoir une route vers les serveurs DNS indiqu\u00E9s ici. +message.installWizard.tooltip.addZone.dns2=Ces serveurs DNS sont utilis\u00E9s par les machines virtuelles Invit\u00E9es dans la zone. Ces serveurs DNS seront accessibles par le r\u00E9seau public, ce dernier sera ajout\u00E9 plus tard. Les adresses IP publiques pour la zone doivent avoir une route vers les serveurs DNS indiqu\u00E9s ici. +message.installWizard.tooltip.addZone.internaldns1=Ces serveurs DNS sont utilis\u00E9s par les machines virtuelles Invit\u00E9es dans la zone. Ces serveurs DNS seront accessibles par le r\u00E9seau public, ce dernier sera ajout\u00E9 plus tard. Les adresses IP publiques pour la zone doivent avoir une route vers les serveurs DNS indiqu\u00E9s ici. +message.installWizard.tooltip.addZone.internaldns2=Ces serveurs DNS sont utilis\u00E9s par les machines virtuelles Invit\u00E9es dans la zone. Ces serveurs DNS seront accessibles par le r\u00E9seau public, ce dernier sera ajout\u00E9 plus tard. Les adresses IP publiques pour la zone doivent avoir une route vers les serveurs DNS indiqu\u00E9s ici. 
+message.installWizard.tooltip.addZone.name=Nom pour la zone +message.installWizard.tooltip.configureGuestTraffic.description=Description pour ce r\u00E9seau +message.installWizard.tooltip.configureGuestTraffic.guestEndIp=La plage d\\'adresses IP qui sera disponible en allocation pour les machines invit\u00E9es dans cette zone. Si une carte r\u00E9seau est utilis\u00E9e, ces adresses IP peuvent \u00EAtre dans le m\u00EAme CIDR que le CIDR du pod. +message.installWizard.tooltip.configureGuestTraffic.guestGateway=La passerelle que les instances invit\u00E9es doivent utiliser +message.installWizard.tooltip.configureGuestTraffic.guestNetmask=Le masque r\u00E9seau que les instances devraient utiliser sur le r\u00E9seau +message.installWizard.tooltip.configureGuestTraffic.guestStartIp=La plage d\\'adresses IP qui sera disponible en allocation pour les machines invit\u00E9es dans cette zone. Si une carte r\u00E9seau est utilis\u00E9e, ces adresses IP peuvent \u00EAtre dans le m\u00EAme CIDR que le CIDR du pod. +message.installWizard.tooltip.configureGuestTraffic.name=Nom pour ce r\u00E9seau +message.instanceWizard.noTemplates=Vous n\\'avez pas d\\'image disponible ; Ajouter un mod\u00E8le compatible puis relancer l\\'assistant de cr\u00E9ation d\\'instance. +message.ip.address.changed=Vos adresses IP ont peut \u00EAtre chang\u00E9es ; Voulez vous rafra\u00EEchir la liste ? Dans ce cas, le panneau de d\u00E9tail se fermera. +message.iso.desc=Image disque contenant des donn\u00E9es ou un support amor\u00E7able pour OS +message.join.project=Vous avez rejoint un projet. S\u00E9lectionnez la vue Projet pour le voir. +message.launch.vm.on.private.network=Souhaitez vous d\u00E9marrer cette instance sur votre propre r\u00E9seau priv\u00E9 ? +message.launch.zone=La zone est pr\u00EAte \u00E0 d\u00E9marrer ; passer \u00E0 l\\'\u00E9tape suivante. +message.lock.account=Confirmez que vous souhaitez verrouiller ce compte.
En le verrouillant, les utilisateurs de ce compte ne seront plus capables de g\u00E9rer leurs ressources. Les ressources existantes resteront toutefois accessibles. +message.migrate.instance.confirm=Confirmez l\\'h\u00F4te vers lequel vous souhaitez migrer cette instance +message.migrate.instance.to.host=Confirmer la migration de l\\'instance vers un autre h\u00F4te +message.migrate.instance.to.ps=Confirmer la migration de l\\'instance vers un autre stockage primaire +message.migrate.router.confirm=Confirmer la migration du routeur vers \: +message.migrate.systemvm.confirm=Confirmer la migration de la VM syst\u00E8me vers \: +message.migrate.volume=Confirmer la migration du volume vers un autre stockage primaire. message.new.user=Renseigner les informations suivantes pour ajouter un nouveau compte utilisateur -message.no.network.support.configuration.not.true=Il n\'y a pas de zone avec la fonction groupe de sécurité active. Dès lors, pas de fonction réseau supplémentaires disponibles. Continuer à l\'étape 5. -message.no.network.support=Sélectionnez l\'hyperviseur. vSphere, n\'a pas de fonctionnalités supplémentaires pour le réseau. Continuez à l\'étape 5. -message.number.clusters=

\# of Clusters

-message.number.hosts=

\# of Hôtes

-message.number.pods=

\# of Pods

-message.number.storage=

\# of Volumes de Stockage Primaire

-message.number.zones=

\# of Zones

-message.remove.vpn.access=Confirmez que vous souhaitez supprimer l\'accès VPN à l\'utilisateur suivant. -message.restart.mgmt.server=Redémarrez votre(vos) serveur(s) de management pour appliquer les nouveaux paramètres. -message.restart.mgmt.usage.server=Redémarrer le ou les management server et usage server pour que les nouveaux paramètres soient pris en compte. -message.security.group.usage=(Utilisez Ctrl-clic pour séléctionner les groupes de sécurité visés) -message.snapshot.schedule=Vous pouvez mettre en place les politiques de génération d\'instantanés en sélectionnant les options disponibles ci-dessous et en appliquant votre politique. -message.step.1.continue=Sélectionnez un modèle ou une image ISO pour continuer -message.step.1.desc=Sélectionnez un template pour votre nouvelle instance virtuelle. Vous pouvez également choisir un modèle vierge sur lequel une image ISO pourra être installé. -message.step.2.continue=Sélectionnez une offre de service pour continuer -message.step.2.desc= -message.step.3.continue=Sélectionnez un offre de service de disque pour continuer -message.step.3.desc= -message.step.4.continue=Sélectionnez au moins un réseau pour continuer -message.step.4.desc=Sélectionnez le réseau principal auquel votre instance va être connecté. -message.update.os.preference=Choisissez votre OS préféré pour cet hôte. Toutes les instances avec des préférences similaires seront d\'abord allouées à cet hôte avant d\'en choisir un autre. -message.update.ssl=Soumettez un nouveau certificat SSL compatible X.509 qui sera mis à jour sur l\'ensemble de instance de proxy console. -message.virtual.network.desc=Un réseau virtuel dédié pour votre compte. Ce domaine de broadcast est contenu dans un VLAN et l\'ensemble de accès réseau publique sont routés par un routeur virtuel. -message.volume.create.template.confirm=Confirmez que vous souhaitez créer un modèle pour ce disque. La création peut prendre plusieurs minutes, voire plus, selon la taille du volume. 
-message.zone.step.1.desc=Sélectionnez un modèle de réseau pour votre zone. +message.no.network.support=S\u00E9lectionnez l\\'hyperviseur. vSphere, n\\'a pas de fonctionnalit\u00E9s suppl\u00E9mentaires pour le r\u00E9seau. Continuez \u00E0 l\\'\u00E9tape 5. +message.no.network.support.configuration.not.true=Il n\\'y a pas de zone avec la fonction groupe de s\u00E9curit\u00E9 active. D\u00E8s lors, pas de fonction r\u00E9seau suppl\u00E9mentaires disponibles. Continuer \u00E0 l\\'\u00E9tape 5. +message.no.projects=Vous n\\'avez pas de projet.
Vous pouvez en cr\u00E9er un depuis la section projets. +message.no.projects.adminOnly=Vous n\\'avez pas de projet.
Contacter votre administrateur pour ajouter un projet. +message.number.clusters=

\# de Clusters

+message.number.hosts=

\# d\\'H\u00F4tes

+message.number.pods=

\# de Pods

+message.number.storage=

\# de Volumes de Stockage Primaire

+message.number.zones=

\# de Zones

+message.pending.projects.1=Vous avez des invitations projet en attente \: +message.pending.projects.2=Pour les visualiser, aller dans la section projets, puis s\u00E9lectionner invitation dans la liste d\u00E9roulante. +message.please.add.at.lease.one.traffic.range=Ajouter au moins une plage r\u00E9seau +message.please.proceed=Continuer vers la prochaine \u00E9tape. +message.please.select.a.configuration.for.your.zone=S\u00E9lectionner une configuration pour la zone. +message.please.select.a.different.public.and.management.network.before.removing=S\u00E9lectionner un r\u00E9seau public et d\\'administration diff\u00E9rent avant de supprimer +message.please.select.networks=S\u00E9lectionner les r\u00E9seaux pour votre machine virtuelle. +message.please.wait.while.zone.is.being.created=Patienter pendant la cr\u00E9ation de la zone, cela peut prendre du temps... +message.project.invite.sent=Invitation envoy\u00E9e ; les utilisateurs seront ajout\u00E9s apr\u00E8s acceptation de l\\'invitation +message.public.traffic.in.advanced.zone=Le trafic public est g\u00E9n\u00E9r\u00E9 lorsque les machines virtuelles dans le nuage acc\u00E8dent \u00E0 Internet. Des adresses IP publiquement accessibles doivent \u00EAtre pr\u00E9vues \u00E0 cet effet. Les utilisateurs peuvent utiliser l\\'interface d\\'administration de CloudStack pour acqu\u00E9rir ces adresses IP qui impl\u00E9menteront une translation d\\'adresse NAT entre le r\u00E9seau d\\'invit\u00E9 et le r\u00E9seau public.

Fournir au moins une plage d\\'adresses IP pour le trafic Internet. +message.public.traffic.in.basic.zone=Le trafic public est g\u00E9n\u00E9r\u00E9 lorsque les machines virtuelles dans le nuage acc\u00E8dent \u00E0 Internet ou fournissent des services \u00E0 des utilisateurs sur Internet. Des adresses IP publiquement accessibles doivent \u00EAtre pr\u00E9vus \u00E0 cet effet. Quand une instance est cr\u00E9\u00E9e, une adresse IP publique depuis un ensemble d\\'adresses IP publiques sera allou\u00E9e \u00E0 l\\'instance, en plus de l\\'adresse IP de l\\'invit\u00E9. La translation d\\'adresses statique NAT 1-1 sera mises en place automatiquement entre l\\'adresse IP publique et l\\'adresse IP de l\\'invit\u00E9. Les utilisateurs peuvent \u00E9galement utiliser l\\'interface d\\'administration CloudStack pour acqu\u00E9rir des adresses IP suppl\u00E9mentaires pour ajouter une translation d\\'adresse statique NAT entre leurs instances et le r\u00E9seau d\\'adresses IP publiques. +message.remove.vpc=Confirmer la suppression du VPC +message.remove.vpn.access=Confirmez que vous souhaitez supprimer l\\'acc\u00E8s VPN \u00E0 l\\'utilisateur suivant. +message.reset.VPN.connection=Confirmer le r\u00E9-initialisation de la connexion VPN +message.reset.password.warning.notPasswordEnabled=Le mod\u00E8le de cette instance a \u00E9t\u00E9 cr\u00E9\u00E9 sans la gestion de mot de passe +message.reset.password.warning.notStopped=Votre instance doit \u00EAtre arr\u00EAt\u00E9e avant de changer son mot de passe +message.restart.mgmt.server=Red\u00E9marrez votre(vos) serveur(s) de management pour appliquer les nouveaux param\u00E8tres. +message.restart.mgmt.usage.server=Red\u00E9marrer le ou les serveur(s) de gestion et le ou les serveur(s) de consommation pour que les nouveaux param\u00E8tres soient pris en compte. +message.restart.network=Tous les services fournit par ce routeur virtuel vont \u00EAtre interrompus. Confirmer le red\u00E9marrage de ce routeur. 
+message.restart.vpc=Confirmer le red\u00E9marrage du VPC +message.security.group.usage=(Utilisez Ctrl-clic pour s\u00E9lectionner les groupes de s\u00E9curit\u00E9 vis\u00E9s) +message.select.a.zone=Une zone correspond typiquement \u00E0 un seul centre de donn\u00E9es. Des zones multiples peuvent permettre de rendre votre cloud plus fiable en apportant une isolation physique et de la redondance. +message.select.instance=S\u00E9lectionner une instance. +message.select.iso=S\u00E9lectionner un ISO pour votre nouvelle instance virtuelle. +message.select.item=Merci de s\u00E9lectionner un \u00E9l\u00E9ment. +message.select.security.groups=Merci de s\u00E9lectionner un(des) groupe(s) de s\u00E9curit\u00E9 pour la nouvelle VM +message.select.template=S\u00E9lectionner un mod\u00E8le pour votre nouvelle instance virtuelle. +message.setup.physical.network.during.zone.creation=Lorsque vous ajoutez une zone avanc\u00E9e, vous avez besoin de d\u00E9finir un ou plusieurs r\u00E9seaux physiques. Chaque r\u00E9seau correspond \u00E0 une carte r\u00E9seau sur l\\'hyperviseur. Chaque r\u00E9seau physique peut supporter un ou plusieurs types de trafic, avec certaines restrictions sur la fa\u00E7on dont ils peuvent \u00EAtre combin\u00E9s.

Glisser et d\u00E9poser un ou plusieurs types de trafic sur chaque r\u00E9seau physique. +message.setup.physical.network.during.zone.creation.basic=Quand vous ajoutez une zone basique, vous pouvez param\u00E9trer un seul r\u00E9seau physique, correspondant \u00E0 une carte r\u00E9seau sur l\\'hyperviseur. Ce r\u00E9seau comportera plusieurs types de trafic.

Vous pouvez \u00E9galement glisser et d\u00E9poser d\\'autres types de trafic sur le r\u00E9seau physique. +message.setup.successful=Installation du Cloud r\u00E9ussie \! +message.snapshot.schedule=Vous pouvez mettre en place les politiques de g\u00E9n\u00E9ration d\\'instantan\u00E9s en s\u00E9lectionnant les options disponibles ci-dessous et en appliquant votre politique. +message.specify.url=Renseigner l\\'URL +message.step.1.continue=S\u00E9lectionnez un mod\u00E8le ou une image ISO pour continuer +message.step.1.desc=S\u00E9lectionnez un mod\u00E8le pour votre nouvelle instance virtuelle. Vous pouvez \u00E9galement choisir un mod\u00E8le vierge sur lequel une image ISO pourra \u00EAtre install\u00E9e. +message.step.2.continue=S\u00E9lectionnez une offre de service pour continuer +message.step.3.continue=S\u00E9lectionnez un offre de service de disque pour continuer +message.step.4.continue=S\u00E9lectionnez au moins un r\u00E9seau pour continuer +message.step.4.desc=S\u00E9lectionnez le r\u00E9seau principal auquel votre instance va \u00EAtre connect\u00E9. +message.storage.traffic=Trafic entre les ressources internes de CloudStack, incluant tous les composants qui communiquent avec le serveur d\\'administration, tels que les h\u00F4tes et les machines virtuelles Syst\u00E8mes CloudStack. Veuillez configurer le trafic de stockage ici. +message.suspend.project=\u00CAtes-vous s\u00FBr de vouloir suspendre ce projet ? +message.template.desc=Image OS pouvant \u00EAtre utilis\u00E9e pour d\u00E9marrer une VM +message.tooltip.dns.1=Nom d\\'un serveur DNS utilis\u00E9 par les VM de la zone. Les adresses IP publiques de cette zone doivent avoir une route vers ce serveur. +message.tooltip.dns.2=Nom d\\'un serveur DNS secondaire utilis\u00E9 par les VM de la zone. Les adresses IP publiques de cette zone doivent avoir une route vers ce serveur. +message.tooltip.internal.dns.1=Nom d\\'un serveur DNS que CloudStack peut utiliser pour les VM syst\u00E8me dans cette zone. 
Les adresses IP priv\u00E9es des pods doivent avoir une route vers ce serveur. +message.tooltip.internal.dns.2=Nom d\\'un serveur DNS que CloudStack peut utiliser pour les VM syst\u00E8me dans cette zone. Les adresses IP priv\u00E9es des pods doivent avoir une route vers ce serveur. +message.tooltip.network.domain=Suffixe DNS qui cr\u00E9era un nom de domaine personnalis\u00E9 pour les r\u00E9seau accessible par les VM invit\u00E9es. +message.tooltip.pod.name=Nom pour ce pod. +message.tooltip.reserved.system.gateway=La passerelle pour les h\u00F4tes du pod. +message.tooltip.reserved.system.netmask=Le pr\u00E9fixe r\u00E9seau utilis\u00E9 par le sous-r\u00E9seau du pod. Au format CIDR. +message.tooltip.zone.name=Nom pour cette zone. +message.update.os.preference=Choisissez votre OS pr\u00E9f\u00E9r\u00E9 pour cet h\u00F4te. Toutes les instances avec des pr\u00E9f\u00E9rences similaires seront d\\'abord allou\u00E9es \u00E0 cet h\u00F4te avant d\\'en choisir un autre. +message.update.resource.count=Confirmer la mise \u00E0 jour des ressources pour ce compte. +message.update.ssl=Soumettez un nouveau certificat SSL compatible X.509 qui sera mis \u00E0 jour sur l\\'ensemble de instance de proxy console. +message.validate.instance.name=Le nom de l\\'instance ne peut d\u00E9passer 63 caract\u00E8res. Seuls les lettres de a \u00E0 z, les chiffres de 0 \u00E0 9 et les tirets sont accept\u00E9s. Le nom doit commencer par une lettre et se terminer par une lettre ou un chiffre. +message.virtual.network.desc=Un r\u00E9seau virtuel d\u00E9di\u00E9 pour votre compte. Ce domaine de multi-diffusion est contenu dans un VLAN et l\\'ensemble des r\u00E9seaux d\\'acc\u00E8s publique sont rout\u00E9s par un routeur virtuel. +message.vm.create.template.confirm=Cr\u00E9er un mod\u00E8le va red\u00E9marrer la VM automatiquement +message.vm.review.launch=Merci de v\u00E9rifier les informations suivantes et de confirmer que votre instance virtuelle est correcte avant de la d\u00E9marrer. 
+message.volume.create.template.confirm=Confirmez que vous souhaitez cr\u00E9er un mod\u00E8le pour ce disque. La cr\u00E9ation peut prendre plusieurs minutes, voire plus, selon la taille du volume. +message.you.must.have.at.least.one.physical.network=Vous devez avoir au moins un r\u00E9seau physique +message.zone.creation.complete.would.you.like.to.enable.this.zone=Cr\u00E9ation de la zone termin\u00E9e. Voulez-vous l\\'activer ? +message.zone.no.network.selection=La zone s\u00E9lectionn\u00E9e ne propose pas le r\u00E9seau choisi +message.zone.step.1.desc=S\u00E9lectionnez un mod\u00E8le de r\u00E9seau pour votre zone. message.zone.step.2.desc=Renseigner les informations suivantes pour ajouter une nouvelle zone message.zone.step.3.desc=Renseigner les informations suivantes pour ajouter un nouveau pod -message.apply.snapshot.policy=Vous avez mis à jour votre politique d\'instantanés avec succès. -message.disable.snapshot.policy=Vous avez désactivé votre politique de snapshots avec succès. -message.action.change.service.warning.for.instance=Votre instance doit être arrêtée avant d\'essayer de changer son offre de service. -message.action.change.service.warning.for.router=Votre routeur doit être arrêté avant d\'essayer de changer son offre de service. -message.action.reset.password.warning=Votre instance doit être arrêtée avant d\'essayer de changer son mot de passe. -message.action.reset.password.off=Votre instance ne supporte pas pour le moment cette fonctionnalité. - -#Errors -error.login=Votre nom d\'utilisateur /mot de passe ne correspond pas à nos données. -error.menu.select=Echec de l\'action car il n\'y a aucun élément sélectionné. -error.mgmt.server.inaccessible=Le serveur de management est indisponible. Essayez plus tard. -error.session.expired=Votre session a expiré. -error.unresolved.internet.name=Votre nom internet ne peut pas être résolu. 
-#resizeVolumes -label.resize.new.size=New Size(GB) -label.action.resize.volume=Resize Volume -label.action.resize.volume.processing=Resizing Volume.... -label.resize.new.offering.id=New Offering -label.resize.shrink.ok=Shrink OK +message.zoneWizard.enable.local.storage=ATTENTION \: si vous activez le stockage local pour cette zone, vous devez effectuer les op\u00E9rations suivantes, selon l\\'endroit o\u00F9 vous souhaitez lancer vos machines virtuelles Syst\u00E8mes \:

1. Si les machines virtuelles Syst\u00E8mes doivent \u00EAtre lanc\u00E9es depuis le stockage primaire, ce dernier doit \u00EAtre ajout\u00E9 \u00E0 la zone apr\u00E8s la cr\u00E9ation. Vous devez \u00E9galement d\u00E9marrer la zone dans un \u00E9tat d\u00E9sactiv\u00E9.

2. Si les machines virtuelles Syst\u00E8mes doivent \u00EAtre lanc\u00E9es depuis le stockage local, le param\u00E8tre system.vm.use.local.storage doit \u00EAtre d\u00E9fini \u00E0 \\'true\\' avant d\\'activer la zone.


Voulez-vous continuer ? +mode=Mode +network.rate=D\u00E9bit R\u00E9seau +notification.reboot.instance=Red\u00E9marrer l\\'instance +notification.start.instance=D\u00E9marrer l\\'instance +notification.stop.instance=Stopper l\\'instance +side.by.side=C\u00F4te \u00E0 c\u00F4te +state.Accepted=Accept\u00E9 +state.Active=Actif +state.Allocated=Allou\u00E9 +state.Allocating=Allocation en cours +state.BackedUp=Sauvegard\u00E9 +state.BackingUp=Sauvegarde en cours +state.Completed=Termin\u00E9 +state.Creating=Cr\u00E9ation en cours +state.Declined=Refus\u00E9 +state.Destroyed=Supprim\u00E9e +state.Disabled=D\u00E9sactiv\u00E9 +state.Enabled=Actifs +state.Error=Erreur +state.Expunging=Purge en cours +state.Migrating=Migration en cours +state.Pending=En attente +state.Ready=Pr\u00EAt +state.Running=D\u00E9marr\u00E9e +state.Starting=D\u00E9marrage en cours +state.Stopped=Arr\u00EAt\u00E9e +state.Stopping=Arr\u00EAt en cours +state.Suspended=Suspendu +state.enabled=Actifs +state.ready=Pr\u00EAt +ui.listView.filters.all=Tous +ui.listView.filters.mine=Mon diff --git a/client/WEB-INF/web.xml b/client/WEB-INF/web.xml index 0d75165659e..da2f13c8cf7 100644 --- a/client/WEB-INF/web.xml +++ b/client/WEB-INF/web.xml @@ -19,6 +19,14 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd" version="2.5"> + + + log4jConfigLocation + classpath:log4j-cloud.xml + + + org.springframework.web.util.Log4jConfigListener + org.springframework.web.context.ContextLoaderListener diff --git a/client/pom.xml b/client/pom.xml index 0c37df3a5f5..cda6ab8b4e7 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -85,6 +85,16 @@ cloud-plugin-hypervisor-xen ${project.version} + + org.apache.cloudstack + cloud-plugin-hypervisor-baremetal + ${project.version} + + + org.apache.cloudstack + cloud-plugin-hypervisor-ucs + 
${project.version} + org.apache.cloudstack cloud-plugin-hypervisor-ovm @@ -229,9 +239,12 @@ -XX:MaxPermSize=512m -Xmx2g - /client ${project.build.directory}/${project.build.finalName}/WEB-INF/web.xml ${project.build.directory}/${project.build.finalName} + + /client + ${project.build.directory}/utilities/scripts/db/;${project.build.directory}/utilities/scripts/db/db/ + @@ -255,6 +268,13 @@ + + + + + + + @@ -267,12 +287,7 @@ - - - - + @@ -291,7 +306,7 @@ + value="${cs.replace.properties}" /> @@ -302,7 +317,7 @@ - + @@ -313,7 +328,7 @@ - + @@ -324,7 +339,7 @@ - + @@ -334,10 +349,26 @@ - + + + + + + + + + + + + + + + + + @@ -368,8 +399,8 @@ test + match="classpath:componentContext.xml" + replace="classpath:nonossComponentContext.xml" byline="true" /> @@ -392,14 +423,14 @@ org.jasypt jasypt - 1.9.0` + 1.9.0 false ${project.build.directory}/pythonlibs org.jasypt jasypt - 1.8` + 1.8 false ${project.build.directory}/pythonlibs diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in index 9503a6c137e..6cec8b38223 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ b/client/tomcatconf/applicationContext.xml.in @@ -37,7 +37,7 @@ - + + - - + - diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index e1d0fb20731..5018236e5e2 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -218,9 +218,13 @@ listZones=15 #### events commands listEvents=15 listEventTypes=15 +archiveEvents=15 +deleteEvents=15 #### alerts commands listAlerts=3 +archiveAlerts=1 +deleteAlerts=1 #### system capacity commands listCapacity=3 @@ -326,6 +330,11 @@ addNicToVirtualMachine=15 removeNicFromVirtualMachine=15 updateDefaultNicForVirtualMachine=15 +#### +addIpToNic=15 +removeIpFromNic=15 +listNics=15 + #### SSH key pair commands registerSSHKeyPair=15 createSSHKeyPair=15 @@ -543,3 +552,6 @@ listVMSnapshot=15 createVMSnapshot=15 deleteVMSnapshot=15 
revertToSnapshot=15 + +#### Baremetal commands +addBaremetalHost=1 diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index c45ab1bd91b..7b64f49ee20 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -44,15 +44,22 @@ - - + + + + + + + + + - + + - + @@ -119,6 +136,14 @@ + + + + + + + + @@ -174,7 +199,14 @@ - + + + + @@ -198,10 +230,18 @@ - + + + + + + + + + + + + + + diff --git a/client/tomcatconf/components-cloudzones.xml.in b/client/tomcatconf/components-cloudzones.xml.in deleted file mode 100755 index 4bd39108e1f..00000000000 --- a/client/tomcatconf/components-cloudzones.xml.in +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - true - - - diff --git a/client/tomcatconf/components-nonoss.xml.in b/client/tomcatconf/components-nonoss.xml.in deleted file mode 100755 index fbfc5cc2726..00000000000 --- a/client/tomcatconf/components-nonoss.xml.in +++ /dev/null @@ -1,97 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - - - - - true - - - - - - diff --git a/client/tomcatconf/components.xml.in b/client/tomcatconf/components.xml.in deleted file mode 100755 index c62abe8ff2a..00000000000 --- a/client/tomcatconf/components.xml.in +++ /dev/null @@ -1,309 +0,0 @@ - - - - - - - - - - - - - - - true - - - - 1 - 25 - 50000 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/client/tomcatconf/log4j-cloud.xml.in b/client/tomcatconf/log4j-cloud.xml.in index 02f70998d53..086669376aa 100755 --- 
a/client/tomcatconf/log4j-cloud.xml.in +++ b/client/tomcatconf/log4j-cloud.xml.in @@ -105,7 +105,7 @@ under the License. - + diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index 5532becba18..7e3552db67e 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ b/client/tomcatconf/nonossComponentContext.xml.in @@ -49,7 +49,7 @@ - + @@ -58,18 +58,25 @@ - + + + + + - - + + - - + + + + + @@ -104,16 +111,21 @@ + + + + - + - + @@ -128,6 +140,10 @@ + + + + @@ -193,10 +209,11 @@ - + + @@ -224,11 +241,10 @@ - + - @@ -274,14 +290,6 @@ - - - - - - - - @@ -290,6 +298,18 @@ + + + + + + + + + + + + diff --git a/client/tomcatconf/tomcat6-nonssl.conf.in b/client/tomcatconf/tomcat6-nonssl.conf.in index d69d6ed94c0..4a9a70f619e 100644 --- a/client/tomcatconf/tomcat6-nonssl.conf.in +++ b/client/tomcatconf/tomcat6-nonssl.conf.in @@ -41,7 +41,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M" +JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=512M -XX:MaxPermSize=800m" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/client/tomcatconf/tomcat6-ssl.conf.in b/client/tomcatconf/tomcat6-ssl.conf.in index ecb93b23abc..84b6d6275bb 100644 --- a/client/tomcatconf/tomcat6-ssl.conf.in +++ b/client/tomcatconf/tomcat6-ssl.conf.in @@ -40,7 +40,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so 
#JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=256M" +JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=800m -XX:PermSize=512M" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/core/pom.xml b/core/pom.xml index 3d6356e561e..0da69529400 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -31,7 +31,11 @@ cloud-api ${project.version} - + + org.apache.cloudstack + cloud-engine-api + ${project.version} + commons-httpclient commons-httpclient diff --git a/core/src/com/cloud/agent/api/NetworkRulesVmSecondaryIpCommand.java b/core/src/com/cloud/agent/api/NetworkRulesVmSecondaryIpCommand.java new file mode 100644 index 00000000000..ce4080878a1 --- /dev/null +++ b/core/src/com/cloud/agent/api/NetworkRulesVmSecondaryIpCommand.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent.api; + +import com.cloud.vm.VirtualMachine; + +public class NetworkRulesVmSecondaryIpCommand extends Command { + + private String vmName; + private VirtualMachine.Type type; + private String vmSecIp; + private String vmMac; + private String action; + + public NetworkRulesVmSecondaryIpCommand(String vmName, VirtualMachine.Type type) { + this.vmName = vmName; + this.type = type; + } + + + public NetworkRulesVmSecondaryIpCommand(String vmName, String vmMac, + String secondaryIp, boolean action) { + this.vmName = vmName; + this.vmMac = vmMac; + this.vmSecIp = secondaryIp; + if (action) { + this.action = "-A"; + } else { + this.action = "-D"; + } + } + + public String getVmName() { + return vmName; + } + + public VirtualMachine.Type getType() { + return type; + } + + public String getVmSecIp() { + return vmSecIp; + } + + public String getVmMac() { + return vmMac; + } + + public String getAction() { + return action; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index fc7f08f76a2..7148e0710ca 100755 --- a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -600,6 +600,10 @@ public class VirtualRoutingResource implements Manager { command.add("-6", cmd.getVmIp6Address()); command.add("-u", cmd.getDuid()); } + + if 
(!cmd.isDefault()) { + command.add("-z"); + } final String result = command.execute(); return new Answer(cmd, result==null, result); @@ -859,35 +863,29 @@ public class VirtualRoutingResource implements Manager { } public void assignVpcIpToRouter(final String routerIP, final boolean add, final String pubIP, - final String nicname, final String gateway, final String netmask, final String subnet) throws Exception { - try { - String args = ""; + final String nicname, final String gateway, final String netmask, final String subnet) throws InternalErrorException { + String args = ""; - if (add) { - args += " -A "; - } else { - args += " -D "; - } + if (add) { + args += " -A "; + } else { + args += " -D "; + } - args += " -l "; - args += pubIP; - args += " -c "; - args += nicname; - args += " -g "; - args += gateway; - args += " -m "; - args += netmask; - args += " -n "; - args += subnet; + args += " -l "; + args += pubIP; + args += " -c "; + args += nicname; + args += " -g "; + args += gateway; + args += " -m "; + args += netmask; + args += " -n "; + args += subnet; - String result = routerProxy("vpc_ipassoc.sh", routerIP, args); - if (result != null) { - throw new InternalErrorException("KVM plugin \"vpc_ipassoc\" failed:"+result); - } - } catch (Exception e) { - String msg = "Unable to assign public IP address due to " + e.toString(); - s_logger.warn(msg, e); - throw new Exception(msg); + String result = routerProxy("vpc_ipassoc.sh", routerIP, args); + if (result != null) { + throw new InternalErrorException("KVM plugin \"vpc_ipassoc\" failed:"+result); } } diff --git a/core/src/com/cloud/alert/AlertVO.java b/core/src/com/cloud/alert/AlertVO.java index f6089d65043..3f014aa2b1f 100755 --- a/core/src/com/cloud/alert/AlertVO.java +++ b/core/src/com/cloud/alert/AlertVO.java @@ -28,9 +28,7 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; -import org.apache.cloudstack.api.Identity; import 
com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; @Entity @Table(name="alert") @@ -68,16 +66,19 @@ public class AlertVO implements Alert { @Temporal(TemporalType.TIMESTAMP) @Column(name="resolved", updatable=true, nullable=true) private Date resolved; - + @Column(name="uuid") private String uuid; + @Column(name="archived") + private boolean archived; + public AlertVO() { - this.uuid = UUID.randomUUID().toString(); + this.uuid = UUID.randomUUID().toString(); } public AlertVO(Long id) { this.id = id; - this.uuid = UUID.randomUUID().toString(); + this.uuid = UUID.randomUUID().toString(); } @Override @@ -103,12 +104,12 @@ public class AlertVO implements Alert { } public Long getClusterId() { - return clusterId; - } - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - @Override + return clusterId; + } + public void setClusterId(Long clusterId) { + this.clusterId = clusterId; + } + @Override public Long getPodId() { return podId; } @@ -164,10 +165,19 @@ public class AlertVO implements Alert { @Override public String getUuid() { - return this.uuid; + return this.uuid; } - + public void setUuid(String uuid) { - this.uuid = uuid; + this.uuid = uuid; + } + + @Override + public boolean getArchived() { + return archived; + } + + public void setArchived(Boolean archived) { + this.archived = archived; } } diff --git a/core/src/com/cloud/event/EventVO.java b/core/src/com/cloud/event/EventVO.java index ac46f24b2ee..2c30eadebdc 100644 --- a/core/src/com/cloud/event/EventVO.java +++ b/core/src/com/cloud/event/EventVO.java @@ -29,74 +29,75 @@ import javax.persistence.Id; import javax.persistence.Table; import javax.persistence.Transient; -import org.apache.cloudstack.api.Identity; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; @Entity @Table(name="event") public class EventVO implements Event { - @Id + @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") - 
private long id = -1; + private long id = -1; - @Column(name="type") - private String type; - - @Enumerated(value=EnumType.STRING) - @Column(name="state") + @Column(name="type") + private String type; + + @Enumerated(value=EnumType.STRING) + @Column(name="state") private State state = State.Completed; - @Column(name="description", length=1024) - private String description; + @Column(name="description", length=1024) + private String description; - @Column(name=GenericDao.CREATED_COLUMN) - private Date createDate; + @Column(name=GenericDao.CREATED_COLUMN) + private Date createDate; @Column(name="user_id") private long userId; - @Column(name="account_id") - private long accountId; + @Column(name="account_id") + private long accountId; @Column(name="domain_id") private long domainId; - @Column(name="level") - private String level = LEVEL_INFO; - - @Column(name="start_id") + @Column(name="level") + private String level = LEVEL_INFO; + + @Column(name="start_id") private long startId; - @Column(name="parameters", length=1024) - private String parameters; - - @Column(name="uuid") - private String uuid; + @Column(name="parameters", length=1024) + private String parameters; - @Transient - private int totalSize; + @Column(name="uuid") + private String uuid; - public static final String LEVEL_INFO = "INFO"; - public static final String LEVEL_WARN = "WARN"; - public static final String LEVEL_ERROR = "ERROR"; - - public EventVO() { - this.uuid = UUID.randomUUID().toString(); - } - - public long getId() { - return id; - } - @Override + @Column(name="archived") + private boolean archived; + + @Transient + private int totalSize; + + public static final String LEVEL_INFO = "INFO"; + public static final String LEVEL_WARN = "WARN"; + public static final String LEVEL_ERROR = "ERROR"; + + public EventVO() { + this.uuid = UUID.randomUUID().toString(); + } + + public long getId() { + return id; + } + @Override public String getType() { - return type; - } - public void setType(String type) 
{ - this.type = type; - } - @Override + return type; + } + public void setType(String type) { + this.type = type; + } + @Override public State getState() { return state; } @@ -105,27 +106,27 @@ public class EventVO implements Event { this.state = state; } - @Override + @Override public String getDescription() { - return description; - } - public void setDescription(String description) { - this.description = description; - } - @Override + return description; + } + public void setDescription(String description) { + this.description = description; + } + @Override public Date getCreateDate() { - return createDate; - } - public void setCreatedDate(Date createdDate) { - createDate = createdDate; - } - @Override + return createDate; + } + public void setCreatedDate(Date createdDate) { + createDate = createdDate; + } + @Override public long getUserId() { - return userId; - } - public void setUserId(long userId) { - this.userId = userId; - } + return userId; + } + public void setUserId(long userId) { + this.userId = userId; + } @Override public long getAccountId() { return accountId; @@ -165,21 +166,29 @@ public class EventVO implements Event { this.startId = startId; } - @Override + @Override public String getParameters() { - return parameters; - } - public void setParameters(String parameters) { - this.parameters = parameters; - } - - @Override - public String getUuid() { - return this.uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } + return parameters; + } + public void setParameters(String parameters) { + this.parameters = parameters; + } + @Override + public String getUuid() { + return this.uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + @Override + public boolean getArchived() { + return archived; + } + + public void setArchived(Boolean archived) { + this.archived = archived; + } } diff --git a/core/src/com/cloud/event/dao/EventDao.java b/core/src/com/cloud/event/dao/EventDao.java index bfcb818f20f..da5f47a90b4 
100644 --- a/core/src/com/cloud/event/dao/EventDao.java +++ b/core/src/com/cloud/event/dao/EventDao.java @@ -30,4 +30,9 @@ public interface EventDao extends GenericDao { public List listOlderEvents(Date oldTime); EventVO findCompletedEvent(long startId); + + public List listToArchiveOrDeleteEvents(List ids, String type, Date olderThan, Long accountId); + + public void archiveEvents(List events); + } diff --git a/core/src/com/cloud/event/dao/EventDaoImpl.java b/core/src/com/cloud/event/dao/EventDaoImpl.java index 44fbb030dcc..6ba59c56b0a 100644 --- a/core/src/com/cloud/event/dao/EventDaoImpl.java +++ b/core/src/com/cloud/event/dao/EventDaoImpl.java @@ -30,24 +30,34 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value={EventDao.class}) public class EventDaoImpl extends GenericDaoBase implements EventDao { - public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName()); - protected final SearchBuilder CompletedEventSearch; - - public EventDaoImpl () { - CompletedEventSearch = createSearchBuilder(); - CompletedEventSearch.and("state",CompletedEventSearch.entity().getState(),SearchCriteria.Op.EQ); - CompletedEventSearch.and("startId", CompletedEventSearch.entity().getStartId(), SearchCriteria.Op.EQ); - CompletedEventSearch.done(); - } + public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName()); + protected final SearchBuilder CompletedEventSearch; + protected final SearchBuilder ToArchiveOrDeleteEventSearch; - @Override - public List searchAllEvents(SearchCriteria sc, Filter filter) { - return listIncludingRemovedBy(sc, filter); - } + public EventDaoImpl () { + CompletedEventSearch = createSearchBuilder(); + CompletedEventSearch.and("state",CompletedEventSearch.entity().getState(),SearchCriteria.Op.EQ); + 
CompletedEventSearch.and("startId", CompletedEventSearch.entity().getStartId(), SearchCriteria.Op.EQ); + CompletedEventSearch.done(); + + ToArchiveOrDeleteEventSearch = createSearchBuilder(); + ToArchiveOrDeleteEventSearch.and("id", ToArchiveOrDeleteEventSearch.entity().getId(), Op.IN); + ToArchiveOrDeleteEventSearch.and("type", ToArchiveOrDeleteEventSearch.entity().getType(), Op.EQ); + ToArchiveOrDeleteEventSearch.and("accountId", ToArchiveOrDeleteEventSearch.entity().getAccountId(), Op.EQ); + ToArchiveOrDeleteEventSearch.and("createDateL", ToArchiveOrDeleteEventSearch.entity().getCreateDate(), Op.LT); + ToArchiveOrDeleteEventSearch.done(); + } + + @Override + public List searchAllEvents(SearchCriteria sc, Filter filter) { + return listIncludingRemovedBy(sc, filter); + } @Override public List listOlderEvents(Date oldTime) { @@ -55,9 +65,8 @@ public class EventDaoImpl extends GenericDaoBase implements Event SearchCriteria sc = createSearchCriteria(); sc.addAnd("createDate", SearchCriteria.Op.LT, oldTime); return listIncludingRemovedBy(sc, null); - } - + @Override public EventVO findCompletedEvent(long startId) { SearchCriteria sc = CompletedEventSearch.create(); @@ -65,4 +74,36 @@ public class EventDaoImpl extends GenericDaoBase implements Event sc.setParameters("startId", startId); return findOneIncludingRemovedBy(sc); } + + @Override + public List listToArchiveOrDeleteEvents(List ids, String type, Date olderThan, Long accountId) { + SearchCriteria sc = ToArchiveOrDeleteEventSearch.create(); + if (ids != null) { + sc.setParameters("id", ids.toArray(new Object[ids.size()])); + } + if (type != null) { + sc.setParameters("type", type); + } + if (olderThan != null) { + sc.setParameters("createDateL", olderThan); + } + if (accountId != null) { + sc.setParameters("accountId", accountId); + } + return search(sc, null); + } + + @Override + public void archiveEvents(List events) { + + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (EventVO event : events) 
{ + event = lockRow(event.getId(), true); + event.setArchived(true); + update(event.getId(), event); + txn.commit(); + } + txn.close(); + } } diff --git a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java index 56e8e0a734d..b525a2d05d5 100644 --- a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java +++ b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java @@ -59,6 +59,9 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities { @Column(name="max_data_volumes_limit") private Integer maxDataVolumesLimit; + @Column(name="max_hosts_per_cluster") + private Integer maxHostsPerCluster; + protected HypervisorCapabilitiesVO() { this.uuid = UUID.randomUUID().toString(); } @@ -157,6 +160,15 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities { this.maxDataVolumesLimit = maxDataVolumesLimit; } + @Override + public Integer getMaxHostsPerCluster() { + return maxHostsPerCluster; + } + + public void setMaxHostsPerCluster(Integer maxHostsPerCluster) { + this.maxHostsPerCluster = maxHostsPerCluster; + } + @Override public boolean equals(Object obj) { if (obj instanceof HypervisorCapabilitiesVO) { diff --git a/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java b/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java deleted file mode 100644 index 65297a39b96..00000000000 --- a/core/src/com/cloud/resource/storage/PrimaryStorageHeadResource.java +++ /dev/null @@ -1,52 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.resource.storage; - -import com.cloud.agent.api.storage.CreateAnswer; -import com.cloud.agent.api.storage.CreateCommand; -import com.cloud.agent.api.storage.DestroyAnswer; -import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.DownloadAnswer; -import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; -import com.cloud.resource.ServerResource; - -/** - * a primary storage. - * - */ -public interface PrimaryStorageHeadResource extends ServerResource { - /** - * Downloads the template to the primary storage. - * @param cmd - * @return - */ - DownloadAnswer execute(PrimaryStorageDownloadCommand cmd); - - /** - * Creates volumes for the VM. - * @param cmd - * @return - */ - CreateAnswer execute(CreateCommand cmd); - - /** - * Destroys volumes for the VM. 
- * @param cmd - * @return - */ - DestroyAnswer execute(DestroyCommand cmd); -} diff --git a/core/src/com/cloud/storage/DiskOfferingVO.java b/core/src/com/cloud/storage/DiskOfferingVO.java index 5f4f18bcd34..e4fc21c7c13 100755 --- a/core/src/com/cloud/storage/DiskOfferingVO.java +++ b/core/src/com/cloud/storage/DiskOfferingVO.java @@ -311,4 +311,8 @@ public class DiskOfferingVO implements DiskOffering { public int getSortKey() { return sortKey; } + + public void setRecreatable(boolean recreatable) { + this.recreatable = recreatable; + } } diff --git a/core/src/com/cloud/storage/SnapshotVO.java b/core/src/com/cloud/storage/SnapshotVO.java index 68336cb97ec..78b96ec9779 100644 --- a/core/src/com/cloud/storage/SnapshotVO.java +++ b/core/src/com/cloud/storage/SnapshotVO.java @@ -117,7 +117,7 @@ public class SnapshotVO implements Snapshot { this.snapshotType = snapshotType; this.typeDescription = typeDescription; this.size = size; - this.state = State.Creating; + this.state = State.Allocated; this.prevSnapshotId = 0; this.hypervisorType = hypervisorType; this.version = "2.2"; @@ -175,7 +175,7 @@ public class SnapshotVO implements Snapshot { } @Override - public Type getType() { + public Type getRecurringType() { if (snapshotType < 0 || snapshotType >= Type.values().length) { return null; } @@ -248,6 +248,7 @@ public class SnapshotVO implements Snapshot { return state; } + public void setState(State state) { this.state = state; } diff --git a/core/src/com/cloud/storage/StoragePoolDiscoverer.java b/core/src/com/cloud/storage/StoragePoolDiscoverer.java index 816e899f941..c7dd362a5c3 100644 --- a/core/src/com/cloud/storage/StoragePoolDiscoverer.java +++ b/core/src/com/cloud/storage/StoragePoolDiscoverer.java @@ -19,6 +19,8 @@ package com.cloud.storage; import java.net.URI; import java.util.Map; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.exception.DiscoveryException; import com.cloud.utils.component.Adapter; diff --git 
a/core/src/com/cloud/storage/StoragePoolVO.java b/core/src/com/cloud/storage/StoragePoolVO.java deleted file mode 100644 index af6e4e2905c..00000000000 --- a/core/src/com/cloud/storage/StoragePoolVO.java +++ /dev/null @@ -1,346 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.storage; - -import java.util.Date; -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.Id; -import javax.persistence.Table; -import javax.persistence.TableGenerator; -import javax.persistence.Temporal; -import javax.persistence.TemporalType; -import javax.persistence.Transient; - -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.utils.db.GenericDao; - -@Entity -@Table(name="storage_pool") -public class StoragePoolVO implements StoragePool { - @Id - @TableGenerator(name="storage_pool_sq", table="sequence", pkColumnName="name", valueColumnName="value", pkColumnValue="storage_pool_seq", allocationSize=1) - @Column(name="id", updatable=false, nullable = false) - private long id; - - @Column(name="name", updatable=false, nullable=false, length=255) - private String name = null; - - @Column(name="uuid", length=255) - private String uuid = null; - - @Column(name="pool_type", updatable=false, nullable=false, length=32) - @Enumerated(value=EnumType.STRING) - private StoragePoolType poolType; - - @Column(name=GenericDao.CREATED_COLUMN) - Date created; - - @Column(name=GenericDao.REMOVED_COLUMN) - private Date removed; - - @Column(name="update_time", updatable=true) - @Temporal(value=TemporalType.TIMESTAMP) - private Date updateTime; - - @Column(name="data_center_id", updatable=true, nullable=false) - private long dataCenterId; - - @Column(name="pod_id", updatable=true) - private Long podId; - - @Column(name="available_bytes", updatable=true, nullable=true) - private long availableBytes; - - @Column(name="capacity_bytes", updatable=true, nullable=true) - private long capacityBytes; - - @Column(name="status", updatable=true, nullable=false) - @Enumerated(value=EnumType.STRING) - private StoragePoolStatus status; - - // TODO, disable persisency of storageProvider and storageType, javelin new code not 
- // sync with the schema! - - // @Column(name="storage_provider", updatable=true, nullable=false) - @Transient private String storageProvider; - - // Column(name="storage_type", nullable=false) - @Transient private String storageType; - - @Override - public long getId() { - return id; - } - - @Override - public StoragePoolStatus getStatus() { - return status; - } - - public StoragePoolVO() { - // TODO Auto-generated constructor stub - } - - @Override - public String getName() { - return name; - } - - @Override - public String getUuid() { - return uuid; - } - - @Override - public StoragePoolType getPoolType() { - return poolType; - } - - @Override - public Date getCreated() { - return created; - } - - public Date getRemoved() { - return removed; - } - - @Override - public Date getUpdateTime() { - return updateTime; - } - - @Override - public long getDataCenterId() { - return dataCenterId; - } - - @Override - public long getAvailableBytes() { - return availableBytes; - } - - @Override - public String getStorageProvider() { - return storageProvider; - } - - public void setStorageProvider(String provider) { - storageProvider = provider; - } - - @Override - public String getStorageType() { - return storageType; - } - - public void setStorageType(String type) { - storageType = type; - } - - @Override - public long getCapacityBytes() { - return capacityBytes; - } - - public void setAvailableBytes(long available) { - availableBytes = available; - } - - public void setCapacityBytes(long capacity) { - capacityBytes = capacity; - } - - @Column(name="host_address") - private String hostAddress; - - @Column(name="path") - private String path; - - @Column(name="port") - private int port; - - @Column(name="user_info") - private String userInfo; - - @Column(name="cluster_id") - private Long clusterId; - - - @Override - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - @Override - public String 
getHostAddress() { - return hostAddress; - } - - @Override - public String getPath() { - return path; - } - - @Override - public String getUserInfo() { - return userInfo; - } - - public StoragePoolVO(long poolId, String name, String uuid, StoragePoolType type, - long dataCenterId, Long podId, long availableBytes, long capacityBytes, String hostAddress, int port, String hostPath) { - this.name = name; - this.id = poolId; - this.uuid = uuid; - this.poolType = type; - this.dataCenterId = dataCenterId; - this.availableBytes = availableBytes; - this.capacityBytes = capacityBytes; - this.hostAddress = hostAddress; - this.path = hostPath; - this.port = port; - this.podId = podId; - this.setStatus(StoragePoolStatus.Creating); - } - - public StoragePoolVO(StoragePoolVO that) { - this(that.id, that.name, that.uuid, that.poolType, that.dataCenterId, that.podId, that.availableBytes, that.capacityBytes, that.hostAddress, that.port, that.path); - } - - public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path) { - this.poolType = type; - this.hostAddress = hostAddress; - this.port = port; - this.path = path; - this.setStatus(StoragePoolStatus.Creating); - this.uuid = UUID.randomUUID().toString(); - } - - public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path, String userInfo) { - this.poolType = type; - this.hostAddress = hostAddress; - this.port = port; - this.path = path; - this.userInfo = userInfo; - this.setStatus(StoragePoolStatus.Creating); - this.uuid = UUID.randomUUID().toString(); - } - - public void setStatus(StoragePoolStatus status) - { - this.status = status; - } - - public void setId(long id) { - this.id = id; - } - - public void setDataCenterId(long dcId) { - this.dataCenterId = dcId; - } - - public void setPodId(Long podId) { - this.podId = podId; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public void setPath(String path) { - this.path = path; - } - - public void 
setUserInfo(String userInfo) { - this.userInfo = userInfo; - } - - @Override - public int getPort() { - return port; - } - - @Override - public boolean isShared() { - return poolType.isShared(); - } - - @Override - public boolean isLocal() { - return !poolType.isShared(); - } - - @Transient - public String toUri() { - /* - URI uri = new URI(); - try { - if (type == StoragePoolType.Filesystem) { - uri.setScheme("file"); - } else if (type == StoragePoolType.NetworkFilesystem) { - uri.setScheme("nfs"); - } else if (type == StoragePoolType.IscsiLUN) { - } - } catch (MalformedURIException e) { - throw new VmopsRuntimeException("Unable to form the uri " + id); - } - return uri.toString(); - */ - return null; - } - - @Override - public Long getPodId() { - return podId; - } - - public void setName(String name) { - this.name = name; - } - - public boolean isInMaintenance() { - return status == StoragePoolStatus.PrepareForMaintenance || status == StoragePoolStatus.Maintenance || status == StoragePoolStatus.ErrorInMaintenance || removed != null; - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof StoragePoolVO) || obj == null) { - return false; - } - StoragePoolVO that = (StoragePoolVO)obj; - return this.id == that.id; - } - - @Override - public int hashCode() { - return new Long(id).hashCode(); - } - - @Override - public String toString() { - return new StringBuilder("Pool[").append(id).append("|").append(poolType).append("]").toString(); - } -} diff --git a/core/src/com/cloud/storage/VMTemplateHostVO.java b/core/src/com/cloud/storage/VMTemplateHostVO.java index 9eae1a00303..b8dfc41d51b 100755 --- a/core/src/com/cloud/storage/VMTemplateHostVO.java +++ b/core/src/com/cloud/storage/VMTemplateHostVO.java @@ -29,8 +29,10 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDaoBase; -import org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage hosts and templates @@ -38,7 +40,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="template_host_ref") -public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { +public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) Long id; @@ -90,6 +92,18 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { @Column(name="destroyed") boolean destroyed = false; + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; + + @Override public String getInstallPath() { return installPath; @@ -162,6 +176,7 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { super(); this.hostId = hostId; this.templateId = templateId; + this.state = ObjectInDataStoreStateMachine.State.Allocated; } public VMTemplateHostVO(long hostId, long templateId, Date lastUpdated, @@ -282,4 +297,26 @@ public class VMTemplateHostVO implements VMTemplateStorageResourceAssoc { return new StringBuilder("TmplHost[").append(id).append("-").append(templateId).append("-").append(hostId).append(installPath).append("]").toString(); } + @Override + public ObjectInDataStoreStateMachine.State getState() { + // TODO Auto-generated method stub + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + } 
diff --git a/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java b/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java index 32c9dd2ece5..9b761764359 100644 --- a/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java +++ b/core/src/com/cloud/storage/VMTemplateStoragePoolVO.java @@ -29,8 +29,11 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; + import com.cloud.utils.db.GenericDaoBase; -import org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage pools and templates @@ -38,7 +41,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="template_spool_ref") -public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { +public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) long id; @@ -69,7 +72,18 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { @Column (name="template_size") long templateSize; @Column (name="marked_for_gc") boolean markedForGC; - + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; + @Override public String getInstallPath() { return installPath; @@ -148,6 +162,7 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { this.poolId = poolId; this.templateId = templateId; this.downloadState = Status.NOT_DOWNLOADED; + this.state = 
ObjectInDataStoreStateMachine.State.Allocated; this.markedForGC = false; } @@ -235,4 +250,26 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc { return new StringBuilder("TmplPool[").append(id).append("-").append(templateId).append("-").append("poolId").append("-").append(installPath).append("]").toString(); } + @Override + public State getState() { + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + } diff --git a/core/src/com/cloud/storage/VMTemplateVO.java b/core/src/com/cloud/storage/VMTemplateVO.java index fcfdd0067e1..e643d75bf1e 100755 --- a/core/src/com/cloud/storage/VMTemplateVO.java +++ b/core/src/com/cloud/storage/VMTemplateVO.java @@ -31,17 +31,18 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; -import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; + import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.utils.fsm.StateObject; @Entity @Table(name="vm_template") -public class VMTemplateVO implements VirtualMachineTemplate { +public class VMTemplateVO implements VirtualMachineTemplate, StateObject { @Id @TableGenerator(name="vm_template_sq", table="sequence", pkColumnName="name", valueColumnName="value", pkColumnValue="vm_template_seq", allocationSize=1) @Column(name="id", nullable = false) @@ -127,6 +128,22 @@ public class VMTemplateVO implements VirtualMachineTemplate { @Column(name="enable_sshkey") private boolean enableSshKey; 
+ + @Column(name = "image_data_store_id") + private long imageDataStoreId; + + @Column(name = "size") + private Long size; + + @Column(name = "state") + private TemplateState state; + + @Column(name="update_count", updatable = true) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; @Transient Map details; @@ -140,8 +157,9 @@ public class VMTemplateVO implements VirtualMachineTemplate { this.uniqueName = uniqueName; } - protected VMTemplateVO() { + public VMTemplateVO() { this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } /** @@ -150,12 +168,14 @@ public class VMTemplateVO implements VirtualMachineTemplate { public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType, Map details) { this(id, generateUniqueName(id, accountId, name), name, format, isPublic, featured, isExtractable, type, url, null, requiresHvm, bits, accountId, cksum, displayText, enablePassword, guestOSId, bootable, hyperType, details); this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } public VMTemplateVO(long id, String name, ImageFormat format, boolean isPublic, boolean featured, boolean isExtractable, TemplateType type, String url, boolean requiresHvm, int bits, long accountId, String cksum, String displayText, boolean enablePassword, long guestOSId, boolean bootable, HypervisorType hyperType, String templateTag, Map details, boolean sshKeyEnabled) { this(id, name, format, isPublic, featured, isExtractable, type, url, requiresHvm, bits, accountId, cksum, displayText, enablePassword, guestOSId, bootable, hyperType, details); this.templateTag = templateTag; this.uuid = UUID.randomUUID().toString(); + this.state = 
TemplateState.Allocated; this.enableSshKey = sshKeyEnabled; } @@ -179,6 +199,7 @@ public class VMTemplateVO implements VirtualMachineTemplate { this.bootable = bootable; this.hypervisorType = hyperType; this.uuid = UUID.randomUUID().toString(); + this.state = TemplateState.Allocated; } // Has an extra attribute - isExtractable @@ -468,5 +489,46 @@ public class VMTemplateVO implements VirtualMachineTemplate { public void setEnableSshKey(boolean enable) { enableSshKey = enable; } + + public Long getImageDataStoreId() { + return this.imageDataStoreId; + } + + public void setImageDataStoreId(long dataStoreId) { + this.imageDataStoreId = dataStoreId; + } + + public void setSize(Long size) { + this.size = size; + } + + public Long getSize() { + return this.size; + } + + public TemplateState getState() { + return this.state; + } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + public void setUpdated(Date updated) { + this.updated = updated; + } + } diff --git a/core/src/com/cloud/storage/VolumeHostVO.java b/core/src/com/cloud/storage/VolumeHostVO.java index f4fc7abc4ee..40bae499122 100755 --- a/core/src/com/cloud/storage/VolumeHostVO.java +++ b/core/src/com/cloud/storage/VolumeHostVO.java @@ -29,11 +29,13 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; -//import com.cloud.storage.VMVolumeStorageResourceAssoc.Status; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDaoBase; -import 
org.apache.cloudstack.api.InternalIdentity; /** * Join table for storage hosts and volumes @@ -41,7 +43,7 @@ import org.apache.cloudstack.api.InternalIdentity; */ @Entity @Table(name="volume_host_ref") -public class VolumeHostVO implements InternalIdentity { +public class VolumeHostVO implements InternalIdentity, DataObjectInStore { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) Long id; @@ -99,6 +101,16 @@ public class VolumeHostVO implements InternalIdentity { @Column(name="destroyed") boolean destroyed = false; + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name = "state") + @Enumerated(EnumType.STRING) + ObjectInDataStoreStateMachine.State state; public String getInstallPath() { return installPath; @@ -187,6 +199,7 @@ public class VolumeHostVO implements InternalIdentity { super(); this.hostId = hostId; this.volumeId = volumeId; + this.state = ObjectInDataStoreStateMachine.State.Allocated; } public VolumeHostVO(long hostId, long volumeId, long zoneId, Date lastUpdated, @@ -308,5 +321,27 @@ public class VolumeHostVO implements InternalIdentity { public String toString() { return new StringBuilder("VolumeHost[").append(id).append("-").append(volumeId).append("-").append(hostId).append(installPath).append("]").toString(); } + + public long getUpdatedCount() { + return this.updatedCount; + } + + public void incrUpdatedCount() { + this.updatedCount++; + } + + public void decrUpdatedCount() { + this.updatedCount--; + } + + public Date getUpdated() { + return updated; + } + + @Override + public ObjectInDataStoreStateMachine.State getState() { + // TODO Auto-generated method stub + return this.state; + } } diff --git a/core/src/com/cloud/storage/VolumeVO.java b/core/src/com/cloud/storage/VolumeVO.java index defc841e1e3..a287c26348b 100755 --- a/core/src/com/cloud/storage/VolumeVO.java +++ 
b/core/src/com/cloud/storage/VolumeVO.java @@ -32,11 +32,9 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import javax.persistence.Transient; -import org.apache.cloudstack.api.Identity; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.InternalIdentity; @Entity @Table(name = "volumes") @@ -69,7 +67,7 @@ public class VolumeVO implements Volume { Long deviceId = null; @Column(name = "size") - long size; + Long size; @Column(name = "folder") String folder; @@ -257,11 +255,11 @@ public class VolumeVO implements Volume { } @Override - public long getSize() { + public Long getSize() { return size; } - public void setSize(long size) { + public void setSize(Long size) { this.size = size; } diff --git a/core/src/com/cloud/storage/template/RawImageProcessor.java b/core/src/com/cloud/storage/template/RawImageProcessor.java index 7833eabcabf..a002df5c9b2 100644 --- a/core/src/com/cloud/storage/template/RawImageProcessor.java +++ b/core/src/com/cloud/storage/template/RawImageProcessor.java @@ -57,6 +57,7 @@ public class RawImageProcessor extends AdapterBase implements Processor { String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension(); if (!_storage.exists(imgPath)) { s_logger.debug("Unable to find raw image:" + imgPath); + return null; } FormatInfo info = new FormatInfo(); info.format = ImageFormat.RAW; diff --git a/server/src/com/cloud/baremetal/DhcpServerResponse.java b/core/src/com/cloud/vm/UserVmCloneSettingVO.java similarity index 57% rename from server/src/com/cloud/baremetal/DhcpServerResponse.java rename to core/src/com/cloud/vm/UserVmCloneSettingVO.java index db46ccd1a1d..24bb1e87c3b 100644 --- a/server/src/com/cloud/baremetal/DhcpServerResponse.java +++ b/core/src/com/cloud/vm/UserVmCloneSettingVO.java @@ -14,22 +14,37 @@ // KIND, either express or implied. 
See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.baremetal; +package com.cloud.vm; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.Table; -public class DhcpServerResponse extends BaseResponse { - @SerializedName(ApiConstants.ID) @Param(description="the ID of the Dhcp server") - private String id; +@Entity +@Table(name="user_vm_clone_setting") +public class UserVmCloneSettingVO { + + @Column(name="vm_id") + private Long vmId; + + @Column(name="clone_type") + private String cloneType; + + public UserVmCloneSettingVO() { - public String getId() { - return id; } - public void setId(String id) { - this.id = id; + public UserVmCloneSettingVO(long id, + String cloneType) { + this.vmId = id; + this.cloneType = cloneType; + } + + public long getVmId() { + return this.vmId; + } + + public String getCloneType() { + return this.cloneType; } } diff --git a/core/src/com/cloud/vm/VirtualNetwork.java b/core/src/com/cloud/vm/VirtualNetwork.java deleted file mode 100644 index ace3b80769f..00000000000 --- a/core/src/com/cloud/vm/VirtualNetwork.java +++ /dev/null @@ -1,72 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.vm; - -import com.cloud.network.Networks.IsolationType; -import com.cloud.network.Networks.Mode; - -/** - * VirtualNetwork describes from a management level the - * machine. - */ -public class VirtualNetwork { - /** - * The gateway for this network. - */ - public String gateway; - - /** - * Netmask - */ - public String netmask; - - /** - * ip address. null if mode is DHCP. - */ - public String ip; - - /** - * Mac Address. - */ - public String mac; - - /** - * rate limit on this network. -1 if no limit. - */ - public long rate; - - /** - * tag for virtualization. - */ - public String tag; - - /** - * mode to acquire ip address. - */ - public Mode mode; - - /** - * Isolation method for networking. - */ - public IsolationType method; - - public boolean firewalled; - - public int[] openPorts; - - public int[] closedPorts; -} diff --git a/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java b/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java new file mode 100644 index 00000000000..03d4945fda0 --- /dev/null +++ b/core/src/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -0,0 +1,224 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm.snapshot; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.TableGenerator; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "vm_snapshots") +public class VMSnapshotVO implements VMSnapshot { + @Id + @TableGenerator(name = "vm_snapshots_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshots_seq", allocationSize = 1) + @GeneratedValue(strategy = GenerationType.TABLE) + @Column(name = "id") + long id; + + @Column(name = "uuid") + String uuid = UUID.randomUUID().toString(); + + @Column(name = "name") + String name; + + @Column(name = "display_name") + String displayName; + + @Column(name = "description") + String description; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "account_id") + long accountId; + + @Column(name = "domain_id") + long domainId; + + @Column(name = "vm_snapshot_type") + @Enumerated(EnumType.STRING) + VMSnapshot.Type type; + + @Column(name = "state", updatable = true, nullable = false) + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = 
GenericDao.REMOVED_COLUMN) + Date removed; + + @Column(name = "current") + Boolean current; + + @Column(name = "parent") + Long parent; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + public Long getParent() { + return parent; + } + + public void setParent(Long parent) { + this.parent = parent; + } + + public VMSnapshotVO() { + + } + + public Date getRemoved() { + return removed; + } + + public VMSnapshotVO(Long accountId, Long domainId, Long vmId, + String description, String vmSnapshotName, String vsDisplayName, + Long serviceOfferingId, Type type, Boolean current) { + this.accountId = accountId; + this.domainId = domainId; + this.vmId = vmId; + this.state = State.Allocated; + this.description = description; + this.name = vmSnapshotName; + this.displayName = vsDisplayName; + this.type = type; + this.current = current; + } + + public String getDescription() { + return description; + } + + @Override + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + @Override + public long getId() { + return id; + } + + @Override + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + public Boolean 
getCurrent() { + return current; + } + + public void setCurrent(Boolean current) { + this.current = current; + } + + @Override + public long getUpdatedCount() { + return updatedCount; + } + + @Override + public void incrUpdatedCount() { + this.updatedCount++; + } + + @Override + public Date getUpdated() { + return updated; + } + + @Override + public Type getType() { + return type; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } +} diff --git a/debian/README b/debian/README deleted file mode 100644 index cbfbf1bb9d9..00000000000 --- a/debian/README +++ /dev/null @@ -1,6 +0,0 @@ -The Debian Package ----------------------------- - -This is part of the Cloud Stack collection of packages. - - -- Manuel Amador (Rudd-O) Thu, 25 Mar 2010 15:12:06 -0700 diff --git a/debian/changelog b/debian/changelog index c3243aad5e3..cbbaad32cad 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +cloudstack (4.1.0-incubating-0.0.snapshot) unstable; urgency=low + + * Incorporate incubating into version, remove epoch + + -- Noa Resare Tue, 05 Feb 2013 18:05:28 +0000 + cloud (1:4.0.0-1) unstable; urgency=low * Bumping the version to 4.0.0 diff --git a/debian/cloud-agent.config b/debian/cloud-agent.config deleted file mode 100644 index 00ae6c00d2e..00000000000 --- a/debian/cloud-agent.config +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - diff --git a/debian/cloud-cli.install b/debian/cloud-cli.install deleted file mode 100644 index ce178461150..00000000000 --- a/debian/cloud-cli.install +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/etc/cloud/cli/commands.xml -/usr/bin/cloud-grab-dependent-library-versions -/usr/bin/cloud-tool -/usr/bin/cloudvoladm diff --git a/debian/cloud-client.install b/debian/cloud-client.install deleted file mode 100644 index aadb145ae1c..00000000000 --- a/debian/cloud-client.install +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/etc/cloud/management/catalina.policy -/etc/cloud/management/catalina.properties -/etc/cloud/management/commands.properties -/etc/cloud/management/components.xml -/etc/cloud/management/context.xml -/etc/cloud/management/db.properties -/etc/cloud/management/environment.properties -/etc/cloud/management/ehcache.xml -/etc/cloud/management/log4j-cloud.xml -/etc/cloud/management/logging.properties -/etc/cloud/management/server.xml -/etc/cloud/management/tomcat6.conf -/etc/cloud/management/classpath.conf -/etc/cloud/management/tomcat-users.xml -/etc/cloud/management/web.xml -/etc/cloud/management/server-nonssl.xml -/etc/cloud/management/tomcat6-nonssl.conf -/etc/cloud/management/virtualrouter_commands.properties -/etc/cloud/management/f5bigip_commands.properties -/etc/cloud/management/junipersrx_commands.properties -/etc/cloud/management/netscalerloadbalancer_commands.properties -/etc/cloud/management/cisconexusvsm_commands.properties -/etc/cloud/management/Catalina -/etc/cloud/management/Catalina/localhost -/etc/cloud/management/Catalina/localhost/client -/etc/init.d/cloud-management -/usr/share/cloud/management/bin -/usr/share/cloud/management/conf -/usr/share/cloud/management/lib -/usr/share/cloud/management/logs -/usr/share/cloud/management/temp -/usr/share/cloud/management/work -/var/cache/cloud/management -/var/cache/cloud/management/work -/var/cache/cloud/management/temp 
-/var/log/cloud/management -/var/lib/cloud/mnt -/var/lib/cloud/management -/usr/bin/cloud-setup-management -/usr/bin/cloud-update-xenserver-licenses -/etc/cloud/management/commands-ext.properties diff --git a/debian/cloud-client.postinst b/debian/cloud-client.postinst deleted file mode 100644 index 87c7610320c..00000000000 --- a/debian/cloud-client.postinst +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh -e -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -case "$1" in - configure) - if ! 
id cloud > /dev/null 2>&1 ; then - adduser --system --home /var/lib/cloud/management --no-create-home \ - --group --disabled-password --shell /bin/sh cloud - # update me in all the .postinst that you can find me in, as well - fi - - for i in /var/lib/cloud/mnt /var/cache/cloud/management \ - /var/cache/cloud/management/work /var/cache/cloud/management/temp \ - /var/log/cloud/management /etc/cloud/management/Catalina \ - /etc/cloud/management/Catalina/localhost /var/lib/cloud/management /etc/cloud/management/Catalina/localhost/client - do - chmod 0770 $i - chgrp cloud $i - done - - for i in /etc/cloud/management/db.properties - do - chmod 0640 $i - chgrp cloud $i - done - - if [ "$2" = "" ] ; then # no recently configured version, this is a first install - /usr/sbin/update-rc.d cloud-management defaults || true - fi - - ;; -esac - -#DEBHELPER# diff --git a/debian/cloud-core.install b/debian/cloud-core.install deleted file mode 100644 index 00a43d48259..00000000000 --- a/debian/cloud-core.install +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -/usr/share/java/cloud-core.jar - diff --git a/debian/cloud-deps.install b/debian/cloud-deps.install deleted file mode 100644 index 74aade12543..00000000000 --- a/debian/cloud-deps.install +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/usr/share/java/ehcache-1.5.0.jar -/usr/share/java/mail-1.4.jar -/usr/share/java/httpcore-4.0.jar -/usr/share/java/log4j-*.jar -/usr/share/java/apache-log4j-extras-1.1.jar -/usr/share/java/trilead-ssh2-build213-svnkit-1.3-patch.jar -/usr/share/java/xmlrpc-common-3.*.jar -/usr/share/java/xmlrpc-client-3.*.jar -/usr/share/java/jstl-1.2.jar -/usr/share/java/axis2-1.5.1.jar -/usr/share/java/wsdl4j-1.6.2.jar -/usr/share/java/bcprov-*.jar -/usr/share/java/jasypt-1.*.jar -/usr/share/java/ejb-api-3.0.jar -/usr/share/java/javax.persistence-2.0.0.jar -/usr/share/java/gson-1.7.1.jar -/usr/share/java/xapi-5.6.100-1-SNAPSHOT.jar diff --git a/debian/cloud-python.install b/debian/cloud-python.install deleted file mode 100644 index b8eac722a6d..00000000000 --- a/debian/cloud-python.install +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/usr/lib/python*/dist-packages/cloud* diff --git a/debian/cloud-scripts.install b/debian/cloud-scripts.install deleted file mode 100644 index 5e8896d43a3..00000000000 --- a/debian/cloud-scripts.install +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -/usr/lib/cloud/common/scripts/installer/* -/usr/lib/cloud/common/scripts/network/* -/usr/lib/cloud/common/scripts/storage/* -/usr/lib/cloud/common/scripts/util/* -/usr/lib/cloud/common/scripts/vm/network/* -/usr/lib/cloud/common/scripts/vm/systemvm/* -/usr/lib/cloud/common/scripts/vm/pingtest.sh -/usr/lib/cloud/common/scripts/vm/hypervisor/kvm/* -/usr/lib/cloud/common/scripts/vm/hypervisor/versions.sh -/usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/* diff --git a/debian/cloud-server.install b/debian/cloud-server.install deleted file mode 100644 index f792cc2f7cd..00000000000 --- a/debian/cloud-server.install +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -/usr/share/java/cloud-server.jar -/usr/share/java/cloud-ovm.jar -/etc/cloud/server/* -/usr/share/java/cloud-dp-user-concentrated-pod.jar -/usr/share/java/cloud-dp-user-dispersing.jar -/usr/share/java/cloud-host-allocator-random.jar -/usr/share/java/cloud-plugin-elb.jar -/usr/share/java/cloud-plugin-ovs.jar -/usr/share/java/cloud-plugin-nicira-nvp.jar -/usr/share/java/cloud-plugin-bigswitch-vns.jar -/usr/share/java/cloud-storage-allocator-random.jar -/usr/share/java/cloud-user-authenticator-ldap.jar -/usr/share/java/cloud-user-authenticator-md5.jar -/usr/share/java/cloud-user-authenticator-plaintext.jar -/usr/share/java/cloud-plugin-hypervisor-xen.jar diff --git a/debian/cloud-system-iso.install b/debian/cloud-system-iso.install deleted file mode 100644 index 5a0b6364f4c..00000000000 --- a/debian/cloud-system-iso.install +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/usr/lib/cloud/common/vms/systemvm.iso diff --git a/debian/cloud-usage.install b/debian/cloud-usage.install deleted file mode 100644 index 22f58344102..00000000000 --- a/debian/cloud-usage.install +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -/usr/share/java/cloud-usage.jar -/etc/init.d/cloud-usage -/var/log/cloud/usage -/etc/cloud/usage/usage-components.xml -/etc/cloud/usage/log4j-cloud_usage.xml -/etc/cloud/usage/db.properties diff --git a/debian/cloud-utils.install b/debian/cloud-utils.install deleted file mode 100644 index 39c357a1fa6..00000000000 --- a/debian/cloud-utils.install +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -/usr/share/java/cloud-utils.jar -/usr/share/java/cloud-api.jar -/usr/share/doc/cloud/version-info -/usr/bin/cloud-sccs -/usr/bin/cloud-gitrevs diff --git a/debian/cloud-agent.install b/debian/cloudstack-agent.install similarity index 77% rename from debian/cloud-agent.install rename to debian/cloudstack-agent.install index c67e90ab492..b1425717584 100644 --- a/debian/cloud-agent.install +++ b/debian/cloudstack-agent.install @@ -15,11 +15,12 @@ # specific language governing permissions and limitations # under the License. -/etc/cloud/agent/agent.properties -/etc/cloud/agent/developer.properties.template -/etc/cloud/agent/environment.properties -/etc/cloud/agent/log4j-cloud.xml -/etc/init.d/cloud-agent +/etc/cloudstack/agent/agent.properties +/etc/cloudstack/agent/environment.properties +/etc/cloudstack/agent/log4j-cloud.xml +/etc/init.d/cloudstack-agent /usr/bin/cloud-setup-agent /usr/bin/cloud-ssh -/var/log/cloud/agent +/var/log/cloudstack/agent +/usr/share/cloudstack-agent/lib/* +/usr/share/cloudstack-agent/plugins \ No newline at end of file diff --git a/debian/cloud-usage.postinst b/debian/cloudstack-agent.postinst similarity index 58% rename from debian/cloud-usage.postinst rename to debian/cloudstack-agent.postinst index 56f895df6e0..499ae6a695a 100644 --- a/debian/cloud-usage.postinst +++ b/debian/cloudstack-agent.postinst @@ -1,4 +1,5 @@ -#!/bin/sh -e +#!/bin/bash + # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -6,9 +7,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -16,32 +17,24 @@ # specific language governing permissions and limitations # under the License. +set -e + case "$1" in configure) + OLDCONFDIR="/etc/cloud/agent" + NEWCONFDIR="/etc/cloudstack/agent" + CONFFILES="agent.properties log4j.xml log4j-cloud.xml" - if ! id cloud > /dev/null 2>&1 ; then - adduser --system --home /var/lib/cloud/management --no-create-home \ - --group --disabled-password --shell /bin/sh cloud - # update me in cloud-client.postinst as well + # Copy old configuration so the admin doesn't have to do that + # Only do so when we are installing for the first time + if [ -z "$2" ]; then + for FILE in $CONFFILES; do + if [ -f "$OLDCONFDIR/${FILE}" ]; then + cp -a $OLDCONFDIR/$FILE $NEWCONFDIR/$FILE + fi + done fi - - for i in /var/log/cloud/usage - do - chmod 0770 $i - chgrp cloud $i - done - - for i in /etc/cloud/usage/db.properties - do - chmod 0640 $i - chgrp cloud $i - done - - if [ "$2" = "" ] ; then # no recently configured version, this is a first install - /usr/sbin/update-rc.d cloud-usage defaults || true - fi - - ;; + ;; esac -#DEBHELPER# +exit 0 \ No newline at end of file diff --git a/debian/cloud-cli.config b/debian/cloudstack-awsapi.install similarity index 96% rename from debian/cloud-cli.config rename to debian/cloudstack-awsapi.install index 00ae6c00d2e..02ba66829ef 100644 --- a/debian/cloud-cli.config +++ b/debian/cloudstack-awsapi.install @@ -15,3 +15,4 @@ # specific language governing permissions and limitations # under the License. 
+/var/log/cloudstack/awsapi \ No newline at end of file diff --git a/debian/cloud-management.config b/debian/cloudstack-cli.install similarity index 97% rename from debian/cloud-management.config rename to debian/cloudstack-cli.install index 00ae6c00d2e..287f9b1f651 100644 --- a/debian/cloud-management.config +++ b/debian/cloudstack-cli.install @@ -13,5 +13,4 @@ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations -# under the License. - +# under the License. \ No newline at end of file diff --git a/debian/cloud-agent-libs.install b/debian/cloudstack-common.install similarity index 54% rename from debian/cloud-agent-libs.install rename to debian/cloudstack-common.install index ba25935588c..9677f871cf0 100644 --- a/debian/cloud-agent-libs.install +++ b/debian/cloudstack-common.install @@ -15,5 +15,17 @@ # specific language governing permissions and limitations # under the License. 
-/usr/share/java/cloud-agent.jar -/usr/share/java/cloud-plugin-hypervisor-kvm.jar +/usr/share/cloudstack-common/vms/systemvm.iso +/usr/share/cloudstack-common/scripts/installer/* +/usr/share/cloudstack-common/scripts/network/* +/usr/share/cloudstack-common/scripts/storage/* +/usr/share/cloudstack-common/scripts/util/* +/usr/share/cloudstack-common/scripts/vm/network/* +/usr/share/cloudstack-common/scripts/vm/systemvm/* +/usr/share/cloudstack-common/scripts/vm/pingtest.sh +/usr/share/cloudstack-common/scripts/vm/hypervisor/kvm/* +/usr/share/cloudstack-common/scripts/vm/hypervisor/versions.sh +/usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/* +/usr/bin/cloud-set-guest-password +/usr/bin/cloud-set-guest-sshkey +/usr/lib/python2.?/*-packages/* diff --git a/debian/cloud-client.config b/debian/cloudstack-docs.install similarity index 97% rename from debian/cloud-client.config rename to debian/cloudstack-docs.install index 00ae6c00d2e..287f9b1f651 100644 --- a/debian/cloud-client.config +++ b/debian/cloudstack-docs.install @@ -13,5 +13,4 @@ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations -# under the License. - +# under the License. \ No newline at end of file diff --git a/debian/cloud-setup.install b/debian/cloudstack-management.install similarity index 68% rename from debian/cloud-setup.install rename to debian/cloudstack-management.install index 5c37c64920d..12478e132a1 100644 --- a/debian/cloud-setup.install +++ b/debian/cloudstack-management.install @@ -15,11 +15,17 @@ # specific language governing permissions and limitations # under the License. 
+/etc/cloudstack/server/* +/etc/cloudstack/management/* +/etc/init.d/cloudstack-management +/var/cache/cloudstack/management +/var/cache/cloudstack/management/work +/var/cache/cloudstack/management/temp +/var/log/cloudstack/management +/var/lib/cloudstack/mnt +/var/lib/cloudstack/management +/usr/bin/cloud-update-xenserver-licenses +/usr/bin/cloud-setup-management /usr/bin/cloud-setup-databases /usr/bin/cloud-migrate-databases -/usr/bin/cloud-set-guest-password -/usr/bin/cloud-set-guest-sshkey -/usr/share/cloud/setup/*.sql -/usr/share/cloud/setup/*.sh -/usr/share/cloud/setup/server-setup.xml -/usr/share/cloud/setup/db/*.sql +/usr/share/cloudstack-management/* diff --git a/debian/cloud-agent.postinst b/debian/cloudstack-management.postinst similarity index 72% rename from debian/cloud-agent.postinst rename to debian/cloudstack-management.postinst index f022f6d16bc..7b6a1ed6d71 100644 --- a/debian/cloud-agent.postinst +++ b/debian/cloudstack-management.postinst @@ -16,19 +16,11 @@ # specific language governing permissions and limitations # under the License. -case "$1" in - configure) - - for i in /var/log/cloud/agent - do - chmod 0770 $i - done - - if [ "$2" = "" ] ; then # no recently configured version, this is a first install - /usr/sbin/update-rc.d cloud-agent defaults || true - fi - - ;; -esac - -#DEBHELPER# +if [ "$1" = configure ]; then + if ! 
getent passwd cloud >/dev/null; then + adduser --quiet --system --group --no-create-home --home /var/lib/cloudstack/management cloud + else + usermod -m -d /var/lib/cloudstack/management cloud + fi + chown cloud /var/log/cloudstack/management +fi \ No newline at end of file diff --git a/debian/cloud-agent-deps.install b/debian/cloudstack-usage.install similarity index 83% rename from debian/cloud-agent-deps.install rename to debian/cloudstack-usage.install index b05b7d1d9d4..4722e688dcd 100644 --- a/debian/cloud-agent-deps.install +++ b/debian/cloudstack-usage.install @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -15,4 +15,8 @@ # specific language governing permissions and limitations # under the License. 
-/usr/share/java/libvirt-0.4.9.jar +/usr/share/cloudstack-usage/lib/* +/usr/share/cloudstack-usage/plugins +/etc/init.d/cloudstack-usage +/var/log/cloudstack/usage +/etc/cloudstack/usage/* diff --git a/debian/control b/debian/control index 380b2e4a78d..8f82fc3ab2f 100644 --- a/debian/control +++ b/debian/control @@ -1,118 +1,51 @@ -Source: cloud +Source: cloudstack Section: libs Priority: extra Maintainer: Wido den Hollander -Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, libws-commons-util-java, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1), libservlet2.5-java, genisoimage, python-mysqldb, maven3 | maven (>= 3), liblog4j1.2-java (>= 1.2.16) +Build-Depends: debhelper (>= 7), openjdk-6-jdk | openjdk-7-jdk, tomcat6, genisoimage, + python-mysqldb, maven3 | maven (>= 3), python (>= 2.6.6-3~) Standards-Version: 3.8.1 Homepage: http://www.cloudstack.org/ -Package: cloud-deps -Architecture: any -Depends: openjdk-6-jre, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1) -Description: CloudStack library dependencies - This package contains a number of third-party dependencies - not shipped by distributions, required to run the CloudStack - Management Server. +Package: cloudstack-common +Architecture: all +Depends: bash, genisoimage +Conflicts: cloud-scripts, cloud-utils, cloud-system-iso, cloud-console-proxy, cloud-daemonize, cloud-deps, cloud-python, cloud-setup +Description: A common package which contains files which are shared by several CloudStack packages -Package: cloud-agent-deps -Architecture: any -Depends: openjdk-6-jre, cloud-deps (= ${source:Version}) -Description: CloudStack agent library dependencies - This package contains a number of third-party dependencies - not shipped by distributions, required to run the CloudStack - Agent. 
- -Package: cloud-utils -Architecture: any -Depends: openjdk-6-jre, python, libcglib-java (>= 2.2.2), libjsch-java (>= 0.1.42), libbackport-util-concurrent-java (>= 3.1), libcommons-dbcp-java (>= 1.4), libcommons-pool-java (>= 1.5.6) -Description: CloudStack utility library - The CloudStack utility libraries provide a set of Java classes used - in the CloudStack environment. - -Package: cloud-client-ui -Architecture: any -Depends: openjdk-6-jre, cloud-client (= ${source:Version}) -Description: CloudStack management server UI - The CloudStack management server is the central point of coordination, - management, and intelligence in the CloudStack Cloud Stack. This package - is a requirement of the cloud-client package, which installs the - CloudStack management server. - -Package: cloud-server -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-scripts (= ${source:Version}), libservlet2.5-java +Package: cloudstack-management +Architecture: all +Depends: cloudstack-common (= ${source:Version}), tomcat6, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools +Conflicts: cloud-server, cloud-client, cloud-client-ui Description: CloudStack server library - The CloudStack server libraries provide a set of Java classes used - in the CloudStack management server. + The CloudStack management server -Package: cloud-scripts -Replaces: cloud-agent-scripts -Architecture: any -Depends: openjdk-6-jre, python, bash, bzip2, gzip, unzip, nfs-common, openssh-client -Description: CloudStack scripts - This package contains a number of scripts needed for the CloudStack Agent and Management Server. 
- Both the CloudStack Agent and Management server depend on this package - -Package: cloud-core -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}) -Description: CloudStack core library - The CloudStack core libraries provide a set of Java classes used - in the CloudStack Cloud Stack. - - -Package: cloud-client -Architecture: any -Depends: openjdk-6-jre, cloud-deps (= ${source:Version}), cloud-utils (= ${source:Version}), cloud-server (= ${source:Version}), cloud-client-ui (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-python (= ${source:Version}), tomcat6, libws-commons-util-java, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools, genisoimage, cloud-system-iso, libmysql-java (>= 5.1) -Description: CloudStack client - The CloudStack management server is the central point of coordination, - management, and intelligence in the CloudStack Cloud Stack. This package - is required for the management server to work. - -Package: cloud-setup -Architecture: any -Depends: openjdk-6-jre, python, cloud-utils (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-python (= ${source:Version}), python-mysqldb -Description: CloudStack client - The CloudStack setup tools let you set up your Management Server and Usage Server. - -Package: cloud-python -Architecture: any -Depends: python -Description: CloudStack Python library - The CloudStack Python library contains a few Python modules that the - CloudStack uses. - -Package: cloud-agent-libs -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}) -Description: CloudStack agent libraries - The CloudStack agent libraries are used by the Cloud Agent. 
- -Package: cloud-agent -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-scripts (= ${source:Version}), cloud-system-iso (= ${source:Version}), libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, liblog4j1.2-java (>= 1.2.16), libjna-java, wget, jsvc, lsb-base (>= 3.2) +Package: cloudstack-agent +Architecture: all +Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), libcommons-daemon-java, libjna-java, openssh-client, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, perl-base, perl-modules, ebtables, vlan, wget, jsvc +Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent The CloudStack agent is in charge of managing shared computing resources in a CloudStack powered cloud. Install this package if this computer will participate in your cloud as a KVM HyperVisor. -Package: cloud-system-iso -Architecture: any -Description: CloudStack system iso - The CloudStack agent is in charge of managing shared computing resources in - a CloudStack powered cloud. Install this package if this computer - will participate in your cloud. 
- -Package: cloud-usage -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version}), jsvc +Package: cloudstack-usage +Architecture: all +Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), jsvc Description: CloudStack usage monitor The CloudStack usage monitor provides usage accounting across the entire cloud for cloud operators to charge based on usage parameters. -Package: cloud-cli -Provides: cloud-cli -Architecture: any -Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version}) -Description: CloudStack commandline tool - The CloudStack commandline tool for invoking APi +Package: cloudstack-awsapi +Architecture: all +Depends: cloudstack-common (= ${source:Version}), cloudstack-management (= ${source:Version}) +Description: CloudStack Amazon EC2 API + +Package: cloudstack-cli +Architecture: all +Depends: cloudstack-common (= ${source:Version}) +Description: The CloudStack CLI called CloudMonkey + +Package: cloudstack-docs +Architecture: all +Description: The CloudStack documentation diff --git a/debian/rules b/debian/rules index 36b611ddc04..7fc6425b34f 100755 --- a/debian/rules +++ b/debian/rules @@ -10,6 +10,12 @@ # Modified to make a template file for a multi-binary package with separated # build-arch and build-indep targets by Bill Allombert 2001 +DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p') +VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//') +PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1) +SYSCONFDIR = "/etc" +DESTDIR = "debian/tmp" + # Uncomment this to turn on verbose mode. 
export DH_VERBOSE=1 @@ -19,55 +25,122 @@ export DH_OPTIONS configure: configure-stamp configure-stamp: dh_testdir - cp packaging/debian/replace.properties build/replace.properties - echo VERSION=$VERSION >> build/replace.properties + cp packaging/debian/replace.properties replace.properties.tmp + echo VERSION=${VERSION} >> replace.properties.tmp touch configure-stamp -build: build-arch +build: build-indep -build-arch: build-arch-stamp -build-arch-stamp: configure-stamp - mvn package -Dsystemvm +build-indep: build-indep-stamp + +build-indep-stamp: configure + mvn package -DskipTests -Dsystemvm \ + -Dcs.replace.properties=replace.properties.tmp touch $@ clean: dh_testdir dh_testroot rm -f build-arch-stamp build-indep-stamp configure-stamp + rm -f replace.properties.tmp dh_clean install: dh_testdir dh_testroot dh_prep -s - mkdir -p debian/tmp/usr/bin - mkdir -p debian/tmp/usr/share/cloud/management - mkdir -p debian/tmp/var/log/cloud - mkdir debian/tmp/var/log/cloud/managament - mkdir debian/tmp/var/log/cloud/awsapi - mkdir debian/tmp/var/log/cloud/agent - mkdir debian/tmp/var/log/cloud/ipallocator - mkdir debian/tmp/var/log/cloud/usage - mkdir -p debian/tmp/etc/cloud - mkdir debian/tmp/etc/cloud/agent - mkdir debian/tmp/etc/cloud/server - mkdir debian/tmp/etc/cloud/management - mkdir debian/tmp/etc/cloud/usage - mkdir -p debian/tmp/var/cache/cloud - mkdir debian/tmp/var/cache/cloud/management - mkdir -p debian/tmp/usr/share/cloud - mkdir debian/tmp/usr/share/cloud/setup - mkdir -p debian/tmp/usr/share/cloud/management/webapps/client - - cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloud/setup/ - cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* debian/tmp/usr/share/cloud/management/webapps/client/ - dh_installdirs -s - dh_install -s -binary: binary-common -binary-common: - dh_testdir - dh_testroot + # Common packages + mkdir -p $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE) + mkdir -p $(DESTDIR)/$(SYSCONFDIR)/init.d + mkdir -p 
$(DESTDIR)/var/cache/$(PACKAGE) + mkdir -p $(DESTDIR)/var/log/$(PACKAGE) + mkdir -p $(DESTDIR)/var/lib/$(PACKAGE) + mkdir -p $(DESTDIR)/usr/bin + mkdir -p $(DESTDIR)/usr/share + + # cloudstack-agent + mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent + mkdir $(DESTDIR)/var/log/$(PACKAGE)/agent + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/plugins + install -D agent/target/cloud-agent-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar + install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ + install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ + install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent + install -D agent/bindir/cloud-setup-agent.in $(DESTDIR)/usr/bin/cloud-setup-agent + install -D agent/bindir/cloud-ssh.in $(DESTDIR)/usr/bin/cloud-ssh + install -D agent/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent + + # cloudstack-management + mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server + mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-management/setup + mkdir $(DESTDIR)/var/log/$(PACKAGE)/management + mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management + mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management/work + mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management/temp + mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator + mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management + mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt + cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/ + cp -r client/target/cloud-client-ui-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/ + cp server/target/conf/* 
$(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/ + cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ + ln -s tomcat6-nonssl.conf $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf + mkdir -p $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/Catalina/localhost/client + install -D packaging/debian/init/cloud-management $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-management + install -D client/bindir/cloud-update-xenserver-licenses.in $(DESTDIR)/usr/bin/cloud-update-xenserver-licenses + install -D server/target/cloud-server-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/$(PACKAGE)-server.jar + ln -s /usr/share/tomcat6/bin $(DESTDIR)/usr/share/$(PACKAGE)-management/bin + ln -s ../../..$(SYSCONFDIR)/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/conf + ln -s /usr/share/tomcat6/lib $(DESTDIR)/usr/share/$(PACKAGE)-management/lib + ln -s ../../../var/log/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/logs + ln -s ../../../var/cache/$(PACKAGE)/management/temp $(DESTDIR)/usr/share/$(PACKAGE)-management/temp + ln -s ../../../var/cache/$(PACKAGE)/management/work $(DESTDIR)/usr/share/$(PACKAGE)-management/work + + # cloudstack-common + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-common + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-common/setup + cp -r scripts/installer $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + cp -r scripts/network $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + cp -r scripts/storage $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + cp -r scripts/util $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + cp -r scripts/vm $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts + install -D client/target/utilities/bin/cloud-migrate-databases $(DESTDIR)/usr/bin + install -D client/target/utilities/bin/cloud-set-guest-password $(DESTDIR)/usr/bin + install -D client/target/utilities/bin/cloud-set-guest-sshkey $(DESTDIR)/usr/bin + install -D 
client/target/utilities/bin/cloud-setup-databases $(DESTDIR)/usr/bin + install -D client/target/utilities/bin/cloud-setup-management $(DESTDIR)/usr/bin + install -D client/target/cloud-client-ui-$(VERSION)-SNAPSHOT/WEB-INF/classes/vms/systemvm.iso $(DESTDIR)/usr/share/$(PACKAGE)-common/vms/systemvm.iso + + # cloudstack-python + mkdir -p $(DESTDIR)/usr/lib/python2.7/dist-packages + cp -r python/lib/cloud* $(DESTDIR)/usr/lib/python2.7/dist-packages + + # cloudstack-usage + mkdir $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage + mkdir $(DESTDIR)/var/log/$(PACKAGE)/usage + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage + mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage/plugins + install -D usage/target/cloud-usage-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar + install -D usage/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/ + cp usage/target/transformed/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/ + ln -s ../management/db.properties $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/db.properties + install -D packaging/debian/init/cloud-usage $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-usage + + # cloudstack-awsapi + mkdir $(DESTDIR)/var/log/$(PACKAGE)/awsapi + + dh_installdirs + dh_install + dh_python2 + +binary: install + dh_install dh_installchangelogs dh_installdocs LICENSE dh_installdocs DISCLAIMER diff --git a/deps/XenServerJava/pom.xml b/deps/XenServerJava/pom.xml index 18ba54f56a3..0f2cdf427c8 100644 --- a/deps/XenServerJava/pom.xml +++ b/deps/XenServerJava/pom.xml @@ -21,7 +21,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml xapi diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh index af495e7cc21..74575a8dbd1 100755 --- a/deps/install-non-oss.sh +++ b/deps/install-non-oss.sh @@ -8,7 +8,7 @@ # with the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -26,10 +26,17 @@ if [ -e cloud-manageontap.jar ]; then mv cloud-manageontap.jar manageontap.jar; mvn install:install-file -Dfile=manageontap.jar -DgroupId=com.cloud.com.netapp -DartifactId=manageontap -Dversion=4.0 -Dpackaging=jar # From https://my.vmware.com/group/vmware/get-download?downloadGroup=VSDK41 -# Version: 4.1, Release-date: 2010-07-13, Build: 257238 +# Version: 4.1, Release-date: 2010-07-13, Build: 257238 if [ -e vmware-apputils.jar ]; then mv vmware-apputils.jar apputils.jar; fi if [ -e vmware-vim.jar ]; then mv vmware-vim.jar vim.jar; fi if [ -e vmware-vim25.jar ]; then mv vmware-vim25.jar vim25.jar; fi mvn install:install-file -Dfile=vim25.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=4.1 -Dpackaging=jar mvn install:install-file -Dfile=apputils.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-apputils -Dversion=4.1 -Dpackaging=jar mvn install:install-file -Dfile=vim.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim -Dversion=4.1 -Dpackaging=jar + +# +# From https://my.vmware.com/group/vmware/get-download?downloadGroup=VSP510-WEBSDK-510 +# Version: 5.1, Release-date: 2012-09-10, Build: 774886 +mvn install:install-file -Dfile=vim25_51.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=5.1 -Dpackaging=jar + + diff --git a/developer/developer-prefill.sql b/developer/developer-prefill.sql index 8713d731645..6300d35df64 100644 --- a/developer/developer-prefill.sql +++ b/developer/developer-prefill.sql @@ -18,25 +18,25 @@ -- Add a default ROOT domain use cloud; -INSERT INTO `cloud`.`domain` (id, uuid, name, parent, path, owner, region_id) VALUES - (1, UUID(), 'ROOT', NULL, '/', 2, 1); +INSERT INTO `cloud`.`domain` (id, uuid, 
name, parent, path, owner) VALUES + (1, UUID(), 'ROOT', NULL, '/', 2); -- Add system and admin accounts -INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state, region_id) VALUES - (1, UUID(), 'system', 1, 1, 'enabled', 1); +INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state) VALUES + (1, UUID(), 'system', 1, 1, 'enabled'); -INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state, region_id) VALUES - (2, UUID(), 'admin', 1, 1, 'enabled', 1); +INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, state) VALUES + (2, UUID(), 'admin', 1, 1, 'enabled'); -- Add system user INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, - lastname, email, state, created, region_id) VALUES (1, UUID(), 'system', RAND(), - '1', 'system', 'cloud', NULL, 'enabled', NOW(), 1); + lastname, email, state, created) VALUES (1, UUID(), 'system', RAND(), + '1', 'system', 'cloud', NULL, 'enabled', NOW()); -- Add system user with encrypted password=password INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, - lastname, email, state, created, region_id) VALUES (2, UUID(), 'admin', '5f4dcc3b5aa765d61d8327deb882cf99', - '2', 'Admin', 'User', 'admin@mailprovider.com', 'enabled', NOW(), 1); + lastname, email, state, created) VALUES (2, UUID(), 'admin', '5f4dcc3b5aa765d61d8327deb882cf99', + '2', 'Admin', 'User', 'admin@mailprovider.com', 'enabled', NOW()); -- Add configurations INSERT INTO `cloud`.`configuration` (category, instance, component, name, value) diff --git a/developer/pom.xml b/developer/pom.xml index 79b24665542..ff47b143093 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -18,7 +18,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -27,7 +27,12 @@ 5.1.21 runtime - + + org.apache.cloudstack + cloud-plugin-hypervisor-simulator + ${project.version} + compile + install @@ -152,10 +157,6 @@ 
${basedir}/target/db/create-schema.sql ${basedir}/target/db/create-schema-premium.sql - - ${basedir}/target/db/create-schema-view.sql - - ${basedir}/target/db/4.1-new-db-schema.sql ${basedir}/target/db/templates.sql @@ -182,115 +183,9 @@ catalina.home ${project.parent.basedir}/utils - - - - - - - - - - simulator - - - deploydb-simulator - - - - - - org.codehaus.mojo - properties-maven-plugin - 1.0-alpha-2 - - - initialize - - read-project-properties - - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - true - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - mysql - mysql-connector-java - ${cs.mysql.version} - - - commons-dbcp - commons-dbcp - ${cs.dbcp.version} - - - commons-pool - commons-pool - ${cs.pool.version} - - - org.jasypt - jasypt - ${cs.jasypt.version} - - - org.apache.cloudstack - cloud-utils - ${project.version} - - - org.apache.cloudstack - cloud-server - ${project.version} - - - - - process-resources - create-schema - - java - - - - - false - true - - org.apache.cloudstack - cloud-server - - com.cloud.upgrade.DatabaseCreator - - - ${project.parent.basedir}/utils/conf/db.properties - ${project.parent.basedir}/utils/conf/db.properties.override - - ${basedir}/target/db/create-schema-simulator.sql - ${basedir}/target/db/templates.simulator.sql - - com.cloud.upgrade.DatabaseUpgradeChecker - --database=simulator - --rootpassword=${db.root.password} - - - - catalina.home - ${project.parent.basedir}/utils + paths.script + ${basedir}/target/db diff --git a/docs/en-US/Developers_Guide.xml b/docs/en-US/Developers_Guide.xml index e753f9bcb33..c86208b3271 100644 --- a/docs/en-US/Developers_Guide.xml +++ b/docs/en-US/Developers_Guide.xml @@ -26,7 +26,7 @@ &PRODUCT; Developer's Guide Apache CloudStack - 4.0.0-incubating + 4.1.0-incubating diff --git a/docs/en-US/accessing-vms.xml b/docs/en-US/accessing-vms.xml index ce780cff080..67d9d774172 100644 --- 
a/docs/en-US/accessing-vms.xml +++ b/docs/en-US/accessing-vms.xml @@ -32,9 +32,9 @@ To access a VM directly over the network: - The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling. + The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See . If a port is open but you can not access the VM using ssh, it’s possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VM’s operating system. - If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling. + If the network has an external firewall device, you will need to create a firewall rule to allow access. See . diff --git a/docs/en-US/add-clusters-ovm.xml b/docs/en-US/add-clusters-ovm.xml index aa07dfa6506..d0b0688e6a3 100644 --- a/docs/en-US/add-clusters-ovm.xml +++ b/docs/en-US/add-clusters-ovm.xml @@ -28,12 +28,12 @@ Add a companion non-OVM cluster to the Pod. This cluster provides an environment where the &PRODUCT; System VMs can run. You should have already installed a non-OVM hypervisor on at least one Host to prepare for this step. Depending on which hypervisor you used: - For VMWare, follow the steps in Add Cluster: vSphere. When finished, return here and continue with the next step. + For VMWare, follow the steps in . 
When finished, return here and continue with the next step. For KVM or XenServer, follow the steps in . When finished, return here and continue with the next step - In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the cluster. - Click the Compute tab. In the Pods node, click View All. Select the same pod you used in step 1. + In the left navigation, choose Infrastructure. In Zones, click View All, then click the zone in which you want to add the cluster. + Click the Compute and Storage tab. In the Pods node, click View All. Click View Clusters, then click Add Cluster. The Add Cluster dialog is displayed. In Hypervisor, choose OVM. diff --git a/docs/en-US/add-remove-nic.xml b/docs/en-US/add-remove-nic.xml new file mode 100644 index 00000000000..15dc66c2b83 --- /dev/null +++ b/docs/en-US/add-remove-nic.xml @@ -0,0 +1,127 @@ + + +%BOOK_ENTITIES; +]> + +
+ Reconfiguring Physical Networks in VMs + &PRODUCT; provides you the ability to move VMs between networks and reconfigure a VM's + network. You can remove a VM from a physical network and add to a new physical network. You can + also change the default physical network of a virtual machine. With this functionality, hybrid + or traditional server loads can be accommodated with ease. + This feature is supported on XenServer and KVM hypervisors. + The following APIs have been added to support this feature. These API calls can function + only while the VM is in running or stopped state. +
+ addNicToVirtualMachine + The addNicToVirtualMachine API adds a new NIC to the specified VM on a selected + network. + + + + + parameter + description + Value + + + + + virtualmachineid + The unique ID of the VM to which the NIC is to be added. + true + + + networkid + The unique ID of the network the NIC that you add should apply + to. + true + + + ipaddress + The IP address of the VM on the network. + false + + + + + The network and VM must reside in the same zone. Two VMs with the same name cannot reside + in the same network. Therefore, adding a second VM that duplicates a name on a network will + fail. +
+
+ removeNicFromVirtualMachine + The removeNicFromVirtualMachine API removes a NIC from the specified VM on a selected + network. + + + + + parameter + description + Value + + + + + virtualmachineid + The unique ID of the VM from which the NIC is to be removed. + + true + + + nicid + The unique ID of the NIC that you want to remove. + true + + + + + Removing the default NIC is not allowed. +
+
+ updateDefaultNicForVirtualMachine + The updateDefaultNicForVirtualMachine API updates the specified NIC to be the default one + for a selected VM. + + + + + parameter + description + Value + + + + + virtualmachineid + The unique ID of the VM for which you want to specify the default NIC. + + true + + + nicid + The unique ID of the NIC that you want to set as the default + one. + true + + + + +
+
diff --git a/docs/en-US/added-API-commands-4-1.xml b/docs/en-US/added-API-commands-4-1.xml new file mode 100644 index 00000000000..aa5529e41ff --- /dev/null +++ b/docs/en-US/added-API-commands-4-1.xml @@ -0,0 +1,69 @@ + + +%BOOK_ENTITIES; +]> + +
+ Added API Commands in 4.1-incubating + + + createEgressFirewallRules (creates an egress firewall rule on the guest network.) + + + deleteEgressFirewallRules (deletes a egress firewall rule on the guest network.) + + + listEgressFirewallRules (lists the egress firewall rules configured for a guest + network.) + + + resetSSHKeyForVirtualMachine (Resets the SSHkey for virtual machine.) + + + addBaremetalHost (Adds a new host.) + + + addNicToVirtualMachine (Adds a new NIC to the specified VM on a selected + network.) + + + removeNicFromVirtualMachine (Removes the specified NIC from a selected VM.) + + + updateDefaultNicForVirtualMachine (Updates the specified NIC to be the default one for a + selected VM.) + + + addRegion (Registers a Region into another Region.) + + + updateRegion (Updates Region details: ID, Name, Endpoint, User API Key, and User Secret + Key.) + + + removeRegion (Removes a Region from current Region.) + + + listRegions (Get all the Regions. They can be filtered by using the ID or Name.) + + + getUser (This API can only be used by the Admin. Get user details by using the API Key.) + + + +
diff --git a/docs/en-US/added-error-codes.xml b/docs/en-US/added-error-codes.xml index dbfea263598..ae7389122f9 100644 --- a/docs/en-US/added-error-codes.xml +++ b/docs/en-US/added-error-codes.xml @@ -24,7 +24,7 @@
Added &PRODUCT; Error Codes - You can now find the &PRODUCT;-specific error code in the exception response for each type of exception. The following list of error codes is added to the new class named CSExceptionErrorCode. These codes are applicable in &PRODUCT; 3.0.3 and later versions. + You can now find the &PRODUCT;-specific error code in the exception response for each type of exception. The following list of error codes is added to the new class named CSExceptionErrorCode. diff --git a/docs/en-US/advanced-zone-guest-ip-addresses.xml b/docs/en-US/advanced-zone-guest-ip-addresses.xml index fbc6144bec1..66bc0826683 100644 --- a/docs/en-US/advanced-zone-guest-ip-addresses.xml +++ b/docs/en-US/advanced-zone-guest-ip-addresses.xml @@ -11,9 +11,7 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -22,6 +20,12 @@ under the License. -->
- Advanced Zone Guest IP Addresses - When advanced networking is used, the administrator can create additional networks for use by the guests. These networks can span the zone and be available to all accounts, or they can be scoped to a single account, in which case only the named account may create guests that attach to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The administrator may provision thousands of these networks if desired. + Advanced Zone Guest IP Addresses + When advanced networking is used, the administrator can create additional networks for use + by the guests. These networks can span the zone and be available to all accounts, or they can be + scoped to a single account, in which case only the named account may create guests that attach + to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The + administrator may provision thousands of these networks if desired. Additionally, the + administrator can reserve a part of the IP address space for non-&PRODUCT; VMs and + servers.
diff --git a/docs/en-US/aws-ec2-configuration.xml b/docs/en-US/aws-ec2-configuration.xml index 7d26027ba35..dd7732ebced 100644 --- a/docs/en-US/aws-ec2-configuration.xml +++ b/docs/en-US/aws-ec2-configuration.xml @@ -28,14 +28,13 @@ The software that provides AWS API compatibility is installed along with &PRODUCT;. You must enable the services and perform some setup steps prior to using it. - Set the global configuration parameters for each service to true. + Set the global configuration parameters for each service to true. See . Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings. You can do this through the &PRODUCT; UI as described in the Administration Guide. Be sure you have included the Amazon default service offering, m1.small. As well as any EC2 instance types that you will use. - If you did not already do so when you set the configuration parameter in step 1, - restart the Management Server. + If you did not already do so when you set the configuration parameter in step , restart the Management Server. # service cloud-management restart diff --git a/docs/en-US/aws-ec2-user-setup.xml b/docs/en-US/aws-ec2-user-setup.xml index edc371ef376..f41eaa158d7 100644 --- a/docs/en-US/aws-ec2-user-setup.xml +++ b/docs/en-US/aws-ec2-user-setup.xml @@ -45,7 +45,7 @@ AWS API User Registration Each user must perform a one-time registration. The user follows these steps: - + Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator: @@ -69,7 +69,7 @@ wget -O cloudstack-aws-api-register "https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD" - Then execute it, using the parameter values that were obtained in step 1. An example is shown below. + Then execute it, using the access and secret keys that were obtained in step . An example is shown below. 
$ cloudstack-aws-api-register --apikey=User’s &PRODUCT; API key --secretkey=User’s &PRODUCT; Secret key --cert=/path/to/cert.pem --url=http://&PRODUCT;.server:7080/awsapi diff --git a/docs/en-US/build-deb.xml b/docs/en-US/build-deb.xml index 37e5a7d7474..dca31d23a28 100644 --- a/docs/en-US/build-deb.xml +++ b/docs/en-US/build-deb.xml @@ -51,7 +51,7 @@ and packaging them into DEBs by issuing the following command. -$ dpkg-buildpackge -uc -us +$ dpkg-buildpackage -uc -us diff --git a/docs/en-US/build-rpm.xml b/docs/en-US/build-rpm.xml index e983aba8fe5..ba32ef568ab 100644 --- a/docs/en-US/build-rpm.xml +++ b/docs/en-US/build-rpm.xml @@ -5,78 +5,82 @@ ]>
- Building RPMs - - While we have defined, and you have presumably already installed the - bootstrap prerequisites, there are a number of build time prerequisites - that need to be resolved. &PRODUCT; uses maven for dependency resolution. - You can resolve the buildtime depdencies for CloudStack by running the - following command: - $ mvn -P deps - - - Now that we have resolved the dependencies we can move on to building &PRODUCT; - and packaging them into RPMs by issuing the following command. - $ ./waf rpm - - - Once this completes, you should find assembled RPMs in - artifacts/rpmbuild/RPMS/x86_64 - -
- Creating a yum repo - - While RPMs is an ideal packaging format - it's most easily consumed from - yum repositories over a network. We'll move into the directory with the - newly created RPMs by issuing the following command: - $ cd artifacts/rpmbuild/RPMS/x86_64 - - - Next we'll issue a command to create the repository metadata by - issuing the following command: - $ createrepo ./ - - - The files and directories within our current working directory can now - be uploaded to a web server and serve as a yum repository - + Building RPMs from Source + As mentioned previously in , you will need to install several prerequisites before you can build packages for &PRODUCT;. Here we'll assume you're working with a 64-bit build of CentOS or Red Hat Enterprise Linux. + # yum groupinstall "Development Tools" + # yum install java-1.6.0-openjdk-devel.x86_64 genisoimage mysql mysql-server ws-common-utils MySQL-python tomcat6 createrepo + Next, you'll need to install build-time dependencies for CloudStack with + Maven. We're using Maven 3, so you'll want to + grab a Maven 3 tarball + and uncompress it in your home directory (or whatever location you prefer): + $ tar zxvf apache-maven-3.0.4-bin.tar.gz + $ export PATH=/usr/local/apache-maven-3.0.4//bin:$PATH + Maven also needs to know where Java is, and expects the JAVA_HOME environment + variable to be set: + $ export JAVA_HOME=/usr/lib/jvm/jre-1.6.0-openjdk.x86_64/ + Verify that Maven is installed correctly: + $ mvn --version + You probably want to ensure that your environment variables will survive a logout/reboot. + Be sure to update ~/.bashrc with the PATH and JAVA_HOME variables. + + Building RPMs for $PRODUCT; is fairly simple. Assuming you already have the source downloaded and have uncompressed the tarball into a local directory, you're going to be able to generate packages in just a few minutes. 
+ Packaging has Changed + If you've created packages for $PRODUCT; previously, you should be aware that the process has changed considerably since the project has moved to using Apache Maven. Please be sure to follow the steps in this section closely. + +
+ Generating RPMS + Now that we have the prerequisites and source, you will cd to the packaging/centos63/ directory. + Generating RPMs is done using the package.sh script: + $./package.sh + + That will run for a bit and then place the finished packages in dist/rpmbuild/RPMS/x86_64/. + You should see seven RPMs in that directory: cloudstack-agent-4.1.0-SNAPSHOT.el6.x86_64.rpm, cloudstack-awsapi-4.1.0-SNAPSHOT.el6.x86_64.rpm, cloudstack-cli-4.1.0-SNAPSHOT.el6.x86_64.rpm, cloudstack-common-4.1.0-SNAPSHOT.el6.x86_64.rpm, cloudstack-docs-4.1.0-SNAPSHOT.el6.x86_64.rpm, cloudstack-management-4.1.0-SNAPSHOT.el6.x86_64.rpm, and cloudstack-usage-4.1.0-SNAPSHOT.el6.x86_64.rpm. +
+ Creating a yum repo + + While RPMs is a useful packaging format - it's most easily consumed from Yum repositories over a network. The next step is to create a Yum Repo with the finished packages: + $ mkdir -p ~/tmp/repo + $ cp dist/rpmbuild/RPMS/x86_64/*rpm ~/tmp/repo/ + $ createrepo ~/tmp/repo + + + The files and directories within ~/tmp/repo can now be uploaded to a web server and serve as a yum repository. + +
+
+ Configuring your systems to use your new yum repository + + Now that your yum repository is populated with RPMs and metadata + we need to configure the machines that need to install $PRODUCT;. + Create a file named /etc/yum.repos.d/cloudstack.repo with this information: + + [apache-cloudstack] + name=Apache CloudStack + baseurl=http://webserver.tld/path/to/repo + enabled=1 + gpgcheck=0 + + + Completing this step will allow you to easily install $PRODUCT; on a number of machines across the network. + +
+
-
- Configuring your systems to use your new yum repository - - Now that your yum repository is populated with RPMs and metadata - we need to configure our machines that need to install CloudStack. - We will create a file at /etc/yum.repos.d/cloudstack.repo - with the following content: - -[apache-cloudstack] -name=Apache CloudStack -baseurl=http://webserver.tld/path/to/repo -enabled=1 -gpgcheck=0 - - - - Completing this step will allow you to easily install CloudStack on a number of - machines across the network. - -
-
diff --git a/docs/en-US/building-marvin.xml b/docs/en-US/building-marvin.xml index 3dac9d65d60..3332b16d9b1 100644 --- a/docs/en-US/building-marvin.xml +++ b/docs/en-US/building-marvin.xml @@ -25,8 +25,8 @@
Building and Installing Marvin Marvin is built with Maven and is dependent on APIdoc. To build it do the following in the root tree of &PRODUCT;: - mvn -P developer -l :cloud-apidoc - mvn -P developer -l :cloud-marvin + mvn -P developer -pl :cloud-apidoc + mvn -P developer -pl :cloud-marvin If successfull the build will have created the cloudstackAPI Python package under tools/marvin/marvin/cloudstackAPI as well as a gziped Marvin package under tools/marvin dist. To install the Python Marvin module do the following in tools/marvin: sudo python ./setup.py install The dependencies will be downloaded the Python module installed and you should be able to use Marvin in Python. Check that you can import the module before starting to use it. diff --git a/docs/en-US/change-console-proxy-ssl-certificate-domain.xml b/docs/en-US/change-console-proxy-ssl-certificate-domain.xml index 46ceaae1a19..89796a22c23 100644 --- a/docs/en-US/change-console-proxy-ssl-certificate-domain.xml +++ b/docs/en-US/change-console-proxy-ssl-certificate-domain.xml @@ -38,8 +38,8 @@ In the Update SSL Certificate screen of the &PRODUCT; UI, paste the following - Certificate from step 1(c). - Private key from step 1(e). + The Certificate you generated in the previous steps. + The Private key you generated in the previous steps. The desired new domain name; for example, company.com diff --git a/docs/en-US/changed-apicommands-4.1.xml b/docs/en-US/changed-apicommands-4.1.xml new file mode 100644 index 00000000000..42bd088afb3 --- /dev/null +++ b/docs/en-US/changed-apicommands-4.1.xml @@ -0,0 +1,106 @@ + + +%BOOK_ENTITIES; +]> + +
+ Changed API Commands in 4.1-incubating + + + + + + + API Commands + Description + + + + + + createNetworkOffering + listNetworkOfferings + listNetworks + + + The following request parameters is added: isPersistent. + This parameter determines if the network or network offering created or listed by + using this offering are persistent or not. + + + + + addF5LoadBalancer + configureNetscalerLoadBalancer + addNetscalerLoadBalancer + listF5LoadBalancers + configureF5LoadBalancer + listNetscalerLoadBalancers + + + The following response parameter is removed: inline. + + + + listFirewallRules + createFirewallRule + + The following request parameter is added: traffictype (optional). + + + + listUsageRecords + The following response parameter is added: virtualsize. + + + + + deleteIso + + + The following request parameter is added: forced (optional). + + + + + createStoragePool + + + The following request parameters are made mandatory: + + + podid + + + clusterid + + + + + + + listZones + + + The following request parameters is added: securitygroupenabled + + + + + +
diff --git a/docs/en-US/citrix-xenserver-installation.xml b/docs/en-US/citrix-xenserver-installation.xml index 867d36e1b10..40538658078 100644 --- a/docs/en-US/citrix-xenserver-installation.xml +++ b/docs/en-US/citrix-xenserver-installation.xml @@ -261,15 +261,15 @@ server 3.xenserver.pool.ntp.org Connect FiberChannel cable to all hosts in the cluster and to the FiberChannel storage host. - + Rescan the SCSI bus. Either use the following command or use XenCenter to perform an HBA rescan. # scsi-rescan - Repeat step 2 on every host. + Repeat step on every host. - + Check to be sure you see the new SCSI disk. # ls /dev/disk/by-id/scsi-360a98000503365344e6f6177615a516b -l The output should look like this, although the specific file name will be different @@ -279,7 +279,7 @@ server 3.xenserver.pool.ntp.org - Repeat step 4 on every host. + Repeat step on every host. On the storage server, run this command to get a unique ID for the new SR. diff --git a/docs/en-US/cloudmonkey.xml b/docs/en-US/cloudmonkey.xml new file mode 100644 index 00000000000..0057562cca2 --- /dev/null +++ b/docs/en-US/cloudmonkey.xml @@ -0,0 +1,224 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ CloudMonkey + CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python and leverages Marvin. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. + + CloudMonkey is still under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation: + https://cwiki.apache.org/CLOUDSTACK/cloudstack-cloudmonkey-cli.html + + +
+ Installing CloudMonkey + There are two ways to get CloudMonkey: + + + Via the official Apache &PRODUCT; releases (starting with 4.1). + + + + + + Via a community maintained package on Cheese Shop + pip install cloudmonkey + + + +
+ +
+ Configuration + To configure CloudMonkey you can edit the .cloudmonkey_config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt + +$ cat .cloudmonkey_config +[CLI] +protocol = http +asyncblock = true +color = true +prompt = cloudmonkey> +history_file = /Users/sebastiengoasguen/.cloudmonkey_history +host = localhost +path = /client/api +port = 8080 +apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg +secretkey = VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ +timeout = 600 +log_file = /Users/sebastiengoasguen/.cloudmonkey_log + + The values can also be set at the cloudmonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. + + set prompt myprompt> +myprompt> set host localhost +myprompt> set port 8080 +myprompt> set apikey +myprompt> set secretkey +]]> + + You can use cloudmonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. +
+ +
+ Interactive Shell Usage + To start learning cloudmonkey, the best is to use the interactive shell. Simply type cloudmonkey at the prompt and you should get the interactive shell. + At the cloudmonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb + + +EOF assign cancel create detach extract ldap prepare reconnect restart shell update +activate associate change delete disable generate list query register restore start upload +add attach configure deploy enable get mark quit remove revoke stop +api authorize copy destroy exit help migrate reboot reset set suspend +cloudmonkey>create +account diskoffering loadbalancerrule portforwardingrule snapshot tags vpc +autoscalepolicy domain network privategateway snapshotpolicy template vpcoffering +autoscalevmgroup firewallrule networkacl project sshkeypair user vpnconnection +autoscalevmprofile instancegroup networkoffering remoteaccessvpn staticroute virtualrouterelement vpncustomergateway +condition ipforwardingrule physicalnetwork securitygroup storagenetworkiprange vlaniprange vpngateway +counter lbstickinesspolicy pod serviceoffering storagepool volume zone +]]> + + Picking one action and entering a space plus the tab key, you will obtain the list of parameters for that specific api call. 
+ +create network +account= domainid= isAsync= networkdomain= projectid= vlan= +acltype= endip= name= networkofferingid= startip= vpcid= +displaytext= gateway= netmask= physicalnetworkid= subdomainaccess= zoneid= +]]> + + To get additional help on that specific api call you can use the following: + +create network -h +Creates a network +Required args: displaytext name networkofferingid zoneid +Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid + +cloudmonkey>create network -help +Creates a network +Required args: displaytext name networkofferingid zoneid +Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid + +cloudmonkey>create network --help +Creates a network +Required args: displaytext name networkofferingid zoneid +Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid +cloudmonkey> +]]> + + Note the required arguments necessary for the calls. + To find out the required parameters value, using a debugger console on the &PRODUCT; UI might be very useful. For instance using Firebug on Firefox, you can navigate the UI and check the parameters values for each call you are making as you navigate the UI. +
+ +
+ Starting a Virtual Machine instance with CloudMonkey + To start a virtual machine instance we will use the deploy virtualmachine call. + +deploy virtualmachine -h +Creates and automatically starts a virtual machine based on a service offering, disk offering, and template. +Required args: serviceofferingid templateid zoneid +Args: account diskofferingid displayname domainid group hostid hypervisor ipaddress iptonetworklist isAsync keyboard keypair name networkids projectid securitygroupids securitygroupnames serviceofferingid size startvm templateid userdata zoneid +]]> + + The required arguments are serviceofferingid, templateid and zoneid + In order to specify the template that we want to use, we can list all available templates with the following call: + +list templates templatefilter=all +count = 2 +template: +======== +domain = ROOT +domainid = 8a111e58-e155-4482-93ce-84efff3c7c77 +zoneid = e1bfdfaf-3d9b-43d4-9aea-2c9f173a1ae7 +displaytext = SystemVM Template (XenServer) +ostypeid = 849d7d0a-9fbe-452a-85aa-70e0a0cbc688 +passwordenabled = False +id = 6d360f79-4de9-468c-82f8-a348135d298e +size = 2101252608 +isready = True +templatetype = SYSTEM +zonename = devcloud +... +]]> + + In this snippet, I used DevCloud and only showed the beginning output of the first template, the SystemVM template + Similarly to get the serviceofferingid you would do: + +list serviceofferings | grep id +id = ef2537ad-c70f-11e1-821b-0800277e749c +id = c66c2557-12a7-4b32-94f4-48837da3fa84 +id = 3d8b82e5-d8e7-48d5-a554-cf853111bc50 +]]> + + Note that we can use the linux pipe as well as standard linux commands within the interactive shell. 
Finally we would start an instance with the following call: + +deploy virtualmachine templateid=13ccff62-132b-4caf-b456-e8ef20cbff0e zoneid=e1bfdfaf-3d9b-43d4-9aea-2c9f173a1ae7 serviceofferingid=ef2537ad-c70f-11e1-821b-0800277e749c +jobprocstatus = 0 +created = 2013-03-05T13:04:51-0800 +cmd = com.cloud.api.commands.DeployVMCmd +userid = 7ed6d5da-93b2-4545-a502-23d20b48ef2a +jobstatus = 1 +jobid = c441d894-e116-402d-aa36-fdb45adb16b7 +jobresultcode = 0 +jobresulttype = object +jobresult: +========= +virtualmachine: +============== +domain = ROOT +domainid = 8a111e58-e155-4482-93ce-84efff3c7c77 +haenable = False +templatename = tiny Linux +... +]]> + + The instance would be stopped with: + +cloudmonkey>stop virtualmachine id=7efe0377-4102-4193-bff8-c706909cc2d2 + + The ids that you will use will differ from this example. Make sure you use the ones that corresponds to your &PRODUCT; cloud. +
+ +
+ Scripting with CloudMonkey + All previous examples use CloudMonkey via the interactive shell, however it can be used as a straightfoward CLI, passing the commands to the cloudmonkey command like shown below. + $cloudmonkey list users + As such it can be used in shell scripts, it can received commands via stdin and its output can be parsed like any other unix commands as mentioned before. +
+ +
diff --git a/docs/en-US/console-proxy.xml b/docs/en-US/console-proxy.xml index 3dd7b9fd692..64183b4bfc0 100644 --- a/docs/en-US/console-proxy.xml +++ b/docs/en-US/console-proxy.xml @@ -24,11 +24,11 @@ console view via the web UI. It connects the user’s browser to the VNC port made available via the hypervisor for the console of the guest. Both the administrator and end user web UIs offer a console connection. - Clicking on a console icon brings up a new window. The AJAX code downloaded into that window + Clicking a console icon brings up a new window. The AJAX code downloaded into that window refers to the public IP address of a console proxy VM. There is exactly one public IP address allocated per console proxy VM. The AJAX application connects to this IP. The console proxy then - proxies the connection to the VNC port for the requested VM on the Host hosting the guest. - . + proxies the connection to the VNC port for the requested VM on the Host hosting the + guest. The hypervisors will have many ports assigned to VNC usage so that multiple VNC sessions can occur simultaneously. @@ -108,10 +108,10 @@ In the Update SSL Certificate screen of the &PRODUCT; UI, paste the following: - Certificate from step 1(c). + The certificate you've just generated. - Private key from step 1(e). + The private key you've just generated. The desired new domain name; for example, company.com diff --git a/docs/en-US/create-bare-metal-template.xml b/docs/en-US/create-bare-metal-template.xml index 19db2ed5f30..0ee4c11fead 100644 --- a/docs/en-US/create-bare-metal-template.xml +++ b/docs/en-US/create-bare-metal-template.xml @@ -24,7 +24,6 @@
Creating a Bare Metal Template - Beta feature. Untested in &PRODUCT; 3.0.3. Provided without guarantee of performance. Before you can create a bare metal template, you must have performed several other installation and setup steps to create a bare metal cluster and environment. See Bare Metal Installation in the Installation Guide. It is assumed you already have a directory named "win7_64bit" on your CIFS server, containing the image for the bare metal instance. This directory and image are set up as part of the Bare Metal Installation procedure. Log in to the &PRODUCT; UI as an administrator or end user. diff --git a/docs/en-US/creating-network-offerings.xml b/docs/en-US/creating-network-offerings.xml index 0269ce024cb..1f79fb166ce 100644 --- a/docs/en-US/creating-network-offerings.xml +++ b/docs/en-US/creating-network-offerings.xml @@ -22,146 +22,208 @@ under the License. -->
- Creating a New Network Offering - To create a network offering: - - Log in with admin privileges to the &PRODUCT; UI. - In the left navigation bar, click Service Offerings. - In Select Offering, choose Network Offering. - Click Add Network Offering. - In the dialog, make the following choices: - - Name. Any desired name for the network offering - Description. A short description of the offering that can be - displayed to users - Network Rate. Allowed data transfer rate in MB per - second - Guest Type. Choose whether the guest network is isolated or - shared. For a description of these terms, see - - Specify VLAN. (Isolated guest networks only) Indicate whether - a VLAN should be specified when this offering is used - Supported Services. Select one or more of the possible - network services. For some services, you must also choose the service - provider; for example, if you select Load Balancer, you can choose the - &PRODUCT; virtual router or any other load balancers that have been - configured in the cloud. Depending on which services you choose, additional - fields may appear in the rest of the dialog box.Based on the guest network type selected, you can see the following supported services: - - - - Supported Services - Description - Isolated - Shared - - - - - DHCP - For more information, see . - Supported - Supported - - - DNS - For more information, see . - Supported - Supported - - - Load Balancer - If you select Load Balancer, you can choose the &PRODUCT; virtual router or any other load - balancers that have been configured in the cloud. - Supported - Supported - - - Source NAT - If you select Source NAT, you can choose the &PRODUCT; virtual router or any other Source - NAT providers that have been configured in the - cloud. - Supported - Supported - - - Static NAT - If you select Static NAT, you can choose the &PRODUCT; virtual router or any other Static - NAT providers that have been configured in the - cloud. 
- Supported - Supported - - - Port Forwarding - If you select Port Forwarding, you can choose the &PRODUCT; virtual router or any other - Port Forwarding providers that have been configured in - the cloud. - Supported - Not Supported - - - VPN - For more information, see . - Supported - Not Supported - - - User Data - For more information, see . - Not Supported - Supported - - - Network ACL - For more information, see . - Supported - Not Supported - - - Security Groups - For more information, see . - Not Supported - Supported - - - - - - System Offering. If the service provider for any of the - services selected in Supported Services is a virtual router, the System - Offering field appears. Choose the system service offering that you want - virtual routers to use in this network. For example, if you selected Load - Balancer in Supported Services and selected a virtual router to provide load - balancing, the System Offering field appears so you can choose between the - &PRODUCT; default system service offering and any custom system service - offerings that have been defined by the &PRODUCT; root administrator. - For more information, see System Service Offerings. - Redundant router capability. Available - only when Virtual Router is selected as the Source NAT provider. Select this - option if you want to use two virtual routers in the network for - uninterrupted connection: one operating as the master virtual router and the - other as the backup. The master virtual router receives requests from and - sends responses to the user’s VM. The backup virtual router is activated - only when the master is down. After the failover, the backup becomes the - master virtual router. &PRODUCT; deploys the routers on different hosts - to ensure reliability if one host is down. - Conserve mode. Indicate whether to use conserve mode. In this - mode, network resources are allocated only when the first virtual machine - starts in the network. 
When the conservative mode is off, the public IP can - only be used for a single service. For example, a public IP used for a port - forwarding rule cannot be used for defining other services, such as SaticNAT - or load balancing. When the conserve mode is on, you can define more than - one service on the same public IP. - If StaticNAT is enabled, irrespective of the status of the conserve mode, no port forwarding - or load balancing rule can be created for the IP. However, you can add - the firewall rules by using the createFirewallRule command. - Tags. Network tag to specify which physical network to - use. - - Click Add. - - - + Creating a New Network Offering + To create a network offering: + + + Log in with admin privileges to the &PRODUCT; UI. + + + In the left navigation bar, click Service Offerings. + + + In Select Offering, choose Network Offering. + + + Click Add Network Offering. + + + In the dialog, make the following choices: + + + Name. Any desired name for the network + offering. + + + Description. A short description of the offering + that can be displayed to users. + + + Network Rate. Allowed data transfer rate in MB per + second. + + + Guest Type. Choose whether the guest network is + isolated or shared. + For a description of this term, see . + For a description of this term, see the Administration Guide. + + + + Persistent. Indicate whether the guest network is + persistent or not. The network that you can provision without having to deploy a VM on + it is termed persistent network. For more information, see . + + + Specify VLAN. (Isolated guest networks only) + Indicate whether a VLAN should be specified when this offering is used. + + + VPC. This option indicate whether the guest network + is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a private, isolated + part of &PRODUCT;. A VPC can have its own virtual network topology that resembles a + traditional physical network. For more information on VPCs, see . 
+ + + Supported Services. Select one or more of the + possible network services. For some services, you must also choose the service provider; + for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or + any other load balancers that have been configured in the cloud. Depending on which + services you choose, additional fields may appear in the rest of the dialog box. + Based on the guest network type selected, you can see the following supported + services: + + + + + Supported Services + Description + Isolated + Shared + + + + + DHCP + For more information, see . + Supported + Supported + + + DNS + For more information, see . + Supported + Supported + + + Load Balancer + If you select Load Balancer, you can choose the &PRODUCT; virtual + router or any other load balancers that have been configured in the + cloud. + Supported + Supported + + + Firewall + For more information, see . + For more information, see the Administration + Guide. + Supported + Supported + + + Source NAT + If you select Source NAT, you can choose the &PRODUCT; virtual router + or any other Source NAT providers that have been configured in the + cloud. + Supported + Supported + + + Static NAT + If you select Static NAT, you can choose the &PRODUCT; virtual router + or any other Static NAT providers that have been configured in the + cloud. + Supported + Supported + + + Port Forwarding + If you select Port Forwarding, you can choose the &PRODUCT; virtual + router or any other Port Forwarding providers that have been configured in the + cloud. + Supported + Not Supported + + + VPN + For more information, see . + Supported + Not Supported + + + User Data + For more information, see . + For more information, see the Administration + Guide. + Not Supported + Supported + + + Network ACL + For more information, see . + Supported + Not Supported + + + Security Groups + For more information, see . + Not Supported + Supported + + + + + + + System Offering. 
If the service provider for any of + the services selected in Supported Services is a virtual router, the System Offering + field appears. Choose the system service offering that you want virtual routers to use + in this network. For example, if you selected Load Balancer in Supported Services and + selected a virtual router to provide load balancing, the System Offering field appears + so you can choose between the &PRODUCT; default system service offering and any custom + system service offerings that have been defined by the &PRODUCT; root + administrator. + For more information, see . + For more information, see the Administration Guide. + + + Redundant router capability. Available only when + Virtual Router is selected as the Source NAT provider. Select this option if you want to + use two virtual routers in the network for uninterrupted connection: one operating as + the master virtual router and the other as the backup. The master virtual router + receives requests from and sends responses to the user’s VM. The backup virtual router + is activated only when the master is down. After the failover, the backup becomes the + master virtual router. &PRODUCT; deploys the routers on different hosts to ensure + reliability if one host is down. + + + Conserve mode. Indicate whether to use conserve + mode. In this mode, network resources are allocated only when the first virtual machine + starts in the network. When conservative mode is off, the public IP can only be used for + a single service. For example, a public IP used for a port forwarding rule cannot be + used for defining other services, such as SaticNAT or load balancing. When the conserve + mode is on, you can define more than one service on the same public IP. + + If StaticNAT is enabled, irrespective of the status of the conserve mode, no port + forwarding or load balancing rule can be created for the IP. However, you can add the + firewall rules by using the createFirewallRule command. + + + + Tags. 
Network tag to specify which physical network + to use. + + + + + Click Add. + +
diff --git a/docs/en-US/creating-vms.xml b/docs/en-US/creating-vms.xml index 86d89fd2e92..18995979a80 100644 --- a/docs/en-US/creating-vms.xml +++ b/docs/en-US/creating-vms.xml @@ -23,7 +23,7 @@
Creating VMs Virtual machines are usually created from a template. Users can also create blank virtual machines. A blank virtual machine is a virtual machine without an OS template. Users can attach an ISO file and install the OS from the CD/DVD-ROM. - Starting with v3.0.3, you can create a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A new request parameter, startVM, is introduced in the deployVm API to support this feature. For more information, see the Developer's Guide + You can create a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A request parameter, startVM, in the deployVm API provides this feature. For more information, see the Developer's Guide To create a VM from a template: Log in to the &PRODUCT; UI as an administrator or user. diff --git a/docs/en-US/dedicated-ha-hosts.xml b/docs/en-US/dedicated-ha-hosts.xml index ab50700197a..89c721f080a 100644 --- a/docs/en-US/dedicated-ha-hosts.xml +++ b/docs/en-US/dedicated-ha-hosts.xml @@ -24,7 +24,7 @@
Dedicated HA Hosts - (v3.0.3 and greater) One or more hosts can be designated for use only by HA-enabled VMs that are restarting due to a host failure. Setting up a pool of such dedicated HA hosts as the recovery destination for all HA-enabled VMs is useful to: + One or more hosts can be designated for use only by HA-enabled VMs that are restarting due to a host failure. Setting up a pool of such dedicated HA hosts as the recovery destination for all HA-enabled VMs is useful to: Make it easier to determine which VMs have been restarted as part of the &PRODUCT; high-availability function. If a VM is running on a dedicated HA host, then it must be an HA-enabled VM whose original host failed. (With one exception: It is possible for an administrator to manually migrate any VM to a dedicated HA host.). Keep HA-enabled VMs from restarting on hosts which may be reserved for other purposes. diff --git a/docs/en-US/deployment-architecture-overview.xml b/docs/en-US/deployment-architecture-overview.xml index fba36eb85a3..e3103c52c1c 100644 --- a/docs/en-US/deployment-architecture-overview.xml +++ b/docs/en-US/deployment-architecture-overview.xml @@ -48,7 +48,8 @@ A more full-featured installation consists of a highly-available multi-node Management Server installation and up to tens of thousands of hosts using any of several advanced networking setups. For - information about deployment options, see Choosing a Deployment Architecture. + information about deployment options, see the "Choosing a Deployment Architecture" + section of the $PRODUCT; Installation Guide. diff --git a/docs/en-US/devcloud-usage-mode.xml b/docs/en-US/devcloud-usage-mode.xml index bb65f904ccd..bc211ce1436 100644 --- a/docs/en-US/devcloud-usage-mode.xml +++ b/docs/en-US/devcloud-usage-mode.xml @@ -32,7 +32,7 @@ The following diagram shows the architecture of the SandBox mode. 
- + DevCloud.png: Schematic of the DevCloud SandBox architecture @@ -49,7 +49,7 @@ The following schematic shows the architecture of the Host-Only mode. - + DevCloud-hostonly.png: Schematic of the DevCloud host-only architecture diff --git a/docs/en-US/egress-firewall-rule.xml b/docs/en-US/egress-firewall-rule.xml new file mode 100644 index 00000000000..ef0e25efd03 --- /dev/null +++ b/docs/en-US/egress-firewall-rule.xml @@ -0,0 +1,98 @@ + + +%BOOK_ENTITIES; +]> + +
+ Creating Egress Firewall Rules in an Advanced Zone + + The egress firewall rules are supported only on virtual routers. + + + The egress traffic originates from a private network to a public network, such as the + Internet. By default, the egress traffic is blocked, so no outgoing traffic is allowed from a + guest network to the Internet. However, you can control the egress traffic in an Advanced zone + by creating egress firewall rules. When an egress firewall rule is applied, the traffic specific + to the rule is allowed and the remaining traffic is blocked. When all the firewall rules are + removed the default policy, Block, is applied. + Consider the following scenarios to apply egress firewall rules: + + + Allow the egress traffic from specified source CIDR. The Source CIDR is part of guest + network CIDR. + + + Allow the egress traffic with destination protocol TCP,UDP,ICMP, or ALL. + + + Allow the egress traffic with destination protocol and port range. The port range is + specified for TCP, UDP or for ICMP type and code. + + + To configure an egress firewall rule: + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + In Select view, choose Guest networks, then click the Guest network you want. + + + To add an egress rule, click the Egress rules tab and fill out the following fields to + specify what type of traffic is allowed to be sent out of VM instances in this guest + network: + + + + + + egress-firewall-rule.png: adding an egress firewall rule + + + + + CIDR: (Add by CIDR only) To send traffic only to + the IP addresses within a particular address block, enter a CIDR or a comma-separated + list of CIDRs. The CIDR is the base IP address of the destination. For example, + 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0. + + + Protocol: The networking protocol that VMs uses to + send outgoing traffic. 
The TCP and UDP protocols are typically used for data exchange + and end-user communications. The ICMP protocol is typically used to send error messages + or network monitoring data. + + + Start Port, End Port: (TCP, UDP only) A range of + listening ports that are the destination for the outgoing traffic. If you are opening a + single port, use the same number in both fields. + + + ICMP Type, ICMP Code: (ICMP only) The type of + message and error code that are sent. + + + + + Click Add. + + +
diff --git a/docs/en-US/event-framework.xml b/docs/en-US/event-framework.xml new file mode 100644 index 00000000000..88c45c9033d --- /dev/null +++ b/docs/en-US/event-framework.xml @@ -0,0 +1,110 @@ + + +%BOOK_ENTITIES; +]> + + +
+ Event Notification + Event notification framework provides a means for the Management Server components to + publish and subscribe to &PRODUCT; events. Event notification is achieved by implementing the + concept of event bus abstraction in the Management Server. An event bus is introduced in the + Management Server that allows the &PRODUCT;components and extension plug-ins to subscribe to the + events by using the Advanced Message Queuing Protocol (AMQP) client. In &PRODUCT;, a default + implementation of event bus is provided as a plug-in that uses the RabbitMQ AMQP client. The + AMQP client pushes the published events to a compatible AMQP server. Therefore all the &PRODUCT; + events are published to an exchange in the AMQP server. + A new event for state change, resource state change, is introduced as part of Event + notification framework. Every resource, such as user VM, volume, NIC, network, public IP, + snapshot, and template, is associated with a state machine and generates events as part of the + state change. That implies that a change in the state of a resource results in a state change + event, and the event is published in the corresponding state machine on the event bus. All the + &PRODUCT; events (alerts, action events, usage events) and the additional category of resource + state change events, are published on to the events bus. + + Use Cases + The following are some of the use cases: + + + + Usage or Billing Engines: A third-party cloud usage solution can implement a plug-in + that can connects to &PRODUCT; to subscribe to &PRODUCT; events and generate usage data. The + usage data is consumed by their usage software. + + + AMQP plug-in can place all the events on the a message queue, then a AMQP message broker + can provide topic-based notification to the subscribers. 
+ + + Publish and Subscribe notification service can be implemented as a pluggable service in + &PRODUCT; that can provide rich set of APIs for event notification, such as topics-based + subscription and notification. Additionally, the pluggable service can deal with + multi-tenancy, authentication, and authorization issues. + + + + Configuration + As a &PRODUCT; administrator, perform the following one-time configuration to enable event + notification framework. At run time no changes can control the behaviour. + + + + Open 'componentContext.xml. + + + Define a bean named eventNotificationBus as follows: + + + name : Specify a name for the bean. + + + server : The name or the IP address of the RabbitMQ AMQP server. + + + port : The port on which RabbitMQ server is running. + + + username : The username associated with the account to access the RabbitMQ + server. + + + password : The password associated with the username of the account to access the + RabbitMQ server. + + + exchange : The exchange name on the RabbitMQ server where &PRODUCT; events are + published. + A sample bean is given below: + <bean id="eventNotificationBus" class="org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus"> + <property name="name" value="eventNotificationBus"/> + <property name="server" value="127.0.0.1"/> + <property name="port" value="5672"/> + <property name="username" value="guest"/> + <property name="password" value="guest"/> + <property name="exchange" value="cloudstack-events"/> + </bean> + The eventNotificationBus bean represents the + org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus class. + + + + + Restart the Management Server. + + +
diff --git a/docs/en-US/events.xml b/docs/en-US/events.xml index 242ff4511ff..49ef86e62b5 100644 --- a/docs/en-US/events.xml +++ b/docs/en-US/events.xml @@ -21,11 +21,17 @@ specific language governing permissions and limitations under the License. --> -
- Events - - - - + Events + An event is essentially a significant or meaningful change in the state of both virtual and + physical resources associated with a cloud environment. Events are used by monitoring systems, + usage and billing systems, or any other event-driven workflow systems to discern a pattern and + make the right business decision. In &PRODUCT; an event could be a state change of virtual or + psychical resources, an action performed by an user (action events), or policy based events + (alerts). + + + + +
diff --git a/docs/en-US/firewall-rules.xml b/docs/en-US/firewall-rules.xml index 01d072bbcc4..837a4c6f9d0 100644 --- a/docs/en-US/firewall-rules.xml +++ b/docs/en-US/firewall-rules.xml @@ -3,53 +3,80 @@ %BOOK_ENTITIES; ]> -
- Firewall Rules - By default, all incoming traffic to the public IP address is rejected by the firewall. To allow external traffic, you can open firewall ports by specifying firewall rules. You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses. - You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See . - Firewall rules can be created using the Firewall tab in the Management Server UI. This tab is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to "true." - To create a firewall rule: - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation, choose Network. - Click the name of the network where you want to work with. - Click View IP Addresses. - Click the IP address you want to work with. - - Click the Configuration tab and fill in the following values. - - Source CIDR. (Optional) To accept only traffic from IP - addresses within a particular address block, enter a CIDR or a - comma-separated list of CIDRs. Example: 192.168.0.0/22. Leave empty to allow - all CIDRs. - Protocol. The communication protocol in use on the opened - port(s). - Start Port and End Port. The port(s) you want to open on the - firewall. If you are opening a single port, use the same number in both - fields - ICMP Type and ICMP Code. Used only if Protocol is set to - ICMP. Provide the type and code required by the ICMP protocol to fill out - the ICMP header. Refer to ICMP documentation for more details if you are not - sure what to enter - - Click Add. - + Firewall Rules + By default, all incoming traffic to the public IP address is rejected by the firewall. 
To + allow external traffic, you can open firewall ports by specifying firewall rules. You can + optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to + allow only incoming requests from certain IP addresses. + You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is + used, outside access is instead controlled through the use of security groups. See . + In an advanced zone, you can also create egress firewall rules by using the virtual router. + For more information, see . + Firewall rules can be created using the Firewall tab in the Management Server UI. This tab + is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the + &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to + "true." + To create a firewall rule: + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + Click the name of the network where you want to work with. + + + Click View IP Addresses. + + + Click the IP address you want to work with. + + + Click the Configuration tab and fill in the following values. + + + Source CIDR. (Optional) To accept only traffic from + IP addresses within a particular address block, enter a CIDR or a comma-separated list + of CIDRs. Example: 192.168.0.0/22. Leave empty to allow all CIDRs. + + + Protocol. The communication protocol in use on the + opened port(s). + + + Start Port and End Port. The port(s) you want to + open on the firewall. If you are opening a single port, use the same number in both + fields + + + ICMP Type and ICMP Code. Used only if Protocol is + set to ICMP. Provide the type and code required by the ICMP protocol to fill out the + ICMP header. Refer to ICMP documentation for more details if you are not sure what to + enter + + + + + Click Add. + +
diff --git a/docs/en-US/getting-release.xml b/docs/en-US/getting-release.xml index 09f0a7b08fe..b9e97c9b03d 100644 --- a/docs/en-US/getting-release.xml +++ b/docs/en-US/getting-release.xml @@ -29,35 +29,13 @@ Apache CloudStack project download page. - - You'll notice several links under the 'Latest release' section. - - - - - - apache-cloudstack-4.0.0-incubating-src.tar.bz2 - - This is the link to the release itself. - - - - - PGP - - This is a detached cryptographic signature that can be used to help - verify the authenticity of the release. - - - - - MD5 - - An MD5 hash of the release to aid in verify the validity of the release download. - - - - - SHA512 - - A SHA512 hash of the release to aid in verify the validity of the release download. - - - + Prior releases are available via archive.apache.org at http://archive.apache.org/dist/incubator/cloudstack/releases/. + + You'll notice several links under the 'Latest release' section. A link to a file ending in tar.bz2, as well as a PGP/GPG signature, MD5, and SHA512 file. + + The tar.bz2 file contains the Bzip2-compressed tarball with the source code. + The .asc file is a detached cryptographic signature that can be used to help verify the authenticity of the release. + The .md5 file is an MD5 hash of the release to aid in verifying the validity of the release download. + The .sha file is a SHA512 hash of the release to aid in verifying the validity of the release download. +
diff --git a/docs/en-US/globally-configured-limits.xml b/docs/en-US/globally-configured-limits.xml index 48a91f1b01e..ac71112b310 100644 --- a/docs/en-US/globally-configured-limits.xml +++ b/docs/en-US/globally-configured-limits.xml @@ -22,7 +22,7 @@ under the License. --> -
+
Globally Configured Limits In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses. The following table lists limits set in the Global Configuration: diff --git a/docs/en-US/guest-traffic.xml b/docs/en-US/guest-traffic.xml index 16dfa41cf7b..bca635582a8 100644 --- a/docs/en-US/guest-traffic.xml +++ b/docs/en-US/guest-traffic.xml @@ -23,7 +23,14 @@ -->
Guest Traffic - A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address. + A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address. + This figure illustrates a typical guest traffic setup: + + + + + Depicts a guest traffic setup. + The Management Server automatically creates a virtual router for each network. A virtual router is a special virtual machine that runs on the hosts. Each virtual router has three network interfaces. Its eth0 interface serves as the gateway for the guest traffic and has the IP address of 10.1.1.1. Its eth1 interface is used by the system to configure the virtual router. Its eth2 interface is assigned a public IP address for public traffic. The virtual router provides DHCP and will automatically assign an IP address for each guest VM within the IP range assigned for the network. The user can manually reconfigure guest VMs to assume different IP addresses. 
Source NAT is automatically configured in the virtual router to forward outbound traffic for all guest VMs diff --git a/docs/en-US/images/egress-firewall-rule.png b/docs/en-US/images/egress-firewall-rule.png new file mode 100644 index 00000000000..fa1d8ecd0bd Binary files /dev/null and b/docs/en-US/images/egress-firewall-rule.png differ diff --git a/docs/en-US/images/resize-volume-icon.png b/docs/en-US/images/resize-volume-icon.png new file mode 100644 index 00000000000..48499021f06 Binary files /dev/null and b/docs/en-US/images/resize-volume-icon.png differ diff --git a/docs/en-US/images/resize-volume.png b/docs/en-US/images/resize-volume.png new file mode 100644 index 00000000000..6195623ab49 Binary files /dev/null and b/docs/en-US/images/resize-volume.png differ diff --git a/docs/en-US/ip-forwarding-firewalling.xml b/docs/en-US/ip-forwarding-firewalling.xml index c154b078da3..54e18b7cfbc 100644 --- a/docs/en-US/ip-forwarding-firewalling.xml +++ b/docs/en-US/ip-forwarding-firewalling.xml @@ -3,28 +3,30 @@ %BOOK_ENTITIES; ]> -
- IP Forwarding and Firewalling - By default, all incoming traffic to the public IP address is rejected. All outgoing traffic from the guests is translated via NAT to the public IP address and is allowed. - To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For example, you can use a firewall rule to open a range of ports on the public IP address, such as 33 through 44. Then use port forwarding rules to direct traffic from individual ports within that range to specific ports on user VMs. For example, one port forwarding rule could route incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. - - + IP Forwarding and Firewalling + By default, all incoming traffic to the public IP address is rejected. All outgoing traffic + from the guests is translated via NAT to the public IP address and is allowed. + To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For + example, you can use a firewall rule to open a range of ports on the public IP address, such as + 33 through 44. Then use port forwarding rules to direct traffic from individual ports within + that range to specific ports on user VMs. For example, one port forwarding rule could route + incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. + +
diff --git a/docs/en-US/management-server-install-client.xml b/docs/en-US/management-server-install-client.xml index 7e81ec735fc..b5329e3442a 100644 --- a/docs/en-US/management-server-install-client.xml +++ b/docs/en-US/management-server-install-client.xml @@ -1,5 +1,5 @@ - %BOOK_ENTITIES; ]> @@ -11,9 +11,7 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -31,16 +29,6 @@ The &PRODUCT; Management server can be installed using either RPM or DEB packages. These packages will depend on everything you need to run the Management server. -
- Downloading vhd-util - This procedure is required only for installations where XenServer is installed on the hypervisor hosts. - Before setting up the Management Server, download vhd-util from vhd-util. - If the Management Server is RHEL or CentOS, copy vhd-util to - /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver. - If the Management Server is Ubuntu, copy vhd-util to - /usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/vhd-util. -
Install on CentOS/RHEL We start by installing the required packages: @@ -50,4 +38,16 @@ Install on Ubuntu apt-get install cloud-client
-
\ No newline at end of file + +
+ Downloading vhd-util + This procedure is required only for installations where XenServer is installed on the + hypervisor hosts. + Before setting up the Management Server, download vhd-util from vhd-util. + If the Management Server is RHEL or CentOS, copy vhd-util to + /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver. + If the Management Server is Ubuntu, copy vhd-util to + /usr/lib/cloud/common/scripts/vm/hypervisor/xenserver. +
+
diff --git a/docs/en-US/management-server-install-db-local.xml b/docs/en-US/management-server-install-db-local.xml index 242249040b1..918cdc0a265 100644 --- a/docs/en-US/management-server-install-db-local.xml +++ b/docs/en-US/management-server-install-db-local.xml @@ -11,9 +11,7 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -29,19 +27,23 @@ MySQL. See . - Install MySQL from the package repository from your distribution: + Install MySQL from the package repository of your distribution: On RHEL or CentOS: yum install mysql-server On Ubuntu: apt-get install mysql-server - Edit the MySQL configuration (/etc/my.cnf or /etc/mysql/my.cnf, depending on your OS) - and insert the following lines in the [mysqld] section. You can put these lines below the - datadir line. The max_connections parameter should be set to 350 multiplied by the number of - Management Servers you are deploying. This example assumes one Management Server. + Open the MySQL configuration file. The configuration file is /etc/my.cnf or + /etc/mysql/my.cnf, depending on your OS. + + + Insert the following lines in the [mysqld] section. + You can put these lines below the datadir line. The max_connections parameter should be + set to 350 multiplied by the number of Management Servers you are deploying. This example + assumes one Management Server. - On Ubuntu, you can also create a file /etc/mysql/conf.d/cloudstack.cnf and add these + On Ubuntu, you can also create a file /etc/mysql/conf.d/cloudstack.cnf and add these directives there. Don't forget to add [mysqld] on the first line of the file. 
innodb_rollback_on_timeout=1 @@ -68,6 +70,36 @@ binlog-format = 'ROW' questions. mysql_secure_installation + + &PRODUCT; can be blocked by security mechanisms, such as SELinux. Disable SELinux to + ensure + that the Agent has all the required permissions. + Configure SELinux (RHEL and CentOS): + + + Check whether SELinux is installed on your machine. If not, you can skip this + section. + In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this + with: + $ rpm -qa | grep selinux + + + Set the SELINUX variable in /etc/selinux/config to + "permissive". This ensures that the permissive setting will be maintained after a system + reboot. + In RHEL or CentOS: + vi /etc/selinux/config + Change the following line + SELINUX=enforcing + to this: + SELINUX=permissive + + + Set SELinux to permissive starting immediately, without requiring a system + reboot. + $ setenforce permissive + + + Set up the database. The following command creates the "cloud" user on the database. @@ -98,10 +130,10 @@ binlog-format = 'ROW' recommended that you replace this with a more secure value. See . - - (Optional) For management_server_ip, you may explicitly specify cluster management - server node IP. If not specified, the local IP address will be used. - + + (Optional) For management_server_ip, you may explicitly specify cluster management + server node IP. If not specified, the local IP address will be used. + cloud-setup-databases cloud:<dbpassword>@localhost \ --deploy-as=root:<password> \ @@ -116,9 +148,6 @@ binlog-format = 'ROW' If you are running the KVM hypervisor on the same machine with the Management Server, edit /etc/sudoers and add the following line: Defaults:cloud !requiretty - - This type of single-machine setup is recommended only for a trial installation. 
- Now that the database is set up, you can finish configuring the OS for the Management diff --git a/docs/en-US/management-server-install-nfs-shares.xml b/docs/en-US/management-server-install-nfs-shares.xml index e917a8e0b6c..a12e09c3eca 100644 --- a/docs/en-US/management-server-install-nfs-shares.xml +++ b/docs/en-US/management-server-install-nfs-shares.xml @@ -26,7 +26,7 @@ Prepare NFS Shares &PRODUCT; needs a place to keep primary and secondary storage (see Cloud Infrastructure Overview). Both of these can be NFS shares. This section tells how to set up the NFS shares before adding the storage to &PRODUCT;. Alternative Storage - NFS is not the only option for primary or secondary storage. For example, you may use a Ceph RDB cluster, GlusterFS, iSCSI, and otthers. + NFS is not the only option for primary or secondary storage. For example, you may use Ceph RBD, GlusterFS, iSCSI, and others. The choice of storage system will depend on the choice of hypervisor and whether you are dealing with primary or secondary storage. The requirements for primary and secondary storage are described in: diff --git a/docs/en-US/networks.xml b/docs/en-US/networks.xml index a7b9ea12466..f877aa55584 100644 --- a/docs/en-US/networks.xml +++ b/docs/en-US/networks.xml @@ -31,6 +31,7 @@ + @@ -45,4 +46,5 @@ - \ No newline at end of file + + diff --git a/docs/en-US/nfs-shares-on-management-server.xml b/docs/en-US/nfs-shares-on-management-server.xml index c000ce4818d..881ca8d7600 100644 --- a/docs/en-US/nfs-shares-on-management-server.xml +++ b/docs/en-US/nfs-shares-on-management-server.xml @@ -27,9 +27,9 @@ This section tells how to set up NFS shares for primary and secondary storage on the same node with the Management Server. This is more typical of a trial installation, but is technically possible in a larger deployment. It is assumed that you will have less than 16TB of storage on the host. The exact commands for the following steps may vary depending on your operating system version. 
- On Ubuntu/Debian systems, you'll need to install the nfs-kernel-server package: + On RHEL/CentOS systems, you'll need to install the nfs-utils package: -$ sudo apt-get install nfs-kernel-server +$ sudo yum install nfs-utils On the Management Server host, create two directories that you will use for primary and secondary storage. For example: diff --git a/docs/en-US/persistent-network.xml b/docs/en-US/persistent-network.xml new file mode 100644 index 00000000000..1ccc99c59a6 --- /dev/null +++ b/docs/en-US/persistent-network.xml @@ -0,0 +1,100 @@ + + +%BOOK_ENTITIES; +]> + + +
+ Persistent Networks + The network that you can provision without having to deploy any VMs on it is called a + persistent network. A persistent network can be part of a VPC or a non-VPC environment. + When you create other types of network, a network is only a database entry until the first + VM is created on that network. When the first VM is created, a VLAN ID is assigned and the + network is provisioned. Also, when the last VM is destroyed, the VLAN ID is released and the + network is no longer available. With the addition of persistent network, you will have the + ability to create a network in &PRODUCT; in which physical devices can be deployed without + having to run any VMs. Additionally, you can deploy physical devices on that network. + One of the advantages of having a persistent network is that you can create a VPC with a tier + consisting of only physical devices. For example, you might create a VPC for a three-tier + application, deploy VMs for Web and Application tier, and use physical machines for the + Database tier. Another use case is that if you are providing services by using physical + hardware, you can define the network as persistent and therefore even if all its VMs are + destroyed the services will not be discontinued. +
+ Persistent Network Considerations + + + Persistent network is designed for isolated networks. + + + All default network offerings are non-persistent. + + + A network offering cannot be edited because changing it affects the behavior of the + existing networks that were created using this network offering. + + + When you create a guest network, the network offering that you select defines the + network persistence. This in turn depends on whether persistent network is enabled in the + selected network offering. + + + An existing network can be made persistent by changing its network offering to an + offering that has the Persistent option enabled. While setting this property, even if the + network has no running VMs, the network is provisioned. + + + An existing network can be made non-persistent by changing its network offering to an + offering that has the Persistent option disabled. If the network has no running VMs, + during the next network garbage collection run the network is shut down. + + + When the last VM on a network is destroyed, the network garbage collector checks if + the network offering associated with the network is persistent, and shuts down the network + only if it is non-persistent. + +
+
+ Creating a Persistent Guest Network + To create a persistent network, perform the following: + + + Create a network offering with the Persistent option enabled. + See . + See the Administration Guide. + + + Select Network from the left navigation pane. + + + Select the guest network that you want to offer this network service to. + + + Click the Edit button. + + + From the Network Offering drop-down, select the persistent network offering you have + just created. + + + Click OK. + + +
+
diff --git a/docs/en-US/plugin-niciranvp-devicemanagement.xml b/docs/en-US/plugin-niciranvp-devicemanagement.xml index 2423ce3925d..57b8eee9d7d 100644 --- a/docs/en-US/plugin-niciranvp-devicemanagement.xml +++ b/docs/en-US/plugin-niciranvp-devicemanagement.xml @@ -22,7 +22,7 @@ -->
Device-management - In CloudStack 4.0.x each Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the "addNiciraNVPDevice" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicra NVP Controller. + In &PRODUCT; 4.0.x each Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the "addNiciraNVPDevice" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicira NVP Controller. The plugin introduces a set of new API calls to manage the devices, see below or refer to the API reference. addNiciraNvpDevice @@ -44,4 +44,4 @@ listNiciraNVPDevices -
\ No newline at end of file +
diff --git a/docs/en-US/plugin-niciranvp-features.xml b/docs/en-US/plugin-niciranvp-features.xml index b71e67f4199..c346bfb64e3 100644 --- a/docs/en-US/plugin-niciranvp-features.xml +++ b/docs/en-US/plugin-niciranvp-features.xml @@ -22,12 +22,12 @@ -->
Features of the Nicira NVP Plugin - In CloudStack release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when an tennant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller. + In &PRODUCT; release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when an tenant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller. The plugin has been tested with Nicira NVP versions 2.1.0, 2.2.0 and 2.2.1 - In CloudStack 4.0.0-incubating only the XenServer hypervisor is supported for use in + In &PRODUCT; 4.0.0-incubating only the XenServer hypervisor is supported for use in combination with Nicira NVP. - In CloudStack 4.1.0-incubating both KVM and XenServer hypervisors are + In &PRODUCT; 4.1.0-incubating both KVM and XenServer hypervisors are supported. - In CloudStack 4.0.0-incubating the UI components for this plugin are not complete, + In &PRODUCT; 4.0.0-incubating the UI components for this plugin are not complete, configuration is done by sending commands to the API.
diff --git a/docs/en-US/plugin-niciranvp-preparations.xml b/docs/en-US/plugin-niciranvp-preparations.xml index 86b795ccd0b..762c941fd13 100644 --- a/docs/en-US/plugin-niciranvp-preparations.xml +++ b/docs/en-US/plugin-niciranvp-preparations.xml @@ -23,7 +23,7 @@
Prerequisites Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that. - CloudStack needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type. + &PRODUCT; needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type. The Guest traffic type should be configured with the traffic label that matches the name of the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details on how to set this up in XenServer or KVM. @@ -33,6 +33,6 @@ The username to access the API The password to access the API The UUID of the Transport Zone that contains the hypervisors in this Zone - The UUID of the Physical Network that will used for the Guest networks + The UUID of the Physical Network that will be used for the Guest networks -
\ No newline at end of file +
diff --git a/docs/en-US/plugin-niciranvp-uuidreferences.xml b/docs/en-US/plugin-niciranvp-uuidreferences.xml index c912971736b..cb5f1cae834 100644 --- a/docs/en-US/plugin-niciranvp-uuidreferences.xml +++ b/docs/en-US/plugin-niciranvp-uuidreferences.xml @@ -22,9 +22,9 @@ -->
UUID References - The plugin maintains several references in the CloudStack database to items created on the NVP Controller. - Every guest network this is created will have its broadcast type set to Lswitch and if the network is in state "Implemented", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller. + The plugin maintains several references in the &PRODUCT; database to items created on the NVP Controller. + Every guest network that is created will have its broadcast type set to Lswitch and if the network is in state "Implemented", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller. The Nics that are connected to one of the Logical Switches will have their Logical Switch Port UUID listed in the nicira_nvp_nic_map table All devices created on the NVP Controller will have a tag set to domain-account of the owner of the network, this string can be used to search for items in the NVP Controller. -
\ No newline at end of file +
diff --git a/docs/en-US/reserved-ip-addresses-non-csvms.xml b/docs/en-US/reserved-ip-addresses-non-csvms.xml new file mode 100644 index 00000000000..18ba3ca0e42 --- /dev/null +++ b/docs/en-US/reserved-ip-addresses-non-csvms.xml @@ -0,0 +1,163 @@ + + +%BOOK_ENTITIES; +]> + + +
+ IP Reservation in Isolated Guest Networks + In isolated guest networks, a part of the guest IP address space can be reserved for + non-&PRODUCT; VMs or physical servers. To do so, you configure a range of Reserved IP addresses + by specifying the CIDR when a guest network is in Implemented state. If your customers wish to + have non-&PRODUCT; controlled VMs or physical servers on the same network, they can share a part + of the IP address space that is primarily provided to the guest network. + In an Advanced zone, an IP address range or a CIDR is assigned to a network when the network + is defined. The &PRODUCT; virtual router acts as the DHCP server and uses CIDR for assigning IP + addresses to the guest VMs. If you decide to reserve IP ranges for non-&PRODUCT; purposes, you + can specify a part of the IP address range or the CIDR that should only be allocated by the DHCP + service of the virtual router to the guest VMs created in &PRODUCT;. The remaining IPs in that + network are called Reserved IP Range. When IP reservation is configured, the administrator can + add additional VMs or physical servers that are not part of &PRODUCT; to the same network and + assign them the Reserved IP addresses. &PRODUCT; guest VMs cannot acquire IPs from the Reserved + IP Range. +
+ IP Reservation Considerations + Consider the following before you reserve an IP range for non-&PRODUCT; machines: + + + IP Reservation can be applied only when the network is in Implemented state. + + + No IP Reservation is done by default. + + + Guest VM CIDR you specify must be a subset of the network CIDR. + + + Specify a valid Guest VM CIDR. IP Reservation is applied only if no active IPs exist + outside the Guest VM CIDR. + You cannot apply IP Reservation if any VM is alloted with an IP address that is + outside the Guest VM CIDR. + + + To reset an existing IP Reservation, apply IP reservation by specifying the value of + network CIDR in the CIDR field. + For example, the following table describes three scenarios of guest network + creation: + + + + + + + + + + Case + CIDR + Network CIDR + Reserved IP Range for Non-&PRODUCT; VMs + Description + + + + + 1 + 10.1.1.0/24 + None + None + No IP Reservation. + + + 2 + 10.1.1.0/26 + 10.1.1.0/24 + 10.1.1.64 to 10.1.1.254 + IP Reservation configured by the UpdateNetwork API with + guestvmcidr=10.1.1.0/26 or enter 10.1.1.0/26 in the CIDR field in the + UI. + + + 3 + 10.1.1.0/24 + None + None + Removing IP Reservation by the UpdateNetwork API with + guestvmcidr=10.1.1.0/24 or enter 10.1.1.0/24 in the CIDR field in the UI. + + + + + + + +
+
+ Limitations + + + The IP Reservation is not supported if active IPs are found outside the Guest VM + CIDR. + + + Upgrading a network offering that causes a change in CIDR (such as upgrading an + offering with no external devices to one with external devices) voids any existing IP + Reservation. Reconfigure IP Reservation in the new re-implemented network. + +
+
+ Best Practices + Apply IP Reservation to the guest network as soon as the network state changes to + Implemented. If you apply reservation soon after the first guest VM is deployed, fewer + conflicts occur while applying reservation. +
+
+ Reserving an IP Range + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + Click the name of the network you want to modify. + + + In the Details tab, click Edit. + + + + + edit-icon.png: button to edit a network + + + The CIDR field changes to editable one. + + + In CIDR, specify the Guest VM CIDR. + + + Click Apply. + Wait for the update to complete. The Network CIDR and the Reserved IP Range are + displayed on the Details page. + + +
+
diff --git a/docs/en-US/reset-ssh-key-dev.xml b/docs/en-US/reset-ssh-key-dev.xml new file mode 100644 index 00000000000..1a904e566ef --- /dev/null +++ b/docs/en-US/reset-ssh-key-dev.xml @@ -0,0 +1,27 @@ + + +%BOOK_ENTITIES; +]> + +
+ Resetting SSH Keys to Access VMs + Use the resetSSHKeyForVirtualMachine API to set or reset the SSH keypair assigned to a + virtual machine. With the addition of this feature, a lost or compromised SSH keypair can be + changed, and the user can access the VM by using the new keypair. Just create or register a new + keypair, then call resetSSHKeyForVirtualMachine. +
diff --git a/docs/en-US/resizing-volumes.xml b/docs/en-US/resizing-volumes.xml index 471411df5fe..42b584bf6c6 100644 --- a/docs/en-US/resizing-volumes.xml +++ b/docs/en-US/resizing-volumes.xml @@ -11,9 +11,7 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -21,18 +19,80 @@ specific language governing permissions and limitations under the License. --> -
- Resizing Volumes - &PRODUCT; does not provide the ability to resize root disks or data disks; the disk size is fixed based on the template used to create the VM. However, the tool VHD Resizer), while not officially supported by Cloud.com or Citrix, might provide a workaround. To increase disk size with VHD Resizer: - - Get the VHD from the secondary storage. - Import it into VHD Resizer. - Resize the VHD. - Upload the new VHD. - Create a new VM. - Take a snapshot, then create a new template from that snapshot. - For more information, see How to Resize a Provisioning Server 5 Virtual Disk at the Citrix Knowledge Center - + Resizing Volumes + &PRODUCT; provides the ability to resize data disks; &PRODUCT; controls volume size by using + disk offerings. This provides &PRODUCT; administrators with the flexibility to choose how much + space they want to make available to the end users. Volumes within the disk offerings with the + same storage tag can be resized. For example, if you only want to offer 10, 50, and 100 GB + offerings, the allowed resize should stay within those limits. That implies if you define a 10 + GB, a 50 GB and a 100 GB disk offerings, a user can upgrade from 10 GB to 50 GB, or 50 GB to 100 + GB. If you create a custom-sized disk offering, then you have the option to resize the volume by + specifying a new, larger size. + Additionally, using the resizeVolume API, a data volume can be moved from a static disk + offering to a custom disk offering with the size specified. This functionality allows those who + might be billing by certain volume sizes or disk offerings to stick to that model, while + providing the flexibility to migrate to whatever custom size necessary. + This feature is supported on KVM, XenServer, and VMware hosts. However, shrinking volumes is + not supported on VMware hosts. + Before you try to resize a volume, consider the following: + + + The VMs associated with the volume are stopped. 
+ + + The data disks associated with the volume are removed. + + + When a volume is shrunk, the disk associated with it is simply truncated, and doing so + would put its content at risk of data loss. Therefore, resize any partitions or file systems + before you shrink a data disk so that all the data is moved off from that disk. + + + To resize a volume: + + + Log in to the &PRODUCT; UI as a user or admin. + + + In the left navigation bar, click Storage. + + + In Select View, choose Volumes. + + + Select the volume name in the Volumes list, then click the Resize Volume button + + + + + resize-volume-icon.png: button to display the resize volume option. + + + + + In the Resize Volume pop-up, choose desired characteristics for the storage. + + + + + + resize-volume.png: option to resize a volume. + + + + + If you select Custom Disk, specify a custom size. + + + Click Shrink OK to confirm that you are reducing the size of a volume. + This parameter protects against inadvertent shrinking of a disk, which might lead to + the risk of data loss. You must sign off that you know what you are doing. + + + + + Click OK. + +
- diff --git a/docs/en-US/shared-networks.xml b/docs/en-US/shared-networks.xml index d5a7ede9bdb..4c323208135 100644 --- a/docs/en-US/shared-networks.xml +++ b/docs/en-US/shared-networks.xml @@ -25,8 +25,7 @@ Shared Networks A shared network can be accessed by virtual machines that belong to many different accounts. Network Isolation on shared networks is accomplished using techniques such as - security groups (supported only in basic zones in &PRODUCT; 3.0.3 and later - versions). + security groups (supported only in basic zones). Shared Networks are created by the administrator Shared Networks can be designated to a certain domain diff --git a/docs/en-US/site-to-site-vpn.xml b/docs/en-US/site-to-site-vpn.xml index 6570aabe0bd..a5899eac4f1 100644 --- a/docs/en-US/site-to-site-vpn.xml +++ b/docs/en-US/site-to-site-vpn.xml @@ -55,6 +55,9 @@ Create VPN connection from the VPC VPN gateway to the customer VPN gateway.
+ Appropriate events are generated on the &PRODUCT; UI when status of a Site-to-Site VPN + connection changes from connected to disconnected, or vice versa. Currently no events are generated + when establishing a VPN connection fails or pending. diff --git a/docs/en-US/source-prereqs.xml b/docs/en-US/source-prereqs.xml index 6c2bc2a3cb8..2e40a58c59a 100644 --- a/docs/en-US/source-prereqs.xml +++ b/docs/en-US/source-prereqs.xml @@ -30,12 +30,15 @@ for package management. - The minimum bootstrapped prerequisites for building &PRODUCT; includes - the following: + You will need, at a minimum, the following to compile &PRODUCT;: - ant - maven (version 3) - Java (Java 6/OpenJDK 1.6) + Maven (version 3) + Java (OpenJDK 1.6 or Java 7/OpenJDK 1.7) + Apache Web Services Common Utilities (ws-commons-util) + MySQL + MySQLdb (provides Python database API) + Tomcat 6 (not 6.0.35) + genisoimage rpmbuild or dpkg-dev diff --git a/docs/en-US/source.xml b/docs/en-US/source.xml index 3cb4af2321f..5d911c23050 100644 --- a/docs/en-US/source.xml +++ b/docs/en-US/source.xml @@ -24,20 +24,10 @@ Building from Source - The official &PRODUCT; release is always in source code form. While there may - exist convenience binaries in various forms from a number of places, the - source is the canonical release will be source. In this document we'll cover - acquiring the source release, building that into binary, deployable packages. - - - While building and deploying directly from source is certainly possible, the reality - of Infrastructure-as-a-Service cloud computing implies a need to deploy packages on - a potentially large number of systems, which RPMs and DEBs fill nicely. - - - Building and deploying directly from source is thus outside the scope of this - document, but is documented in the INSTALL.md file in the release. - + The official &PRODUCT; release is always in source code form. You will likely be able to find "convenience binaries," the source is the canonical release. 
In this section, we'll cover acquiring the source release and building that so that you can deploy it using Maven or create Debian packages or RPMs. + Note that building and deploying directly from source is typically not the most efficient way to deploy an IaaS. However, we will cover that method as well as building RPMs or Debian packages for deploying &PRODUCT;. + The instructions here are likely version-specific. That is, the method for building from source for the 4.0.x series is different from the 4.1.x series. + If you are working with a unreleased version of &PRODUCT;, see the INSTALL.md file in the top-level directory of the release. diff --git a/docs/en-US/tools.xml b/docs/en-US/tools.xml index 66fe894e171..db6a510d593 100644 --- a/docs/en-US/tools.xml +++ b/docs/en-US/tools.xml @@ -26,4 +26,5 @@ Tools + diff --git a/docs/en-US/upload-existing-volume-to-vm.xml b/docs/en-US/upload-existing-volume-to-vm.xml index 86dc8e54758..d2b657164c8 100644 --- a/docs/en-US/upload-existing-volume-to-vm.xml +++ b/docs/en-US/upload-existing-volume-to-vm.xml @@ -29,7 +29,7 @@ You cannot upload a volume if the preconfigured volume limit has already been reached. The default limit for the cloud is set in the global configuration parameter max.account.volumes, but administrators can also set per-domain limits that are different from the global default. See Setting Usage Limits To upload a volume: - (Optional) Create an MD5 hash (checksum) of the disk image file that you are going to upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data corruption has occurred. + (Optional) Create an MD5 hash (checksum) of the disk image file that you are going to upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data corruption has occurred. Log in to the &PRODUCT; UI as an administrator or user In the left navigation bar, click Storage. Click Upload Volume. @@ -68,9 +68,9 @@
URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The type of file at the URL must match the value chosen in Format. For example, if Format is VHD, the URL might look like the following: http://yourFileServerIP/userdata/myDataDisk.vhd - MD5 checksum. (Optional) Use the hash that you created in step 1. + MD5 checksum. (Optional) Use the hash that you created in step .
- Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step 5, and make sure the status is Uploaded. + Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step , and make sure the status is Uploaded.
diff --git a/docs/en-US/using-vpn-with-mac.xml b/docs/en-US/using-vpn-with-mac.xml index 718ebc777a9..a41dcab5e02 100644 --- a/docs/en-US/using-vpn-with-mac.xml +++ b/docs/en-US/using-vpn-with-mac.xml @@ -24,5 +24,18 @@
Using VPN with Mac OS X - In Mac OS X, in Network Preferences - Advanced, make sure Send all traffic over VPN connection is not checked. + First, be sure you've configured the VPN settings in your &PRODUCT; install. This section is only concerned with connecting via Mac OS X to your VPN. + Note, these instructions were written on Mac OS X 10.7.5. They may differ slightly in older or newer releases of Mac OS X. + + On your Mac, open System Preferences and click Network. + Make sure Send all traffic over VPN connection is not checked. + If your preferences are locked, you'll need to click the lock in the bottom left-hand corner to make any changes and provide your administrator credentials. + You will need to create a new network entry. Click the plus icon on the bottom left-hand side and you'll see a dialog that says "Select the interface and enter a name for the new service." Select VPN from the Interface drop-down menu, and "L2TP over IPSec" for the VPN Type. Enter whatever you like within the "Service Name" field. + You'll now have a new network interface with the name of whatever you put in the "Service Name" field. For the purposes of this example, we'll assume you've named it "CloudStack." Click on that interface and provide the IP address of the interface for your VPN under the Server Address field, and the user name for your VPN under Account Name. + Click Authentication Settings, and add the user's password under User Authentication and enter the pre-shared IPSec key in the Shared Secret field under Machine Authentication. Click OK. + You may also want to click the "Show VPN status in menu bar" but that's entirely optional. + Now click "Connect" and you will be connected to the CloudStack VPN. + + +
diff --git a/docs/en-US/using-vpn-with-windows.xml b/docs/en-US/using-vpn-with-windows.xml index e0e15692d35..c5d95ddd3e0 100644 --- a/docs/en-US/using-vpn-with-windows.xml +++ b/docs/en-US/using-vpn-with-windows.xml @@ -26,17 +26,17 @@ Using VPN with Windows The procedure to use VPN varies by Windows version. Generally, the user must edit the VPN properties and make sure that the default route is not the VPN. The following steps are for Windows L2TP clients on Windows Vista. The commands should be similar for other Windows versions. - Log in to the &PRODUCT; UI and click on the source NAT IP for the account. The VPN tab should display the IPsec preshared key. Make a note of this and the source NAT IP. The UI also lists one or more users and their passwords. Choose one of these users, or, if none exists, add a user and password. + Log in to the &PRODUCT; UI and click on the source NAT IP for the account. The VPN tab should display the IPsec preshared key. Make a note of this and the source NAT IP. The UI also lists one or more users and their passwords. Choose one of these users, or, if none exists, add a user and password. On the Windows box, go to Control Panel, then select Network and Sharing center. Click Setup a connection or network. In the next dialog, select No, create a new connection. In the next dialog, select Use my Internet Connection (VPN). - In the next dialog, enter the source NAT IP from step 1 and give the connection a name. Check Don't connect now. - In the next dialog, enter the user name and password selected in step 1. + In the next dialog, enter the source NAT IP from step and give the connection a name. Check Don't connect now. + In the next dialog, enter the user name and password selected in step . Click Create. Go back to the Control Panel and click Network Connections to see the new connection. The connection is not active yet. Right-click the new connection and select Properties. In the Properties dialog, select the Networking tab. 
- In Type of VPN, choose L2TP IPsec VPN, then click IPsec settings. Select Use preshared key. Enter the preshared key from Step 1. + In Type of VPN, choose L2TP IPsec VPN, then click IPsec settings. Select Use preshared key. Enter the preshared key from step . The connection is ready for activation. Go back to Control Panel -> Network Connections and double-click the created connection. - Enter the user name and password from Step 1. + Enter the user name and password from step .
diff --git a/docs/en-US/verifying-source.xml b/docs/en-US/verifying-source.xml index f8bd102379d..b20b9bbacf9 100644 --- a/docs/en-US/verifying-source.xml +++ b/docs/en-US/verifying-source.xml @@ -32,11 +32,11 @@ Getting the KEYS To enable you to verify the GPG signature, you will need to download the - KEYS + KEYS file. - You next need to import those keys, which you can do by running the following command: + You next need to import those keys, which you can do by running: # gpg --import KEYS diff --git a/docs/en-US/vlan-provisioning.xml b/docs/en-US/vlan-provisioning.xml index 8abd5da0a75..9345647d47a 100644 --- a/docs/en-US/vlan-provisioning.xml +++ b/docs/en-US/vlan-provisioning.xml @@ -21,10 +21,23 @@ specific language governing permissions and limitations under the License. --> -
- VLAN Provisioning - &PRODUCT; automatically creates and destroys interfaces bridged to VLANs on the hosts. In general the administrator does not need to manage this process. - &PRODUCT; manages VLANs differently based on hypervisor type. For XenServer or KVM, the VLANs are created on only the hosts where they will be used and then they are destroyed when all guests that require them have been terminated or moved to another host. - For vSphere the VLANs are provisioned on all hosts in the cluster even if there is no guest running on a particular Host that requires the VLAN. This allows the administrator to perform live migration and other functions in vCenter without having to create the VLAN on the destination Host. Additionally, the VLANs are not removed from the Hosts when they are no longer needed. + VLAN Provisioning + &PRODUCT; automatically creates and destroys interfaces bridged to VLANs on the hosts. In + general the administrator does not need to manage this process. + &PRODUCT; manages VLANs differently based on hypervisor type. For XenServer or KVM, the + VLANs are created on only the hosts where they will be used and then they are destroyed when all + guests that require them have been terminated or moved to another host. + For vSphere the VLANs are provisioned on all hosts in the cluster even if there is no guest + running on a particular Host that requires the VLAN. This allows the administrator to perform + live migration and other functions in vCenter without having to create the VLAN on the + destination Host. Additionally, the VLANs are not removed from the Hosts when they are no longer + needed. + You can use the same VLANs on different physical networks provided that each physical + network has its own underlying layer-2 infrastructure, such as switches. For example, you can + specify VLAN range 500 to 1000 while deploying physical networks A and B in an Advanced zone + setup. 
This capability allows you to set up an additional layer-2 physical infrastructure on a + different physical NIC and use the same set of VLANs if you run out of VLANs. Another advantage + is that you can use the same set of IPs for different customers, each one with their own routers + and the guest networks on different physical NICs.
diff --git a/docs/en-US/vmx-settings-dev.xml b/docs/en-US/vmx-settings-dev.xml new file mode 100644 index 00000000000..a0fdf7f7825 --- /dev/null +++ b/docs/en-US/vmx-settings-dev.xml @@ -0,0 +1,40 @@ + + +%BOOK_ENTITIES; +]> + +
+ Additional VMX Settings + A VMX (.vmx) file is the primary configuration file for a virtual machine. When a new VM is + created, information on the operating system, disk sizes, and networking is stored in this file. + The VM actively writes to its .vmx file for all the configuration changes. The VMX file is + typically located in the directory where the VM is created. In Windows Vista / Windows 7 / + Windows Server 2008, the default location is C:\Users\<your_user_name>\My + Documents\Virtual Machines\<virtual_machine_name>.vmx. In Linux, vmware-cmd -l lists the + full path to all the registered VMX files. Any manual additions to the .vmx file from ESX/ESXi + are overwritten by the entries stored in the vCenter Server database. Therefore, before you edit + a .vmx file, first remove the VM from the vCenter server's inventory and register the VM again + after editing. + The CloudStack API that supports passing some of the VMX settings is registerTemplate. The + supported parameters are rootDiskController, nicAdapter, and keyboard. In addition to these + existing VMX parameters, you can now use the keyboard.typematicMinDelay parameter in the + registerTemplate API call. This parameter controls the amount of delay for the repeated key + strokes on remote consoles. For more information on keyboard.typematicMinDelay, see keyboard.typematicMinDelay. +
diff --git a/docs/en-US/whats-new.xml b/docs/en-US/whats-new.xml index 77b3ec3df22..761d7a2eb37 100644 --- a/docs/en-US/whats-new.xml +++ b/docs/en-US/whats-new.xml @@ -3,40 +3,46 @@ %BOOK_ENTITIES; ]> - - - What's New in the API? - The following describes any new major features of each &PRODUCT; version as it applies to API usage. -
- What's New in the API for 4.0 - - -
-
- What's New in the API for 3.0 - - - - - - -
+ What's New in the API? + The following describes any new major features of each &PRODUCT; version as it applies to + API usage. +
+ What's New in the API for 4.1 + + + + + +
+
+ What's New in the API for 4.0 + + +
+
+ What's New in the API for 3.0 + + + + + + +
diff --git a/docs/en-US/working-with-snapshots.xml b/docs/en-US/working-with-snapshots.xml index a381707e8f0..b984439203c 100644 --- a/docs/en-US/working-with-snapshots.xml +++ b/docs/en-US/working-with-snapshots.xml @@ -29,4 +29,8 @@ Users can create snapshots manually or by setting up automatic recurring snapshot policies. Users can also create disk volumes from snapshots, which may be attached to a VM like any other disk volume. Snapshots of both root disks and data disks are supported. However, &PRODUCT; does not currently support booting a VM from a recovered root disk. A disk recovered from snapshot of a root disk is treated as a regular data disk; the data on recovered disk can be accessed by attaching the disk to a VM. A completed snapshot is copied from primary storage to secondary storage, where it is stored until deleted or purged by newer snapshot. + + + + diff --git a/docs/en-US/working-with-system-vm.xml b/docs/en-US/working-with-system-vm.xml index 97459f947bf..70f7dd1aa4e 100644 --- a/docs/en-US/working-with-system-vm.xml +++ b/docs/en-US/working-with-system-vm.xml @@ -1,33 +1,39 @@ - %BOOK_ENTITIES; ]> - - - Working with System Virtual Machines - &PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based on scale and immediate needs. However, the administrator should be aware of them and their roles to assist in debugging issues. - - - - - + Working with System Virtual Machines + &PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In + general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based + on scale and immediate needs. However, the administrator should be aware of them and their roles + to assist in debugging issues. + + You can configure the system.vm.random.password parameter to create a random system VM + password to ensure higher security. 
If you reset the value for system.vm.random.password to + true and restart the Management Server, a random password is generated and stored encrypted in + the database. You can view the decrypted password under the system.vm.password global + parameter on the &PRODUCT; UI or by calling the listConfigurations API. + + + + + + diff --git a/docs/en-US/working-with-usage-data.xml b/docs/en-US/working-with-usage-data.xml index 56a929fd6c1..5324617ab23 100644 --- a/docs/en-US/working-with-usage-data.xml +++ b/docs/en-US/working-with-usage-data.xml @@ -31,4 +31,5 @@ + diff --git a/docs/en-US/zone-add.xml b/docs/en-US/zone-add.xml index 2a85bb0b432..4f6606fce03 100644 --- a/docs/en-US/zone-add.xml +++ b/docs/en-US/zone-add.xml @@ -63,7 +63,7 @@ Basic. For AWS-style networking. Provides a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support. - For more information about the network types, see Network Setup. + For more information about the network types, see . The rest of the steps differ depending on whether you chose Basic or Advanced. Continue with the steps that apply to you: diff --git a/docs/pot/build-deb.pot b/docs/pot/build-deb.pot index ca2bb9f54c0..995b086af5b 100644 --- a/docs/pot/build-deb.pot +++ b/docs/pot/build-deb.pot @@ -62,7 +62,7 @@ msgstr "" #. Tag: screen #, no-c-format msgid "\n" -"$ dpkg-buildpackge -uc -us\n" +"$ dpkg-buildpackage -uc -us\n" "" msgstr "" diff --git a/docs/pot/verifying-source.pot b/docs/pot/verifying-source.pot index 9c1effe6bf5..9b2d586aacf 100644 --- a/docs/pot/verifying-source.pot +++ b/docs/pot/verifying-source.pot @@ -40,7 +40,7 @@ msgstr "" #. 
Tag: para #, no-c-format -msgid "To enable you to verify the GPG signature, you will need to download the KEYS file." +msgid "To enable you to verify the GPG signature, you will need to download the KEYS file." msgstr "" #. Tag: para diff --git a/docs/publican-cloudstack/defaults.cfg b/docs/publican-cloudstack/defaults.cfg index 6aebaee5a83..9e27bdd309d 100644 --- a/docs/publican-cloudstack/defaults.cfg +++ b/docs/publican-cloudstack/defaults.cfg @@ -16,6 +16,6 @@ # specific language governing permissions and limitations # under the License. -doc_url: "http://docs.cloudstack.org" +doc_url: "http://incubator.apache.org/cloudstack/docs" prod_url: "http://cloudstack.org" diff --git a/docs/publican-cloudstack/en-US/Legal_Notice.xml b/docs/publican-cloudstack/en-US/Legal_Notice.xml index 0e4be5bf56a..5e30efb07c0 100644 --- a/docs/publican-cloudstack/en-US/Legal_Notice.xml +++ b/docs/publican-cloudstack/en-US/Legal_Notice.xml @@ -53,6 +53,15 @@ completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF. + + + CloudStack® is a registered trademark of the Apache Software Foundation. + + + + Apache CloudStack, the CloudStack word design, the Apache CloudStack word design, and the cloud monkey logo are trademarks of the + Apache Software Foundation. 
+ diff --git a/docs/publican-cloudstack/en-US/images/title_logo.svg b/docs/publican-cloudstack/en-US/images/title_logo.svg index a5888de4cf4..1d2913bf50c 100644 --- a/docs/publican-cloudstack/en-US/images/title_logo.svg +++ b/docs/publican-cloudstack/en-US/images/title_logo.svg @@ -1,5 +1,6 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/integration-test/test/resource/component.xml b/engine/storage/integration-test/test/resource/component.xml new file mode 100644 index 00000000000..0368ad41425 --- /dev/null +++ b/engine/storage/integration-test/test/resource/component.xml @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/integration-test/test/resource/storageContext.xml b/engine/storage/integration-test/test/resource/storageContext.xml index 0127c96a734..7c5382d49f9 100644 --- a/engine/storage/integration-test/test/resource/storageContext.xml +++ b/engine/storage/integration-test/test/resource/storageContext.xml @@ -23,10 +23,8 @@ - - - + @@ -34,49 +32,12 @@ - - - - - - + - - - - - - - - - org.apache.cloudstack.framework - - + + + - - - - - - - - - - - - - - - - - diff --git a/engine/storage/pom.xml b/engine/storage/pom.xml index e8a2eb75193..270fe47c743 100644 --- a/engine/storage/pom.xml +++ b/engine/storage/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index 45439c4726a..211cdac574e 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml @@ -44,7 +44,11 @@ install - 
src - test + ${project.basedir}/test + + + ${project.basedir}/test/resource + + diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index 487e2d53eff..fa7772a979d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -20,28 +20,65 @@ package org.apache.cloudstack.storage.snapshot; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.storage.datastore.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.snapshot.db.SnapshotDao2; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; import org.springframework.stereotype.Component; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.utils.exception.CloudRuntimeException; + @Component public class SnapshotDataFactoryImpl implements 
SnapshotDataFactory { @Inject - SnapshotDao2 snapshotDao; + SnapshotDao snapshotDao; @Inject ObjectInDataStoreManager objMap; @Inject DataStoreManager storeMgr; + @Inject + VolumeDataFactory volumeFactory; @Override public SnapshotInfo getSnapshot(long snapshotId, DataStore store) { - SnapshotVO snapshot = snapshotDao.findById(snapshotId); - ObjectInDataStoreVO obj = objMap.findObject(snapshotId, DataObjectType.SNAPSHOT, store.getId(), store.getRole()); - SnapshotObject so = new SnapshotObject(snapshot, store); + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(snapshotId); + DataObjectInStore obj = objMap.findObject(snapshot.getUuid(), DataObjectType.SNAPSHOT, store.getUuid(), store.getRole()); + if (obj == null) { + return null; + } + SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); + return so; + } + @Override + public SnapshotInfo getSnapshot(long snapshotId) { + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(snapshotId); + SnapshotObject so = null; + if (snapshot.getState() == Snapshot.State.BackedUp) { + DataStore store = objMap.findStore(snapshot.getUuid(), DataObjectType.SNAPSHOT, DataStoreRole.Image); + so = SnapshotObject.getSnapshotObject(snapshot, store); + } else { + VolumeInfo volume = this.volumeFactory.getVolume(snapshot.getVolumeId()); + so = SnapshotObject.getSnapshotObject(snapshot, volume.getDataStore()); + } + return so; + } + + @Override + public SnapshotInfo getSnapshot(DataObject obj, DataStore store) { + SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(obj.getId()); + if (snapshot == null) { + throw new CloudRuntimeException("Can't find snapshot: " + obj.getId()); + } + SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 6ce17973375..d10dc778092 100644 --- 
a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -18,19 +18,54 @@ */ package org.apache.cloudstack.storage.snapshot; +import java.util.Date; + +import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.snapshot.db.SnapshotVO; +import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; +import org.apache.log4j.Logger; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; public class SnapshotObject implements SnapshotInfo { + private static final Logger s_logger = Logger.getLogger(SnapshotObject.class); private SnapshotVO snapshot; private DataStore store; - - public SnapshotObject(SnapshotVO snapshot, DataStore store) { - this.snapshot = snapshot; - this.store = store; + @Inject + protected SnapshotDao snapshotDao; + @Inject + protected VolumeDao volumeDao; + @Inject protected VolumeDataFactory volFactory; + @Inject protected SnapshotStateMachineManager stateMachineMgr; + @Inject + ObjectInDataStoreManager ojbectInStoreMgr; + public SnapshotObject() { + + } + + 
protected void configure(SnapshotVO snapshot, DataStore store) { + this.snapshot = snapshot; + this.store = store; + } + + public static SnapshotObject getSnapshotObject(SnapshotVO snapshot, DataStore store) { + SnapshotObject snapObj = ComponentContext.inject(SnapshotObject.class); + snapObj.configure(snapshot, store); + return snapObj; } public DataStore getStore() { @@ -51,50 +86,138 @@ public class SnapshotObject implements SnapshotInfo { @Override public VolumeInfo getBaseVolume() { - // TODO Auto-generated method stub - return null; + return volFactory.getVolume(this.snapshot.getVolumeId()); } @Override public long getId() { - // TODO Auto-generated method stub - return 0; + return this.snapshot.getId(); } @Override public String getUri() { - // TODO Auto-generated method stub - return null; + return this.snapshot.getUuid(); } @Override public DataStore getDataStore() { - // TODO Auto-generated method stub - return null; + return this.store; } @Override public Long getSize() { - // TODO Auto-generated method stub - return 0L; + return this.getSize(); } @Override public DataObjectType getType() { - // TODO Auto-generated method stub - return null; + return DataObjectType.SNAPSHOT; } @Override public DiskFormat getFormat() { - // TODO Auto-generated method stub return null; } @Override public String getUuid() { - // TODO Auto-generated method stub - return null; + return this.snapshot.getUuid(); } + @Override + public void processEvent( + ObjectInDataStoreStateMachine.Event event) { + try { + ojbectInStoreMgr.update(this, event); + } catch (Exception e) { + s_logger.debug("Failed to update state:" + e.toString()); + throw new CloudRuntimeException("Failed to update state: " + e.toString()); + } + } + + @Override + public long getAccountId() { + return this.snapshot.getAccountId(); + } + + @Override + public long getVolumeId() { + return this.snapshot.getVolumeId(); + } + + @Override + public String getPath() { + return this.snapshot.getPath(); + } + + public 
void setPath(String path) { + this.snapshot.setPath(path); + } + + @Override + public String getName() { + return this.snapshot.getName(); + } + + @Override + public Date getCreated() { + return this.snapshot.getCreated(); + } + + @Override + public Type getRecurringType() { + return this.snapshot.getRecurringType(); + } + + @Override + public State getState() { + return this.snapshot.getState(); + } + + @Override + public HypervisorType getHypervisorType() { + return this.snapshot.getHypervisorType(); + } + + @Override + public boolean isRecursive() { + return this.snapshot.isRecursive(); + } + + @Override + public short getsnapshotType() { + return this.snapshot.getsnapshotType(); + } + + @Override + public long getDomainId() { + return this.snapshot.getDomainId(); + } + + public void setPrevSnapshotId(Long id) { + this.snapshot.setPrevSnapshotId(id); + } + + @Override + public Long getDataCenterId() { + return this.snapshot.getDataCenterId(); + } + + public void processEvent(Snapshot.Event event) + throws NoTransitionException { + stateMachineMgr.processEvent(this.snapshot, event); + } + + @Override + public Long getPrevSnapshotId() { + return this.snapshot.getPrevSnapshotId(); + } + + public void setBackupSnapshotId(String id) { + this.snapshot.setBackupSnapshotId(id); + } + + public String getBackupSnapshotId() { + return this.snapshot.getBackupSnapshotId(); + } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 80b1918665d..1b64fd0cae3 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -17,10 +17,15 @@ package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.springframework.stereotype.Component; @Component public class SnapshotServiceImpl implements SnapshotService { + + public SnapshotServiceImpl() { + + } @Override public SnapshotEntity getSnapshotEntity(long snapshotId) { @@ -45,5 +50,7 @@ public class SnapshotServiceImpl implements SnapshotService { // TODO Auto-generated method stub return false; } + + } diff --git a/server/src/com/cloud/baremetal/HttpCallException.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java similarity index 72% rename from server/src/com/cloud/baremetal/HttpCallException.java rename to engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java index d21a37cbb74..c6057704cd8 100644 --- a/server/src/com/cloud/baremetal/HttpCallException.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManager.java @@ -14,15 +14,13 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package com.cloud.baremetal; -import com.cloud.utils.SerialVersionUID; +package org.apache.cloudstack.storage.snapshot; -import com.cloud.exception.CloudException; +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.SnapshotVO; +import com.cloud.utils.fsm.NoTransitionException; -public class HttpCallException extends CloudException { - private static final long serialVersionUID= SerialVersionUID.HttpCallException; - public HttpCallException(String msg) { - super(msg); - } +public interface SnapshotStateMachineManager { + public void processEvent(SnapshotVO snapshot, Event event) throws NoTransitionException; } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java new file mode 100644 index 00000000000..aa1cf684d7a --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStateMachineManagerImpl.java @@ -0,0 +1,54 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.snapshot; + +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + +import com.cloud.storage.Snapshot; +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.listener.SnapshotStateListener; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; + +@Component +public class SnapshotStateMachineManagerImpl implements +SnapshotStateMachineManager { + private StateMachine2 stateMachine = new StateMachine2(); + @Inject + protected SnapshotDao snapshotDao; + public SnapshotStateMachineManagerImpl() { + stateMachine.addTransition(Snapshot.State.Allocated, Event.CreateRequested, Snapshot.State.Creating); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationSucceeded, Snapshot.State.CreatedOnPrimary); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationNotPerformed, Snapshot.State.BackedUp); + stateMachine.addTransition(Snapshot.State.Creating, Event.OperationFailed, Snapshot.State.Error); + stateMachine.addTransition(Snapshot.State.CreatedOnPrimary, Event.BackupToSecondary, Snapshot.State.BackingUp); + stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationSucceeded, Snapshot.State.BackedUp); + stateMachine.addTransition(Snapshot.State.BackingUp, Event.OperationFailed, Snapshot.State.CreatedOnPrimary); + + stateMachine.registerListener(new SnapshotStateListener()); + } + + public void processEvent(SnapshotVO snapshot, Event event) throws NoTransitionException { + stateMachine.transitTo(snapshot, event, null, snapshotDao); + } +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java new file mode 100644 index 
00000000000..ea3b0afd09d --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/AncientSnasphotStrategy.java @@ -0,0 +1,608 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.snapshot.strategy; + +import java.util.List; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcConext; +import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.motion.DataMotionService; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; +import org.apache.cloudstack.storage.snapshot.SnapshotStateMachineManager; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotAnswer; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; +import com.cloud.agent.api.to.S3TO; +import com.cloud.agent.api.to.SwiftTO; +import com.cloud.configuration.Resource.ResourceType; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import 
com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.swift.SwiftManager; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; + +@Component +public class AncientSnasphotStrategy implements SnapshotStrategy { + private static final Logger s_logger = Logger.getLogger(AncientSnasphotStrategy.class); + @Inject + protected VolumeDao _volsDao; + @Inject + protected UserVmDao _vmDao; + @Inject + protected PrimaryDataStoreDao _storagePoolDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected SnapshotDao snapshotDao; + @Inject + private ResourceManager _resourceMgr; + @Inject + protected SnapshotDao _snapshotDao; + @Inject + protected SnapshotManager snapshotMgr; + @Inject + protected VolumeManager volumeMgr; + @Inject + private ConfigurationDao _configDao; + @Inject + protected SnapshotStateMachineManager stateMachineManager; + @Inject + private VolumeDao volumeDao; + @Inject + SnapshotDataFactory snapshotfactory; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataMotionService motionSrv; + @Inject + ObjectInDataStoreManager objInStoreMgr; + @Inject + VMSnapshotDao _vmSnapshotDao; + + + @Override + public boolean canHandle(SnapshotInfo snapshot) { + return true; + } + + static private class CreateSnapshotContext extends AsyncRpcConext { + final VolumeInfo volume; + final SnapshotInfo snapshot; + 
final AsyncCallFuture future; + public CreateSnapshotContext(AsyncCompletionCallback callback, VolumeInfo volume, + SnapshotInfo snapshot, + AsyncCallFuture future) { + super(callback); + this.volume = volume; + this.snapshot = snapshot; + this.future = future; + } + } + + static private class DeleteSnapshotContext extends AsyncRpcConext { + final SnapshotInfo snapshot; + final AsyncCallFuture future; + public DeleteSnapshotContext(AsyncCompletionCallback callback, SnapshotInfo snapshot, + AsyncCallFuture future) { + super(callback); + this.snapshot = snapshot; + this.future = future; + } + + } + + static private class CopySnapshotContext extends AsyncRpcConext { + final SnapshotInfo srcSnapshot; + final SnapshotInfo destSnapshot; + final AsyncCallFuture future; + public CopySnapshotContext(AsyncCompletionCallback callback, + SnapshotInfo srcSnapshot, + SnapshotInfo destSnapshot, + AsyncCallFuture future) { + super(callback); + this.srcSnapshot = srcSnapshot; + this.destSnapshot = destSnapshot; + this.future = future; + } + + } + + protected Void createSnapshotAsyncCallback(AsyncCallbackDispatcher callback, + CreateSnapshotContext context) { + CreateCmdResult result = callback.getResult(); + SnapshotObject snapshot = (SnapshotObject)context.snapshot; + VolumeInfo volume = context.volume; + AsyncCallFuture future = context.future; + SnapshotResult snapResult = new SnapshotResult(snapshot); + if (result.isFailed()) { + s_logger.debug("create snapshot " + context.snapshot.getName() + " failed: " + result.getResult()); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException nte) { + s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); + } + + + snapResult.setResult(result.getResult()); + future.complete(snapResult); + return null; + } + + try { + SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); + String preSnapshotPath = null; + if (preSnapshotVO != null) { + 
preSnapshotPath = preSnapshotVO.getPath(); + } + SnapshotVO snapshotVO = this.snapshotDao.findById(snapshot.getId()); + // The snapshot was successfully created + if (preSnapshotPath != null && preSnapshotPath.equals(result.getPath())) { + // empty snapshot + s_logger.debug("CreateSnapshot: this is empty snapshot "); + + snapshotVO.setPath(preSnapshotPath); + snapshotVO.setBackupSnapshotId(preSnapshotVO.getBackupSnapshotId()); + snapshotVO.setSwiftId(preSnapshotVO.getSwiftId()); + snapshotVO.setPrevSnapshotId(preSnapshotVO.getId()); + snapshotVO.setSecHostId(preSnapshotVO.getSecHostId()); + snapshot.processEvent(Snapshot.Event.OperationNotPerformed); + } else { + long preSnapshotId = 0; + + if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { + preSnapshotId = preSnapshotVO.getId(); + int _deltaSnapshotMax = NumbersUtil.parseInt(_configDao.getValue("snapshot.delta.max"), SnapshotManager.DELTAMAX); + int deltaSnap = _deltaSnapshotMax; + + int i; + for (i = 1; i < deltaSnap; i++) { + String prevBackupUuid = preSnapshotVO.getBackupSnapshotId(); + // previous snapshot doesn't have backup, create a full snapshot + if (prevBackupUuid == null) { + preSnapshotId = 0; + break; + } + long preSSId = preSnapshotVO.getPrevSnapshotId(); + if (preSSId == 0) { + break; + } + preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preSSId); + } + if (i >= deltaSnap) { + preSnapshotId = 0; + } + } + + //If the volume is moved around, backup a full snapshot to secondary storage + if (volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId())) { + preSnapshotId = 0; + //TODO: fix this hack + VolumeVO volumeVO = this.volumeDao.findById(volume.getId()); + volumeVO.setLastPoolId(volume.getPoolId()); + this.volumeDao.update(volume.getId(), volumeVO); + } + + snapshot.setPath(result.getPath()); + snapshot.setPrevSnapshotId(preSnapshotId); + + snapshot.processEvent(Snapshot.Event.OperationSucceeded); + snapResult = new 
SnapshotResult(this.snapshotfactory.getSnapshot(snapshot.getId())); + } + } catch (Exception e) { + s_logger.debug("Failed to create snapshot: ", e); + snapResult.setResult(e.toString()); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change snapshot state: " + e1.toString()); + } + } + + future.complete(snapResult); + return null; + } + + class SnapshotResult extends CommandResult { + SnapshotInfo snashot; + public SnapshotResult(SnapshotInfo snapshot) { + this.snashot = snapshot; + } + } + + protected SnapshotInfo createSnapshotOnPrimary(VolumeInfo volume, Long snapshotId) { + SnapshotObject snapshot = (SnapshotObject)this.snapshotfactory.getSnapshot(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Can not find snapshot " + snapshotId); + } + + try { + snapshot.processEvent(Snapshot.Event.CreateRequested); + } catch (NoTransitionException nte) { + s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); + throw new CloudRuntimeException("Failed to update snapshot state due to " + nte.getMessage()); + } + + AsyncCallFuture future = new AsyncCallFuture(); + try { + CreateSnapshotContext context = new CreateSnapshotContext( + null, volume, snapshot, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + caller.getTarget().createSnapshotAsyncCallback(null, null)) + .setContext(context); + PrimaryDataStoreDriver primaryStore = (PrimaryDataStoreDriver)volume.getDataStore().getDriver(); + primaryStore.takeSnapshot(snapshot, caller); + } catch (Exception e) { + s_logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + try { + snapshot.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change state for event: OperationFailed" , e); + } + throw new CloudRuntimeException("Failed to take snapshot" + snapshot.getId()); + } 
+ + SnapshotResult result; + + try { + result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to create snapshot:" + result.getResult()); + throw new CloudRuntimeException(result.getResult()); + } + return result.snashot; + } catch (InterruptedException e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); + } + + } + + private boolean hostSupportSnapsthot(HostVO host) { + if (host.getHypervisorType() != HypervisorType.KVM) { + return true; + } + // Determine host capabilities + String caps = host.getCapabilities(); + + if (caps != null) { + String[] tokens = caps.split(","); + for (String token : tokens) { + if (token.contains("snapshot")) { + return true; + } + } + } + return false; + } + + protected boolean supportedByHypervisor(VolumeInfo volume) { + if (volume.getHypervisorType().equals(HypervisorType.KVM)) { + StoragePool storagePool = (StoragePool)volume.getDataStore(); + ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); + List hosts = _resourceMgr.listAllHostsInCluster(cluster.getId()); + if (hosts != null && !hosts.isEmpty()) { + HostVO host = hosts.get(0); + if (!hostSupportSnapsthot(host)) { + throw new CloudRuntimeException("KVM Snapshot is not supported on cluster: " + host.getId()); + } + } + } + + // if volume is attached to a vm in destroyed or expunging state; disallow + if (volume.getInstanceId() != null) { + UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); + if (userVm != null) { + if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { + throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volume.getId() + " is associated with vm:" + userVm.getInstanceName() + " is in " + + userVm.getState().toString() + " state"); + } + 
+ if(userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { + List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); + if(activeSnapshots.size() > 1) + throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); + } + + List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), + VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); + if (activeVMSnapshots.size() > 0) { + throw new CloudRuntimeException( + "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); + } + } + } + + return true; + } + + @Override + public SnapshotInfo takeSnapshot(VolumeInfo volume, Long snapshotId) { + + supportedByHypervisor(volume); + + SnapshotInfo snapshot = createSnapshotOnPrimary(volume, snapshotId); + return snapshot; + } + + @Override + public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { + SnapshotObject snapObj = (SnapshotObject)snapshot; + AsyncCallFuture future = new AsyncCallFuture(); + SnapshotResult result = new SnapshotResult(snapshot); + try { + + snapObj.processEvent(Snapshot.Event.BackupToSecondary); + + ZoneScope scope = new ZoneScope(snapshot.getDataCenterId()); + List stores = this.dataStoreMgr.getImageStores(scope); + if (stores.size() != 1) { + throw new CloudRuntimeException("find out more than one image stores"); + } + + DataStore imageStore = stores.get(0); + SnapshotInfo snapshotOnImageStore = (SnapshotInfo)imageStore.create(snapshot); + + snapshotOnImageStore.processEvent(Event.CreateOnlyRequested); + CopySnapshotContext context = new CopySnapshotContext(null, snapshot, + snapshotOnImageStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + 
caller.getTarget().copySnapshotAsyncCallback(null, null)) + .setContext(context); + this.motionSrv.copyAsync(snapshot, snapshotOnImageStore, caller); + } catch (Exception e) { + s_logger.debug("Failed to copy snapshot", e); + result.setResult("Failed to copy snapshot:" +e.toString()); + try { + snapObj.processEvent(Snapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.debug("Failed to change state: " + e1.toString()); + } + future.complete(result); + } + + try { + SnapshotResult res = future.get(); + SnapshotInfo destSnapshot = res.snashot; + return destSnapshot; + } catch (InterruptedException e) { + s_logger.debug("failed copy snapshot", e); + throw new CloudRuntimeException("Failed to copy snapshot" , e); + } catch (ExecutionException e) { + s_logger.debug("Failed to copy snapshot", e); + throw new CloudRuntimeException("Failed to copy snapshot" , e); + } + + } + + protected Void copySnapshotAsyncCallback(AsyncCallbackDispatcher callback, + CopySnapshotContext context) { + CopyCommandResult result = callback.getResult(); + SnapshotInfo destSnapshot = context.destSnapshot; + SnapshotObject srcSnapshot = (SnapshotObject)context.srcSnapshot; + AsyncCallFuture future = context.future; + SnapshotResult snapResult = new SnapshotResult(destSnapshot); + if (result.isFailed()) { + snapResult.setResult(result.getResult()); + future.complete(snapResult); + return null; + } + + try { + BackupSnapshotAnswer answer = (BackupSnapshotAnswer)result.getAnswer(); + + DataObjectInStore dataInStore = objInStoreMgr.findObject(destSnapshot, destSnapshot.getDataStore()); + dataInStore.setInstallPath(answer.getBackupSnapshotName()); + objInStoreMgr.update(destSnapshot, Event.OperationSuccessed); + + srcSnapshot.processEvent(Snapshot.Event.OperationSucceeded); + snapResult = new SnapshotResult(this.snapshotfactory.getSnapshot(destSnapshot.getId())); + future.complete(snapResult); + } catch (Exception e) { + s_logger.debug("Failed to update snapshot state", 
e); + snapResult.setResult(e.toString()); + future.complete(snapResult); + } + return null; + } + + @DB + protected boolean destroySnapshotBackUp(SnapshotVO snapshot) { + DataStore store = objInStoreMgr.findStore(snapshot.getUuid(), DataObjectType.SNAPSHOT, DataStoreRole.Image); + if (store == null) { + s_logger.debug("Can't find snapshot" + snapshot.getId() + " backed up into image store"); + return false; + } + + try { + SnapshotInfo snapshotInfo = this.snapshotfactory.getSnapshot(snapshot.getId(), store); + snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested); + + AsyncCallFuture future = new AsyncCallFuture(); + DeleteSnapshotContext context = new DeleteSnapshotContext(null, + snapshotInfo, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher + .create(this); + caller.setCallback( + caller.getTarget().deleteSnapshotCallback(null, null)) + .setContext(context); + + store.getDriver().deleteAsync(snapshotInfo, caller); + + SnapshotResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to delete snapsoht: " + result.getResult()); + } + return result.isSuccess(); + } catch (Exception e) { + s_logger.debug("Failed to delete snapshot", e); + return false; + } + } + + protected Void deleteSnapshotCallback(AsyncCallbackDispatcher callback, + DeleteSnapshotContext context) { + CommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + SnapshotInfo snapshot = context.snapshot; + if (result.isFailed()) { + s_logger.debug("delete snapshot failed" + result.getResult()); + snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + SnapshotResult res = new SnapshotResult(context.snapshot); + future.complete(res); + return null; + } + snapshot.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed); + SnapshotResult res = new SnapshotResult(context.snapshot); + future.complete(res); + return null; + } + + @Override + public boolean 
deleteSnapshot(SnapshotInfo snapInfo) { + Long snapshotId = snapInfo.getId(); + SnapshotObject snapshot = (SnapshotObject)snapInfo; + + if (!Snapshot.State.BackedUp.equals(snapshot.getState())) { + throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is not in BackedUp Status"); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Calling deleteSnapshot for snapshotId: " + snapshotId); + } + SnapshotVO lastSnapshot = null; + if (snapshot.getBackupSnapshotId() != null) { + List snaps = _snapshotDao.listByBackupUuid(snapshot.getVolumeId(), snapshot.getBackupSnapshotId()); + if (snaps != null && snaps.size() > 1) { + snapshot.setBackupSnapshotId(null); + SnapshotVO snapshotVO = this._snapshotDao.findById(snapshotId); + _snapshotDao.update(snapshot.getId(), snapshotVO); + } + } + + _snapshotDao.remove(snapshotId); + + long lastId = snapshotId; + boolean destroy = false; + while (true) { + lastSnapshot = _snapshotDao.findNextSnapshot(lastId); + if (lastSnapshot == null) { + // if all snapshots after this snapshot in this chain are removed, remove those snapshots. + destroy = true; + break; + } + if (lastSnapshot.getRemoved() == null) { + // if there is one child not removed, then can not remove back up snapshot. 
+ break; + } + lastId = lastSnapshot.getId(); + } + if (destroy) { + lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); + while (lastSnapshot.getRemoved() != null) { + String BackupSnapshotId = lastSnapshot.getBackupSnapshotId(); + if (BackupSnapshotId != null) { + List snaps = _snapshotDao.listByBackupUuid(lastSnapshot.getVolumeId(), BackupSnapshotId); + if (snaps != null && snaps.size() > 1) { + lastSnapshot.setBackupSnapshotId(null); + _snapshotDao.update(lastSnapshot.getId(), lastSnapshot); + } else { + if (destroySnapshotBackUp(lastSnapshot)) { + + } else { + s_logger.debug("Destroying snapshot backup failed " + lastSnapshot); + break; + } + } + } + lastId = lastSnapshot.getPrevSnapshotId(); + if (lastId == 0) { + break; + } + lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); + } + } + return true; + + } + + @Override + public boolean revertSnapshot(SnapshotInfo snapshot) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java deleted file mode 100644 index 7f18200cd3d..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/HypervisorBasedSnapshot.java +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.storage.snapshot.strategy; - -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; -import org.springframework.stereotype.Component; - -@Component -public class HypervisorBasedSnapshot implements SnapshotStrategy { - - @Override - public boolean takeSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - -} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java deleted file mode 100644 index fa9c5aeaa08..00000000000 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/strategy/StorageBasedSnapshot.java +++ /dev/null @@ -1,42 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.storage.snapshot.strategy; - -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotStrategy; - -public class StorageBasedSnapshot implements SnapshotStrategy { - - @Override - public boolean takeSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean deleteSnapshot(SnapshotInfo snapshot) { - // TODO Auto-generated method stub - return false; - } - -} diff --git a/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml b/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml new file mode 100644 index 00000000000..d99c2e2dbac --- /dev/null +++ b/engine/storage/snapshot/test/resource/SnapshotManagerTestContext.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java b/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java new file mode 100644 index 00000000000..e722ab55c70 --- /dev/null +++ b/engine/storage/snapshot/test/src/SnapshotDataFactoryTest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package src; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.cloud.utils.component.ComponentContext; + +import junit.framework.TestCase; + +//@RunWith(SpringJUnit4ClassRunner.class) +//@ContextConfiguration(locations = "classpath:/SnapshotManagerTestContext.xml") +public class SnapshotDataFactoryTest extends TestCase { + //@Inject SnapshotDataFactory snapshotFactory; + + @Before + public void setup() throws Exception { + //ComponentContext.initComponentsLifeCycle(); + + } + + @Test + public void testGestSnapshot() { + //snapshotFactory.getSnapshot(snapshotId); + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java index b709991ee57..f441f39ddfa 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java +++ b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java @@ -40,24 +40,24 @@ public class HypervsiorHostEndPointRpcServer implements HostEndpointRpcServer { private static 
final Logger s_logger = Logger.getLogger(HypervsiorHostEndPointRpcServer.class); @Inject - private RpcProvider _rpcProvider; + private RpcProvider rpcProvider; public HypervsiorHostEndPointRpcServer() { } public HypervsiorHostEndPointRpcServer(RpcProvider rpcProvider) { - _rpcProvider = rpcProvider; - _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this)); + rpcProvider = rpcProvider; + rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this)); } @PostConstruct public void Initialize() { - _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this)); + rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this)); } @Override public void sendCommandAsync(HypervisorHostEndPoint host, final Command command, final AsyncCompletionCallback callback) { - _rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener() { + rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener() { @Override public void onSuccess(Answer result) { callback.complete(result); diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java similarity index 50% rename from server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java rename to engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index 61b5e1f7752..6334ca7f2dc 100755 --- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -14,48 +14,38 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package com.cloud.storage.allocator; +package org.apache.cloudstack.storage.allocator; import java.math.BigDecimal; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.Set; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; -import com.cloud.capacity.CapacityManager; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; -import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.host.Host; -import com.cloud.server.StatsCollector; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; -import com.cloud.storage.dao.StoragePoolDao; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplateHostDao; -import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.swift.SwiftManager; -import com.cloud.template.TemplateManager; +import com.cloud.user.Account; 
import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.DiskProfile; @@ -64,22 +54,19 @@ import com.cloud.vm.VirtualMachineProfile; public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class); - @Inject TemplateManager _tmpltMgr; - @Inject StorageManager _storageMgr; - @Inject StoragePoolDao _storagePoolDao; - @Inject VMTemplateHostDao _templateHostDao; - @Inject VMTemplatePoolDao _templatePoolDao; - @Inject VMTemplateDao _templateDao; + @Inject StorageManager storageMgr; + protected @Inject PrimaryDataStoreDao _storagePoolDao; @Inject VolumeDao _volumeDao; - @Inject StoragePoolHostDao _poolHostDao; @Inject ConfigurationDao _configDao; @Inject ClusterDao _clusterDao; - @Inject SwiftManager _swiftMgr; - @Inject CapacityManager _capacityMgr; + protected @Inject DataStoreManager dataStoreMgr; protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1); long _extraBytesPerVolume = 0; Random _rand; boolean _dontMatter; + protected String _allocationAlgorithm = "random"; + @Inject + DiskOfferingDao _diskOfferingDao; @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -93,53 +80,86 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement _extraBytesPerVolume = 0; _rand = new Random(System.currentTimeMillis()); - + _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning")); - + + String allocationAlgorithm = configs.get("vm.allocation.algorithm"); + if (allocationAlgorithm != null) { + _allocationAlgorithm = allocationAlgorithm; + } + return true; } + + protected abstract List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo); - abstract boolean allocatorIsCorrectType(DiskProfile dskCh); + @Override + public + List 
allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List pools = select(dskCh, vmProfile, plan, avoid, returnUpTo); + return reOrder(pools, vmProfile, plan); + } - protected boolean templateAvailable(long templateId, long poolId) { - VMTemplateStorageResourceAssoc thvo = _templatePoolDao.findByPoolTemplate(poolId, templateId); - if (thvo != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Template id : " + templateId + " status : " + thvo.getDownloadState().toString()); - } - return (thvo.getDownloadState()==Status.DOWNLOADED); - } else { - return false; + protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) { + if(account == null){ + return pools; + } + long dcId = plan.getDataCenterId(); + Long podId = plan.getPodId(); + Long clusterId = plan.getClusterId(); + + List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId()); + if (s_logger.isDebugEnabled()) { + s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount); + } + + //now filter the given list of Pools by this ordered list + Map poolMap = new HashMap(); + for (StoragePool pool : pools) { + poolMap.put(pool.getId(), pool); + } + List matchingPoolIds = new ArrayList(poolMap.keySet()); + + poolIdsByVolCount.retainAll(matchingPoolIds); + + List reorderedPools = new ArrayList(); + for(Long id: poolIdsByVolCount){ + reorderedPools.add(poolMap.get(id)); + } + + return reorderedPools; + } + + protected List reOrder(List pools, + VirtualMachineProfile vmProfile, + DeploymentPlan plan) { + Account account = null; + if(vmProfile.getVirtualMachine() != null){ + account = vmProfile.getOwner(); } + + if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) { + // Shuffle this so that we don't 
check the pools in the same order. + Collections.shuffle(pools); + }else if(_allocationAlgorithm.equals("userdispersing")){ + pools = reorderPoolsByNumberOfVolumes(plan, pools, account); + } + return pools; } - protected boolean localStorageAllocationNeeded(DiskProfile dskCh) { - return dskCh.useLocalStorage(); - } - - protected boolean poolIsCorrectType(DiskProfile dskCh, StoragePool pool) { - boolean localStorageAllocationNeeded = localStorageAllocationNeeded(dskCh); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Is localStorageAllocationNeeded? "+ localStorageAllocationNeeded); - s_logger.debug("Is storage pool shared? "+ pool.getPoolType().isShared()); - } - - return ((!localStorageAllocationNeeded && pool.getPoolType().isShared()) || (localStorageAllocationNeeded && !pool.getPoolType().isShared())); - } - - protected boolean checkPool(ExcludeList avoid, StoragePoolVO pool, DiskProfile dskCh, VMTemplateVO template, List templatesInPool, - StatsCollector sc, DeploymentPlan plan) { + protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, + DeploymentPlan plan) { if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId()); } - if (avoid.shouldAvoid(pool)) { if (s_logger.isDebugEnabled()) { s_logger.debug("StoragePool is in avoid set, skipping this pool"); } return false; } + if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){ if (s_logger.isDebugEnabled()) { s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools"); @@ -147,26 +167,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } - //by default, all pools are up when successfully added - //don't return the pool if not up (if in maintenance/prepareformaintenance/errorinmaintenance) - if(!pool.getStatus().equals(StoragePoolStatus.Up)){ - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool status is not UP, status is: "+pool.getStatus().name()+", skipping this pool"); - } - return false; + DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId()); + if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) { + s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's"); + return false; } + - // Check that the pool type is correct - if (!poolIsCorrectType(dskCh, pool)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("StoragePool is not of correct type, skipping this pool"); - } - return false; - } - - /*hypervisor type is correct*/ - // TODO : when creating a standalone volume, offering is passed as NULL, need to - // refine the logic of checking hypervisorType based on offering info Long clusterId = pool.getClusterId(); ClusterVO cluster = _clusterDao.findById(clusterId); if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) { @@ -176,32 +183,10 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement return false; } - // check capacity Volume volume = _volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList(); requestVolumes.add(volume); - return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); + return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); } - - - - @Override - public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) { - return storage.getStorageIpAddress(); - } - - - @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, long dcId, long podId, Long clusterId, Long hostId, Set avoids, int returnUpTo) { - - ExcludeList avoid = new ExcludeList(); - for(StoragePool pool : avoids){ - avoid.addPool(pool.getId()); - } - - DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, clusterId, hostId, null, null); - return allocateToPool(dskCh, 
vmProfile, plan, avoid, returnUpTo); - } - } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java new file mode 100644 index 00000000000..747e2586fed --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.allocator; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.offering.ServiceOffering; +import com.cloud.storage.StoragePool; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Component +@Local(value=StoragePoolAllocator.class) +public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator { + private static final Logger s_logger = Logger.getLogger(ClusterScopeStoragePoolAllocator.class); + protected String _allocationAlgorithm = "random"; + + @Inject + DiskOfferingDao _diskOfferingDao; + + @Override + protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + + List suitablePools = new ArrayList(); + + long dcId = plan.getDataCenterId(); + Long podId = plan.getPodId(); + Long clusterId = plan.getClusterId(); + + if(dskCh.getTags() != null && dskCh.getTags().length != 0){ + s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags())); + }else{ + s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); + } + + List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags()); + if (pools.size() == 0) { + if (s_logger.isDebugEnabled()) { + String storageType = dskCh.useLocalStorage() 
? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); + s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning"); + } + return suitablePools; + } + + for (StoragePoolVO pool: pools) { + if(suitablePools.size() == returnUpTo){ + break; + } + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, pol, dskCh, plan)) { + suitablePools.add(pol); + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools"); + } + + return suitablePools; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + if (_configDao != null) { + Map configs = _configDao.getConfiguration(params); + String allocationAlgorithm = configs.get("vm.allocation.algorithm"); + if (allocationAlgorithm != null) { + _allocationAlgorithm = allocationAlgorithm; + } + } + return true; + } +} diff --git a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java similarity index 82% rename from server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java rename to engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java index 4eeae280d8b..91bc25c715d 100644 --- a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package com.cloud.storage.allocator; +package org.apache.cloudstack.storage.allocator; import java.util.List; import java.util.Map; @@ -23,8 +23,8 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.deploy.DeploymentPlan; @@ -36,32 +36,18 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -@Component @Local(value=StoragePoolAllocator.class) public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class); StoragePoolAllocator _firstFitStoragePoolAllocator; StoragePoolAllocator _localStoragePoolAllocator; - @Inject StorageManager _storageMgr; + @Inject StorageManager storageMgr; @Inject ConfigurationDao _configDao; boolean _storagePoolCleanupEnabled; @Override - public boolean allocatorIsCorrectType(DiskProfile dskCh) { - return true; - } - - public Integer getStorageOverprovisioningFactor() { - return null; - } - - public Long getExtraBytesPerVolume() { - return null; - } - - @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { if (!_storagePoolCleanupEnabled) { s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped."); @@ -69,10 +55,10 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl } // Clean up all storage pools - _storageMgr.cleanupStorage(false); + storageMgr.cleanupStorage(false); // 
Determine what allocator to use StoragePoolAllocator allocator; - if (localStorageAllocationNeeded(dskCh)) { + if (dskCh.useLocalStorage()) { allocator = _localStoragePoolAllocator; } else { allocator = _firstFitStoragePoolAllocator; @@ -88,7 +74,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - _firstFitStoragePoolAllocator = ComponentContext.inject(FirstFitStoragePoolAllocator.class); + _firstFitStoragePoolAllocator = ComponentContext.inject(ClusterScopeStoragePoolAllocator.class); _firstFitStoragePoolAllocator.configure("GCFirstFitStoragePoolAllocator", params); _localStoragePoolAllocator = ComponentContext.inject(LocalStoragePoolAllocator.class); _localStoragePoolAllocator.configure("GCLocalStoragePoolAllocator", params); diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java new file mode 100644 index 00000000000..a8d5173cebe --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.allocator; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.Volume; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Component +@Local(value = StoragePoolAllocator.class) +public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { + private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class); + + @Inject + StoragePoolHostDao _poolHostDao; + @Inject + VMInstanceDao _vmInstanceDao; + @Inject + UserVmDao _vmDao; + @Inject + ServiceOfferingDao _offeringDao; + @Inject + CapacityDao _capacityDao; + @Inject + ConfigurationDao _configDao; + + @Override + protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + + List suitablePools = new ArrayList(); + + + if (s_logger.isDebugEnabled()) { + s_logger.debug("LocalStoragePoolAllocator trying to 
find storage pool to fit the vm"); + } + + // data disk and host identified from deploying vm (attach volume case) + if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) { + List hostPools = _poolHostDao.listByHostId(plan.getHostId()); + for (StoragePoolHostVO hostPool: hostPools) { + StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId()); + if (pool != null && pool.isLocal()) { + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, pol, dskCh, plan)) { + s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); + suitablePools.add(pol); + } + } + + if (suitablePools.size() == returnUpTo) { + break; + } + } + } else { + List availablePools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags()); + for (StoragePoolVO pool : availablePools) { + if (suitablePools.size() == returnUpTo) { + break; + } + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, pol, dskCh, plan)) { + suitablePools.add(pol); + } + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); + } + + return suitablePools; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + _storageOverprovisioningFactor = new BigDecimal(1); + _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L); + + return true; + } + + public LocalStoragePoolAllocator() { + } +} diff --git a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java similarity index 74% rename from server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java rename to 
engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java index 2c19406fef6..4663b12e97e 100644 --- a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.storage.allocator; +package org.apache.cloudstack.storage.allocator; import java.util.List; import java.util.Map; @@ -23,23 +23,17 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.springframework.stereotype.Component; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import com.cloud.configuration.Config; -import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.host.Host; import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume.Type; - import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -@Component @Local(value=StoragePoolAllocator.class) public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implements StoragePoolAllocator { @@ -55,29 +49,13 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo); } - - @Override - public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) { - return null; - } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); return true; } - - @Override - protected boolean localStorageAllocationNeeded(DiskProfile dskCh) { - if 
(dskCh.getType() == Type.ROOT) { - return true; - } else if (dskCh.getType() == Type.DATADISK) { - return false; - } else { - return super.localStorageAllocationNeeded(dskCh); - } - } - + protected UseLocalForRootAllocator() { } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java new file mode 100644 index 00000000000..c45f8a822a9 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.allocator; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.StoragePool; +import com.cloud.storage.Volume; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Component +public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { + private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class); + @Inject PrimaryDataStoreDao _storagePoolDao; + @Inject DataStoreManager dataStoreMgr; + + @Override + protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, + DeploymentPlan plan) { + Volume volume = _volumeDao.findById(dskCh.getVolumeId()); + List requestVolumes = new ArrayList(); + requestVolumes.add(volume); + return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); + } + + @Override + protected List select(DiskProfile dskCh, + VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + List suitablePools = new ArrayList(); + HypervisorType hypervisor = vmProfile.getHypervisorType(); + if (hypervisor != null) { + if (hypervisor != HypervisorType.KVM) { + s_logger.debug("Only kvm supports zone wide storage"); + return suitablePools; + } + } + + List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags()); + + for (StoragePoolVO storage : storagePools) { + if (suitablePools.size() == returnUpTo) 
{ + break; + } + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId()); + if (filter(avoid, pol, dskCh, plan)) { + suitablePools.add(pol); + } + } + return suitablePools; + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java index 657d32c7877..218f9013a17 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataObjectManagerImpl.java @@ -24,14 +24,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; import org.apache.cloudstack.storage.motion.DataMotionService; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -52,7 +52,7 @@ public class DataObjectManagerImpl implements DataObjectManager { protected DataObject waitingForCreated(DataObject dataObj, DataStore dataStore) { 
long retries = this.waitingRetries; - ObjectInDataStoreVO obj = null; + DataObjectInStore obj = null; do { try { Thread.sleep(waitingTime); @@ -61,8 +61,8 @@ public class DataObjectManagerImpl implements DataObjectManager { throw new CloudRuntimeException("sleep interrupted", e); } - obj = objectInDataStoreMgr.findObject(dataObj.getId(), - dataObj.getType(), dataStore.getId(), dataStore.getRole()); + obj = objectInDataStoreMgr.findObject(dataObj, + dataStore); if (obj == null) { s_logger.debug("can't find object in db, maybe it's cleaned up already, exit waiting"); break; @@ -92,11 +92,10 @@ public class DataObjectManagerImpl implements DataObjectManager { } @Override - public void createAsync(DataObject data, DataStore store, + public void createAsync(DataObject data, DataStore store, AsyncCompletionCallback callback, boolean noCopy) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), store.getId(), - store.getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + data, store); DataObject objInStore = null; boolean freshNewTemplate = false; if (obj == null) { @@ -105,8 +104,8 @@ public class DataObjectManagerImpl implements DataObjectManager { data, store); freshNewTemplate = true; } catch (Throwable e) { - obj = objectInDataStoreMgr.findObject(data.getId(), - data.getType(), store.getId(), store.getRole()); + obj = objectInDataStoreMgr.findObject(data, + store); if (obj == null) { CreateCmdResult result = new CreateCmdResult( null, null); @@ -184,20 +183,12 @@ public class DataObjectManagerImpl implements DataObjectManager { return null; } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - objInStrore.getId(), objInStrore - .getType(), objInStrore.getDataStore() - .getId(), objInStrore.getDataStore() - .getRole()); - - obj.setInstallPath(result.getPath()); - obj.setSize(result.getSize()); try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(objInStrore, 
ObjectInDataStoreStateMachine.Event.OperationSuccessed); } catch (NoTransitionException e) { try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(objInStrore, ObjectInDataStoreStateMachine.Event.OperationFailed); } catch (NoTransitionException e1) { s_logger.debug("failed to change state", e1); @@ -259,14 +250,10 @@ public class DataObjectManagerImpl implements DataObjectManager { CopyContext context) { CopyCommandResult result = callback.getResult(); DataObject destObj = context.destObj; - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - destObj.getId(), destObj - .getType(), destObj.getDataStore() - .getId(), destObj.getDataStore() - .getRole()); + if (result.isFailed()) { try { - objectInDataStoreMgr.update(obj, Event.OperationFailed); + objectInDataStoreMgr.update(destObj, Event.OperationFailed); } catch (NoTransitionException e) { s_logger.debug("Failed to update copying state", e); } @@ -276,10 +263,8 @@ public class DataObjectManagerImpl implements DataObjectManager { context.getParentCallback().complete(res); } - obj.setInstallPath(result.getPath()); - try { - objectInDataStoreMgr.update(obj, + objectInDataStoreMgr.update(destObj, ObjectInDataStoreStateMachine.Event.OperationSuccessed); } catch (NoTransitionException e) { s_logger.debug("Failed to update copying state: ", e); @@ -311,11 +296,8 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public void deleteAsync(DataObject data, AsyncCompletionCallback callback) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), data.getDataStore().getId(), - data.getDataStore().getRole()); try { - objectInDataStoreMgr.update(obj, Event.DestroyRequested); + objectInDataStoreMgr.update(data, Event.DestroyRequested); } catch (NoTransitionException e) { s_logger.debug("destroy failed", e); CreateCmdResult res = new CreateCmdResult( @@ -338,23 +320,18 @@ public class DataObjectManagerImpl implements DataObjectManager { 
protected Void deleteAsynCallback(AsyncCallbackDispatcher callback, DeleteContext context) { DataObject destObj = context.obj; - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - destObj.getId(), destObj - .getType(), destObj.getDataStore() - .getId(), destObj.getDataStore() - .getRole()); - + CommandResult res = callback.getResult(); if (res.isFailed()) { try { - objectInDataStoreMgr.update(obj, Event.OperationFailed); + objectInDataStoreMgr.update(destObj, Event.OperationFailed); } catch (NoTransitionException e) { s_logger.debug("delete failed", e); } } else { try { - objectInDataStoreMgr.update(obj, Event.OperationSuccessed); + objectInDataStoreMgr.update(destObj, Event.OperationSuccessed); } catch (NoTransitionException e) { s_logger.debug("delete failed", e); } @@ -366,9 +343,8 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public DataObject createInternalStateOnly(DataObject data, DataStore store) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), store.getId(), - store.getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + data, store); DataObject objInStore = null; if (obj == null) { objInStore = objectInDataStoreMgr.create( @@ -391,12 +367,6 @@ public class DataObjectManagerImpl implements DataObjectManager { @Override public void update(DataObject data, String path, Long size) { - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - data.getId(), data.getType(), data.getDataStore().getId(), - data.getDataStore().getRole()); - - obj.setInstallPath(path); - obj.setSize(size); - objectInDataStoreMgr.update(obj); + throw new CloudRuntimeException("not implemented"); } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index f857ac5db1a..a2fd08d1e8f 100644 --- 
a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -18,12 +18,15 @@ */ package org.apache.cloudstack.storage.datastore; +import java.util.List; import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreManager; import org.springframework.stereotype.Component; @@ -50,5 +53,22 @@ public class DataStoreManagerImpl implements DataStoreManager { String providerUuid) { return null; } + @Override + public DataStore getDataStore(String uuid, DataStoreRole role) { + if (role == DataStoreRole.Primary) { + return primaryStorMgr.getPrimaryDataStore(uuid); + } else if (role == DataStoreRole.Image) { + return imageDataStoreMgr.getImageDataStore(uuid); + } + throw new CloudRuntimeException("un recognized type" + role); + } + @Override + public List getImageStores(Scope scope) { + return imageDataStoreMgr.getList(); + } + @Override + public DataStore getPrimaryDataStore(long storeId) { + return primaryStorMgr.getPrimaryDataStore(storeId); + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java index e707de6b8bd..d170f5c707a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManager.java @@ -17,26 +17,20 @@ package org.apache.cloudstack.storage.datastore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import 
org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import com.cloud.utils.fsm.NoTransitionException; public interface ObjectInDataStoreManager { public DataObject create(DataObject template, DataStore dataStore); - public VolumeInfo create(VolumeInfo volume, DataStore dataStore); - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore); - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, - long dataStoreId, DataStoreRole role); public DataObject get(DataObject dataObj, DataStore store); public boolean update(DataObject vo, Event event) throws NoTransitionException; - boolean update(ObjectInDataStoreVO obj, Event event) - throws NoTransitionException; - - boolean update(ObjectInDataStoreVO obj); + DataObjectInStore findObject(String uuid, DataObjectType type, + String dataStoreUuid, DataStoreRole role); + DataObjectInStore findObject(DataObject obj, DataStore store); + DataStore findStore(String objUuid, DataObjectType type, DataStoreRole role); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java index 7eb4932348f..87ba1d216c5 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java +++ 
b/engine/storage/src/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java @@ -19,19 +19,25 @@ package org.apache.cloudstack.storage.datastore; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.storage.db.ObjectInDataStoreDao; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeHostDao; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.SearchCriteria2; import 
com.cloud.utils.db.SearchCriteriaService; @@ -41,16 +47,28 @@ import com.cloud.utils.fsm.StateMachine2; @Component public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { + private static final Logger s_logger = Logger + .getLogger(ObjectInDataStoreManagerImpl.class); @Inject ImageDataFactory imageFactory; @Inject + DataStoreManager storeMgr; + @Inject VolumeDataFactory volumeFactory; @Inject ObjectInDataStoreDao objectDataStoreDao; - protected StateMachine2 stateMachines; + @Inject + VolumeHostDao volumeHostDao; + @Inject + VMTemplateHostDao templateHostDao; + @Inject + VMTemplatePoolDao templatePoolDao; + @Inject + SnapshotDataFactory snapshotFactory; + protected StateMachine2 stateMachines; public ObjectInDataStoreManagerImpl() { - stateMachines = new StateMachine2(); + stateMachines = new StateMachine2(); stateMachines.addTransition(State.Allocated, Event.CreateRequested, State.Creating); stateMachines.addTransition(State.Creating, Event.OperationSuccessed, @@ -76,101 +94,122 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { stateMachines.addTransition(State.Allocated, Event.CreateOnlyRequested, State.Creating2); stateMachines.addTransition(State.Creating2, Event.OperationFailed, - State.Failed); + State.Allocated); stateMachines.addTransition(State.Creating2, Event.OperationSuccessed, State.Ready); } @Override public DataObject create(DataObject obj, DataStore dataStore) { - - ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); - vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); - vo.setObjectId(obj.getId()); - vo.setSize(obj.getSize()); - - vo.setObjectType(obj.getType()); - vo = objectDataStoreDao.persist(vo); + if (obj.getType() == DataObjectType.TEMPLATE && dataStore.getRole() == DataStoreRole.Primary) { + VMTemplateStoragePoolVO vo = new VMTemplateStoragePoolVO(dataStore.getId(), obj.getId()); + vo = templatePoolDao.persist(vo); + } else { + ObjectInDataStoreVO vo = 
new ObjectInDataStoreVO(); + vo.setDataStoreRole(dataStore.getRole()); + vo.setDataStoreUuid(dataStore.getUuid()); + vo.setObjectType(obj.getType()); + vo.setObjectUuid(obj.getUuid()); + vo = objectDataStoreDao.persist(vo); + } if (obj.getType() == DataObjectType.TEMPLATE) { - return imageFactory.getTemplate(obj.getId(), dataStore); + return imageFactory.getTemplate(obj, dataStore); } else if (obj.getType() == DataObjectType.VOLUME) { - return volumeFactory.getVolume(obj.getId(), dataStore); + return volumeFactory.getVolume(obj, dataStore); + } else if (obj.getType() == DataObjectType.SNAPSHOT) { + return snapshotFactory.getSnapshot(obj, dataStore); } throw new CloudRuntimeException("unknown type"); } - - @Override - public VolumeInfo create(VolumeInfo volume, DataStore dataStore) { - ObjectInDataStoreVO vo = new ObjectInDataStoreVO(); - vo.setDataStoreId(dataStore.getId()); - vo.setDataStoreRole(dataStore.getRole()); - vo.setObjectId(volume.getId()); - vo.setObjectType(volume.getType()); - vo = objectDataStoreDao.persist(vo); - - return volumeFactory.getVolume(volume.getId(), dataStore); - } - - @Override - public SnapshotInfo create(SnapshotInfo snapshot, DataStore dataStore) { - // TODO Auto-generated method stub - return null; - } - - @Override - public ObjectInDataStoreVO findObject(long objectId, DataObjectType type, - long dataStoreId, DataStoreRole role) { - SearchCriteriaService sc = SearchCriteria2 - .create(ObjectInDataStoreVO.class); - sc.addAnd(sc.getEntity().getObjectId(), Op.EQ, objectId); - sc.addAnd(sc.getEntity().getDataStoreId(), Op.EQ, dataStoreId); - sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); - sc.addAnd(sc.getEntity().getState(), Op.NIN, - ObjectInDataStoreStateMachine.State.Destroyed, - ObjectInDataStoreStateMachine.State.Failed); - ObjectInDataStoreVO objectStoreVO = sc.find(); - return objectStoreVO; - - } - + @Override public boolean update(DataObject data, Event 
event) throws NoTransitionException { - ObjectInDataStoreVO obj = this.findObject(data.getId(), data.getType(), - data.getDataStore().getId(), data.getDataStore().getRole()); + DataObjectInStore obj = this.findObject(data, data.getDataStore()); if (obj == null) { throw new CloudRuntimeException( "can't find mapping in ObjectInDataStore table for: " + data); } - return this.stateMachines.transitTo(obj, event, null, - objectDataStoreDao); - - } - - @Override - public boolean update(ObjectInDataStoreVO obj, Event event) - throws NoTransitionException { - return this.stateMachines.transitTo(obj, event, null, - objectDataStoreDao); - + + if (data.getType() == DataObjectType.TEMPLATE && data.getDataStore().getRole() == DataStoreRole.Primary) { + try { + this.stateMachines.transitTo(obj, event, null, + templatePoolDao); + } catch (NoTransitionException e) { + if (event == Event.CreateOnlyRequested || event == Event.OperationSuccessed) { + s_logger.debug("allow muliple create requests"); + } else { + throw e; + } + } + } else { + this.stateMachines.transitTo(obj, event, null, objectDataStoreDao); + } + return true; } @Override public DataObject get(DataObject dataObj, DataStore store) { if (dataObj.getType() == DataObjectType.TEMPLATE) { - return imageFactory.getTemplate(dataObj.getId(), store); + return imageFactory.getTemplate(dataObj, store); } else if (dataObj.getType() == DataObjectType.VOLUME) { - return volumeFactory.getVolume(dataObj.getId(), store); + return volumeFactory.getVolume(dataObj, store); } throw new CloudRuntimeException("unknown type"); } @Override - public boolean update(ObjectInDataStoreVO obj) { - return objectDataStoreDao.update(obj.getId(), obj); + public DataObjectInStore findObject(DataObject obj, DataStore store) { + DataObjectInStore vo = null; + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + + if (store.getRole() == DataStoreRole.Image) { + sc.addAnd(sc.getEntity().getDataStoreUuid(), Op.EQ, store.getUuid()); 
+ sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, store.getRole()); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, obj.getUuid()); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, obj.getType()); + vo = sc.find(); + } else if (obj.getType() == DataObjectType.TEMPLATE && store.getRole() == DataStoreRole.Primary) { + vo = templatePoolDao.findByPoolTemplate(store.getId(), obj.getId()); + } else { + s_logger.debug("unknown type: " + obj.getType() + " " + store.getRole()); + throw new CloudRuntimeException("unknown type"); + } + return vo; } + + @Override + public DataObjectInStore findObject(String uuid, DataObjectType type, + String dataStoreUuid, DataStoreRole role) { + DataObjectInStore vo = null; + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + + if (role == DataStoreRole.Image) { + sc.addAnd(sc.getEntity().getDataStoreUuid(), Op.EQ, dataStoreUuid); + sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, uuid); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); + vo = sc.find(); + } + return vo; + } + + @Override + public DataStore findStore(String objUuid, DataObjectType type, DataStoreRole role) { + DataStore store = null; + if (role == DataStoreRole.Image) { + SearchCriteriaService sc = SearchCriteria2.create(ObjectInDataStoreVO.class); + sc.addAnd(sc.getEntity().getDataStoreRole(), Op.EQ, role); + sc.addAnd(sc.getEntity().getObjectUuid(), Op.EQ, objUuid); + sc.addAnd(sc.getEntity().getObjectType(), Op.EQ, type); + ObjectInDataStoreVO vo = sc.find(); + if (vo != null) { + store = this.storeMgr.getDataStore(vo.getDataStoreUuid(), vo.getDataStoreRole()); + } + } + return store; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java index a6ba9bc1f60..fdaaace49d7 100644 --- 
a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStore.java @@ -20,31 +20,19 @@ package org.apache.cloudstack.storage.datastore; import java.util.List; -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; -import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { VolumeInfo getVolume(long id); List getVolumes(); -/* void deleteVolumeAsync(VolumeInfo volume, AsyncCompletionCallback callback); - - void createVolumeAsync(VolumeInfo vo, VolumeDiskType diskType, AsyncCompletionCallback callback); - - void createVoluemFromBaseImageAsync(VolumeInfo volume, TemplateInfo templateStore, AsyncCompletionCallback callback); - */ - boolean exists(DataObject data); TemplateInfo getTemplate(long templateId); @@ -53,13 +41,4 @@ public interface PrimaryDataStore extends DataStore, PrimaryDataStoreInfo { DiskFormat getDefaultDiskType(); - -/* void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback); - - void 
revertSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback); - - void deleteSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback);*/ } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java index 0ac57f445aa..e70f803ee81 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreEntityImpl.java @@ -26,8 +26,8 @@ import java.util.Map; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; public class PrimaryDataStoreEntityImpl implements StorageEntity { private PrimaryDataStoreInfo dataStore; @@ -132,7 +132,8 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { @Override public State getState() { - return this.dataStore.getManagedState(); + //return this.dataStore.getManagedState(); + return null; } @Override @@ -229,13 +230,7 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { return null; } - @Override - public String getStorageProvider() { - // TODO Auto-generated method stub - return null; - } - @Override public String getStorageType() { // TODO Auto-generated method stub return null; @@ -247,4 +242,16 @@ public class PrimaryDataStoreEntityImpl implements StorageEntity { } + @Override + public Long getStorageProviderId() { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean isInMaintenance() { + // TODO Auto-generated method stub + return false; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java 
b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java index a60ec7a6e65..d1c26e1a272 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreProviderManager.java @@ -18,11 +18,14 @@ */ package org.apache.cloudstack.storage.datastore; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; public interface PrimaryDataStoreProviderManager { public PrimaryDataStore getPrimaryDataStore(long dataStoreId); + public PrimaryDataStore getPrimaryDataStore(String uuid); boolean registerDriver(String uuid, PrimaryDataStoreDriver driver); + boolean registerHostListener(String uuid, HypervisorHostListener listener); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java deleted file mode 100644 index 24a5c790688..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDao.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore.db; - -import java.util.List; -import java.util.Map; - -import org.apache.cloudstack.storage.datastore.DataStoreStatus; - -import com.cloud.utils.db.GenericDao; - -public interface PrimaryDataStoreDao extends GenericDao { - - /** - * @param datacenterId - * -- the id of the datacenter (availability zone) - */ - List listByDataCenterId(long datacenterId); - - /** - * @param datacenterId - * -- the id of the datacenter (availability zone) - */ - List listBy(long datacenterId, long podId, Long clusterId); - - /** - * Set capacity of storage pool in bytes - * - * @param id - * pool id. - * @param capacity - * capacity in bytes - */ - void updateCapacity(long id, long capacity); - - /** - * Set available bytes of storage pool in bytes - * - * @param id - * pool id. - * @param available - * available capacity in bytes - */ - void updateAvailable(long id, long available); - - PrimaryDataStoreVO persist(PrimaryDataStoreVO pool, Map details); - - /** - * Find pool by name. - * - * @param name - * name of pool. - * @return the single StoragePoolVO - */ - List findPoolByName(String name); - - /** - * Find pools by the pod that matches the details. - * - * @param podId - * pod id to find the pools in. - * @param details - * details to match. All must match for the pool to be returned. 
- * @return List of StoragePoolVO - */ - List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details); - - List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared); - - /** - * Find pool by UUID. - * - * @param uuid - * uuid of pool. - * @return the single StoragePoolVO - */ - PrimaryDataStoreVO findPoolByUUID(String uuid); - - List listByStorageHost(String hostFqdnOrIp); - - PrimaryDataStoreVO findPoolByHostPath(long dcId, Long podId, String host, String path, String uuid); - - List listPoolByHostPath(String host, String path); - - void updateDetails(long poolId, Map details); - - Map getDetails(long poolId); - - List searchForStoragePoolDetails(long poolId, String value); - - List findIfDuplicatePoolsExistByUUID(String uuid); - - List listByStatus(DataStoreStatus status); - - long countPoolsByStatus(DataStoreStatus... statuses); - - List listByStatusInZone(long dcId, DataStoreStatus status); - - List listPoolsByCluster(long clusterId); -} \ No newline at end of file diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java deleted file mode 100644 index faca54b569a..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.datastore.db; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.storage.datastore.DataStoreStatus; -import org.springframework.stereotype.Component; - -import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class PrimaryDataStoreDaoImpl extends GenericDaoBase implements PrimaryDataStoreDao { - protected final SearchBuilder AllFieldSearch; - protected final SearchBuilder DcPodSearch; - protected final SearchBuilder DcPodAnyClusterSearch; - protected final SearchBuilder DeleteLvmSearch; - protected final GenericSearchBuilder StatusCountSearch; - - @Inject protected PrimaryDataStoreDetailsDao _detailsDao; - - private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? 
or storage_pool.pod_id is null) and ("; - private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?"; - private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?"; - - public PrimaryDataStoreDaoImpl() { - AllFieldSearch = createSearchBuilder(); - AllFieldSearch.and("name", AllFieldSearch.entity().getName(), SearchCriteria.Op.EQ); - AllFieldSearch.and("uuid", AllFieldSearch.entity().getUuid(), SearchCriteria.Op.EQ); - AllFieldSearch.and("datacenterId", AllFieldSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - AllFieldSearch.and("hostAddress", AllFieldSearch.entity().getHostAddress(), SearchCriteria.Op.EQ); - AllFieldSearch.and("status", AllFieldSearch.entity().getStatus(), SearchCriteria.Op.EQ); - AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ); - AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ); - AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ); - AllFieldSearch.done(); - - DcPodSearch = createSearchBuilder(); - DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL); - DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ); - DcPodSearch.cp(); - DcPodSearch.and().op("nullcluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.NULL); - DcPodSearch.or("cluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.EQ); - DcPodSearch.cp(); - DcPodSearch.done(); - - DcPodAnyClusterSearch = createSearchBuilder(); - DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL); - 
DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ); - DcPodAnyClusterSearch.cp(); - DcPodAnyClusterSearch.done(); - - DeleteLvmSearch = createSearchBuilder(); - DeleteLvmSearch.and("ids", DeleteLvmSearch.entity().getId(), SearchCriteria.Op.IN); - DeleteLvmSearch.and().op("LVM", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ); - DeleteLvmSearch.or("Filesystem", DeleteLvmSearch.entity().getPoolType(), SearchCriteria.Op.EQ); - DeleteLvmSearch.cp(); - DeleteLvmSearch.done(); - - StatusCountSearch = createSearchBuilder(Long.class); - StatusCountSearch.and("status", StatusCountSearch.entity().getStatus(), SearchCriteria.Op.IN); - StatusCountSearch.select(null, Func.COUNT, null); - StatusCountSearch.done(); - } - - @Override - public List findPoolByName(String name) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("name", name); - return listIncludingRemovedBy(sc); - } - - @Override - public PrimaryDataStoreVO findPoolByUUID(String uuid) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("uuid", uuid); - return findOneIncludingRemovedBy(sc); - } - - @Override - public List findIfDuplicatePoolsExistByUUID(String uuid) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("uuid", uuid); - return listBy(sc); - } - - @Override - public List listByDataCenterId(long datacenterId) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("datacenterId", datacenterId); - return listBy(sc); - } - - @Override - public void updateAvailable(long id, long available) { - PrimaryDataStoreVO pool = createForUpdate(id); - pool.setAvailableBytes(available); - update(id, pool); - } - - @Override - public void updateCapacity(long id, long capacity) { - PrimaryDataStoreVO pool = createForUpdate(id); - pool.setCapacityBytes(capacity); - update(id, pool); - - } - - @Override - public List listByStorageHost(String hostFqdnOrIp) { - SearchCriteria sc = AllFieldSearch.create(); 
- sc.setParameters("hostAddress", hostFqdnOrIp); - return listIncludingRemovedBy(sc); - } - - @Override - public List listByStatus(DataStoreStatus status) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("status", status); - return listBy(sc); - } - - @Override - public List listByStatusInZone(long dcId, DataStoreStatus status) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("status", status); - sc.setParameters("datacenterId", dcId); - return listBy(sc); - } - - @Override - public PrimaryDataStoreVO findPoolByHostPath(long datacenterId, Long podId, String host, String path, String uuid) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("hostAddress", host); - sc.setParameters("path", path); - sc.setParameters("datacenterId", datacenterId); - sc.setParameters("podId", podId); - sc.setParameters("uuid", uuid); - - return findOneBy(sc); - } - - @Override - public List listBy(long datacenterId, long podId, Long clusterId) { - if (clusterId != null) { - SearchCriteria sc = DcPodSearch.create(); - sc.setParameters("datacenterId", datacenterId); - sc.setParameters("podId", podId); - - sc.setParameters("cluster", clusterId); - return listBy(sc); - } else { - SearchCriteria sc = DcPodAnyClusterSearch.create(); - sc.setParameters("datacenterId", datacenterId); - sc.setParameters("podId", podId); - return listBy(sc); - } - } - - @Override - public List listPoolByHostPath(String host, String path) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("hostAddress", host); - sc.setParameters("path", path); - - return listBy(sc); - } - - public PrimaryDataStoreVO listById(Integer id) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("id", id); - - return findOneIncludingRemovedBy(sc); - } - - @Override - @DB - public PrimaryDataStoreVO persist(PrimaryDataStoreVO pool, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - pool = super.persist(pool); - if (details != 
null) { - for (Map.Entry detail : details.entrySet()) { - PrimaryDataStoreDetailVO vo = new PrimaryDataStoreDetailVO(pool.getId(), detail.getKey(), detail.getValue()); - _detailsDao.persist(vo); - } - } - txn.commit(); - return pool; - } - - @DB - @Override - public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details) { - StringBuilder sql = new StringBuilder(DetailsSqlPrefix); - if (clusterId != null) { - sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND ("); - } - for (Map.Entry detail : details.entrySet()) { - sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR "); - } - sql.delete(sql.length() - 4, sql.length()); - sql.append(DetailsSqlSuffix); - Transaction txn = Transaction.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql.toString()); - int i = 1; - pstmt.setLong(i++, dcId); - pstmt.setLong(i++, podId); - if (clusterId != null) { - pstmt.setLong(i++, clusterId); - } - pstmt.setInt(i++, details.size()); - ResultSet rs = pstmt.executeQuery(); - List pools = new ArrayList(); - while (rs.next()) { - pools.add(toEntityBean(rs, false)); - } - return pools; - } catch (SQLException e) { - throw new CloudRuntimeException("Unable to execute " + pstmt, e); - } - } - - protected Map tagsToDetails(String[] tags) { - Map details = new HashMap(tags.length); - for (String tag : tags) { - details.put(tag, "true"); - } - return details; - } - - @Override - public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) { - List storagePools = null; - if (tags == null || tags.length == 0) { - storagePools = listBy(dcId, podId, clusterId); - } else { - Map details = tagsToDetails(tags); - storagePools = findPoolsByDetails(dcId, podId, clusterId, details); - } - - if (shared == null) { - return storagePools; - } else { - List 
filteredStoragePools = new ArrayList(storagePools); - for (PrimaryDataStoreVO pool : storagePools) { - /* - * if (shared != pool.isShared()) { - * filteredStoragePools.remove(pool); } - */ - } - - return filteredStoragePools; - } - } - - @Override - @DB - public List searchForStoragePoolDetails(long poolId, String value) { - - StringBuilder sql = new StringBuilder(FindPoolTagDetails); - - Transaction txn = Transaction.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql.toString()); - pstmt.setLong(1, poolId); - pstmt.setString(2, value); - - ResultSet rs = pstmt.executeQuery(); - List tags = new ArrayList(); - - while (rs.next()) { - tags.add(rs.getString("name")); - } - return tags; - } catch (SQLException e) { - throw new CloudRuntimeException("Unable to execute " + pstmt.toString(), e); - } - - } - - @Override - public void updateDetails(long poolId, Map details) { - if (details != null) { - _detailsDao.update(poolId, details); - } - } - - @Override - public Map getDetails(long poolId) { - return _detailsDao.getDetails(poolId); - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - _detailsDao.configure("DetailsDao", params); - return true; - } - - @Override - public long countPoolsByStatus(DataStoreStatus... 
statuses) { - SearchCriteria sc = StatusCountSearch.create(); - - sc.setParameters("status", (Object[]) statuses); - - List rs = customSearchIncludingRemoved(sc, null); - if (rs.size() == 0) { - return 0; - } - - return rs.get(0); - } - - @Override - public List listPoolsByCluster(long clusterId) { - SearchCriteria sc = AllFieldSearch.create(); - sc.setParameters("clusterId", clusterId); - - return listBy(sc); - } -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 3634b52908a..96d2da357f5 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -26,14 +26,19 @@ import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderVO; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.component.ManagerBase; @Component public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager { + private static final Logger s_logger = Logger + .getLogger(DataStoreProviderManagerImpl.class); @Inject List providers; @Inject @@ -59,8 +64,8 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Override public boolean configure(String name, Map params) throws ConfigurationException { - -/* + Map copyParams = new HashMap(params); + //TODO: hold global lock List providerVos = providerDao.listAll(); for 
(DataStoreProvider provider : providers) { @@ -83,12 +88,20 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto } else { uuid = providerVO.getUuid(); } - params.put("uuid", uuid); - params.put("id", providerVO.getId()); - provider.configure(params); + copyParams.put("uuid", uuid); + copyParams.put("id", providerVO.getId()); providerMap.put(uuid, provider); + try { + boolean registrationResult = provider.configure(copyParams); + if (!registrationResult) { + providerMap.remove(uuid); + } + } catch(Exception e) { + s_logger.debug("configure provider failed", e); + providerMap.remove(uuid); + } } - */ + return true; } @@ -97,4 +110,9 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto DataStoreProviderVO provider = providerDao.findById(id); return providerMap.get(provider.getUuid()); } + + @Override + public DataStoreProvider getDefaultPrimaryDataStoreProvider() { + return this.getDataStoreProvider("ancient primary data store provider"); + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java index 502158cdaaa..d44a40e971f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/ImageDataStoreProvider.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.storage.datastore.provider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; + public interface ImageDataStoreProvider extends DataStoreProvider { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java index dbca549212c..fdf5958f1ab 100644 --- 
a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/PrimaryDataStoreProvider.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.datastore.provider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; + public interface PrimaryDataStoreProvider extends DataStoreProvider { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java index 08f9182f237..fb7dec0fa41 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDao.java @@ -16,10 +16,12 @@ // under the License. package org.apache.cloudstack.storage.db; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; -public interface ObjectInDataStoreDao extends GenericDao, StateDao { +public interface ObjectInDataStoreDao extends GenericDao, StateDao { } diff --git a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java index 4a5a913adca..9965d60b23d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/db/ObjectInDataStoreDaoImpl.java @@ -20,17 +20,17 @@ import java.util.Map; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import 
org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.storage.VolumeVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.UpdateBuilder; @Component public class ObjectInDataStoreDaoImpl extends GenericDaoBase implements ObjectInDataStoreDao { @@ -38,6 +38,8 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase updateStateSearch; @Override public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + updateStateSearch = this.createSearchBuilder(); updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); @@ -47,7 +49,8 @@ public class ObjectInDataStoreDaoImpl extends GenericDaoBase { +public class ObjectInDataStoreVO implements StateObject, DataObjectInStore { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) long id; - @Column(name = "datastore_id") - private long dataStoreId; + @Column(name = "datastore_uuid") + private String dataStoreUuid; @Column(name = "datastore_role") @Enumerated(EnumType.STRING) private DataStoreRole dataStoreRole; - @Column(name = "object_id") - long objectId; + @Column(name = "object_uuid") + String objectUuid; @Column(name = "object_type") @Enumerated(EnumType.STRING) @@ -74,6 +76,15 @@ public class ObjectInDataStoreVO implements StateObject params) { - ImageDataStoreVO store = 
imageStoreDao.findByUuid(params.get("uuid")); + public ImageDataStoreVO createImageDataStore(Map params) { + ImageDataStoreVO store = imageStoreDao.findByUuid((String)params.get("uuid")); if (store != null) { - throw new CloudRuntimeException("duplicate uuid"); + return store; } store = new ImageDataStoreVO(); - store.setName(params.get("name")); - store.setProtocol(params.get("protocol")); - store.setProvider(Long.parseLong(params.get("provider"))); - store.setScope(Enum.valueOf(ScopeType.class, params.get("scope"))); - store.setUuid(params.get("uuid")); + store.setName((String)params.get("name")); + store.setProtocol((String)params.get("protocol")); + store.setProvider((Long)params.get("provider")); + store.setScope((ScopeType)params.get("scope")); + store.setUuid((String)params.get("uuid")); store = imageStoreDao.persist(store); return store; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java index 2bd361f05e9..b6d84cdcef2 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageDataStoreManager.java @@ -18,9 +18,14 @@ */ package org.apache.cloudstack.storage.image.datastore; +import java.util.List; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.image.ImageDataStoreDriver; public interface ImageDataStoreManager { ImageDataStore getImageDataStore(long dataStoreId); + ImageDataStore getImageDataStore(String uuid); + List getList(); boolean registerDriver(String uuid, ImageDataStoreDriver driver); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java deleted file mode 100644 index b5db164055d..00000000000 --- 
a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDao.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.db; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; - -import com.cloud.domain.DomainVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.template.VirtualMachineTemplate.TemplateFilter; -import com.cloud.user.Account; -import com.cloud.utils.Pair; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateDao; - -public interface ImageDataDao extends GenericDao, StateDao { - public List listByPublic(); - - public ImageDataVO findByName(String templateName); - - public ImageDataVO findByTemplateName(String templateName); - - // public void update(ImageDataVO template); - - public List listAllSystemVMTemplates(); - - public List listDefaultBuiltinTemplates(); - - public String getRoutingTemplateUniqueName(); - - public List findIsosByIdAndPath(Long domainId, Long 
accountId, String path); - - public List listReadyTemplates(); - - public List listByAccountId(long accountId); - - public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, - ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags); - - public Set> searchSwiftTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, Map tags); - - public long addTemplateToZone(ImageDataVO tmplt, long zoneId); - - public List listAllInZone(long dataCenterId); - - public List listByHypervisorType(List hyperTypes); - - public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags); - - public List userIsoSearch(boolean listRemoved); - - ImageDataVO findSystemVMTemplate(long zoneId); - - ImageDataVO findSystemVMTemplate(long zoneId, HypervisorType hType); - - ImageDataVO findRoutingTemplate(HypervisorType type); - - List listPrivateTemplatesByHost(Long hostId); - - public Long countTemplatesForAccount(long accountId); - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java deleted file mode 100644 index 301b5861f8c..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataDaoImpl.java +++ /dev/null @@ -1,975 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.image.db; - -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.storage.image.TemplateEvent; -import org.apache.cloudstack.storage.image.TemplateState; -import org.apache.cloudstack.storage.image.format.ISO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.VMTemplateZoneVO; -import com.cloud.storage.dao.VMTemplateDaoImpl; -import com.cloud.storage.dao.VMTemplateDetailsDao; 
-import com.cloud.storage.dao.VMTemplateZoneDao; -import com.cloud.tags.ResourceTagVO; -import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.template.VirtualMachineTemplate.TemplateFilter; -import com.cloud.user.Account; -import com.cloud.utils.Pair; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.UpdateBuilder; -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -public class ImageDataDaoImpl extends GenericDaoBase implements ImageDataDao { - private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); - - @Inject - VMTemplateZoneDao templateZoneDao; - @Inject - VMTemplateDetailsDao templateDetailsDao; - - @Inject - ConfigurationDao configDao; - @Inject - HostDao hostDao; - @Inject - DomainDao domainDao; - @Inject - DataCenterDao dcDao; - - private final String SELECT_TEMPLATE_HOST_REF = "SELECT t.id, h.data_center_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; - - private final String SELECT_TEMPLATE_ZONE_REF = "SELECT t.id, tzr.zone_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t INNER JOIN template_zone_ref tzr on (t.id = tzr.template_id) "; - - private final String SELECT_TEMPLATE_SWIFT_REF = 
"SELECT t.id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " - + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; - protected SearchBuilder TemplateNameSearch; - protected SearchBuilder UniqueNameSearch; - protected SearchBuilder tmpltTypeSearch; - protected SearchBuilder tmpltTypeHyperSearch; - protected SearchBuilder tmpltTypeHyperSearch2; - - protected SearchBuilder AccountIdSearch; - protected SearchBuilder NameSearch; - protected SearchBuilder TmpltsInZoneSearch; - private SearchBuilder PublicSearch; - private SearchBuilder NameAccountIdSearch; - private SearchBuilder PublicIsoSearch; - private SearchBuilder UserIsoSearch; - private GenericSearchBuilder CountTemplatesByAccount; - private SearchBuilder updateStateSearch; - - //ResourceTagsDaoImpl _tagsDao = ComponentInject.inject(ResourceTagsDaoImpl.class); - @Inject - ResourceTagsDaoImpl _tagsDao = null; - private String routerTmpltName; - private String consoleProxyTmpltName; - - protected ImageDataDaoImpl() { - } - - @Override - public List listByPublic() { - SearchCriteria sc = PublicSearch.create(); - sc.setParameters("public", 1); - return listBy(sc); - } - - @Override - public ImageDataVO findByName(String templateName) { - SearchCriteria sc = UniqueNameSearch.create(); - sc.setParameters("uniqueName", templateName); - return findOneIncludingRemovedBy(sc); - } - - @Override - public ImageDataVO findByTemplateName(String templateName) { - SearchCriteria sc = NameSearch.create(); - sc.setParameters("name", templateName); - return findOneIncludingRemovedBy(sc); - } - - @Override - public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags) { - - SearchBuilder sb = null; - if (tags == null || tags.isEmpty()) { - sb = PublicIsoSearch; - } else { - sb = createSearchBuilder(); - sb.and("public", sb.entity().isPublicTemplate(), 
SearchCriteria.Op.EQ); - sb.and("format", sb.entity().getFormat(), SearchCriteria.Op.EQ); - sb.and("type", sb.entity().getTemplateType(), SearchCriteria.Op.EQ); - sb.and("bootable", sb.entity().isBootable(), SearchCriteria.Op.EQ); - sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.EQ); - - SearchBuilder tagSearch = _tagsDao.createSearchBuilder(); - for (int count = 0; count < tags.size(); count++) { - tagSearch.or().op("key" + String.valueOf(count), tagSearch.entity().getKey(), SearchCriteria.Op.EQ); - tagSearch.and("value" + String.valueOf(count), tagSearch.entity().getValue(), SearchCriteria.Op.EQ); - tagSearch.cp(); - } - tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); - sb.groupBy(sb.entity().getId()); - sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); - } - - SearchCriteria sc = sb.create(); - - sc.setParameters("public", 1); - sc.setParameters("format", "ISO"); - sc.setParameters("type", TemplateType.PERHOST.toString()); - if (bootable != null) { - sc.setParameters("bootable", bootable); - } - - if (!listRemoved) { - sc.setParameters("removed", (Object) null); - } - - if (tags != null && !tags.isEmpty()) { - int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.ISO.toString()); - for (String key : tags.keySet()) { - sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); - sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); - count++; - } - } - - return listBy(sc); - } - - @Override - public List userIsoSearch(boolean listRemoved) { - - SearchBuilder sb = null; - sb = UserIsoSearch; - SearchCriteria sc = sb.create(); - - sc.setParameters("format", Storage.ImageFormat.ISO); - sc.setParameters("type", TemplateType.USER.toString()); - - if (!listRemoved) { - sc.setParameters("removed", (Object) null); - } - - return listBy(sc); - } - - @Override - public List 
listAllSystemVMTemplates() { - SearchCriteria sc = tmpltTypeSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - - Filter filter = new Filter(ImageDataVO.class, "id", false, null, null); - return listBy(sc, filter); - } - - @Override - public List listPrivateTemplatesByHost(Long hostId) { - - String sql = "select * from template_host_ref as thr INNER JOIN vm_template as t ON t.id=thr.template_id " - + "where thr.host_id=? and t.public=0 and t.featured=0 and t.type='USER' and t.removed is NULL"; - - List l = new ArrayList(); - - Transaction txn = Transaction.currentTxn(); - - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setLong(1, hostId); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - } catch (Throwable e) { - } - return l; - } - - @Override - public List listReadyTemplates() { - SearchCriteria sc = createSearchCriteria(); - sc.addAnd("ready", SearchCriteria.Op.EQ, true); - sc.addAnd("format", SearchCriteria.Op.NEQ, Storage.ImageFormat.ISO); - return listIncludingRemovedBy(sc); - } - - @Override - public List findIsosByIdAndPath(Long domainId, Long accountId, String path) { - SearchCriteria sc = createSearchCriteria(); - sc.addAnd("iso", SearchCriteria.Op.EQ, true); - if (domainId != null) { - sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - } - if (accountId != null) { - sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); - } - if (path != null) { - sc.addAnd("path", SearchCriteria.Op.EQ, path); - } - return listIncludingRemovedBy(sc); - } - - @Override - public List listByAccountId(long accountId) { - SearchCriteria sc = AccountIdSearch.create(); - sc.setParameters("accountId", accountId); - return listBy(sc); - } - - @Override - public List listByHypervisorType(List hyperTypes) { - SearchCriteria sc = createSearchCriteria(); - hyperTypes.add(HypervisorType.None); - sc.addAnd("hypervisorType", 
SearchCriteria.Op.IN, hyperTypes.toArray()); - return listBy(sc); - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - boolean result = super.configure(name, params); - - PublicSearch = createSearchBuilder(); - PublicSearch.and("public", PublicSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - - routerTmpltName = (String) params.get("routing.uniquename"); - - s_logger.debug("Found parameter routing unique name " + routerTmpltName); - if (routerTmpltName == null) { - routerTmpltName = "routing"; - } - - consoleProxyTmpltName = (String) params.get("consoleproxy.uniquename"); - if (consoleProxyTmpltName == null) { - consoleProxyTmpltName = "routing"; - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Use console proxy template : " + consoleProxyTmpltName); - } - - UniqueNameSearch = createSearchBuilder(); - UniqueNameSearch.and("uniqueName", UniqueNameSearch.entity().getUniqueName(), SearchCriteria.Op.EQ); - NameSearch = createSearchBuilder(); - NameSearch.and("name", NameSearch.entity().getName(), SearchCriteria.Op.EQ); - - NameAccountIdSearch = createSearchBuilder(); - NameAccountIdSearch.and("name", NameAccountIdSearch.entity().getName(), SearchCriteria.Op.EQ); - NameAccountIdSearch.and("accountId", NameAccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - - PublicIsoSearch = createSearchBuilder(); - PublicIsoSearch.and("public", PublicIsoSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("format", PublicIsoSearch.entity().getFormat(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("type", PublicIsoSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("bootable", PublicIsoSearch.entity().isBootable(), SearchCriteria.Op.EQ); - PublicIsoSearch.and("removed", PublicIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ); - - UserIsoSearch = createSearchBuilder(); - UserIsoSearch.and("format", UserIsoSearch.entity().getFormat(), 
SearchCriteria.Op.EQ); - UserIsoSearch.and("type", UserIsoSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - UserIsoSearch.and("removed", UserIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ); - - tmpltTypeHyperSearch = createSearchBuilder(); - tmpltTypeHyperSearch.and("templateType", tmpltTypeHyperSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - SearchBuilder hostHyperSearch = hostDao.createSearchBuilder(); - hostHyperSearch.and("type", hostHyperSearch.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch.and("zoneId", hostHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch.groupBy(hostHyperSearch.entity().getHypervisorType()); - - tmpltTypeHyperSearch.join("tmplHyper", hostHyperSearch, hostHyperSearch.entity().getHypervisorType(), tmpltTypeHyperSearch.entity().getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch.done(); - tmpltTypeHyperSearch.done(); - - tmpltTypeHyperSearch2 = createSearchBuilder(); - tmpltTypeHyperSearch2.and("templateType", tmpltTypeHyperSearch2.entity().getTemplateType(), SearchCriteria.Op.EQ); - tmpltTypeHyperSearch2.and("hypervisorType", tmpltTypeHyperSearch2.entity().getHypervisorType(), SearchCriteria.Op.EQ); - - tmpltTypeSearch = createSearchBuilder(); - tmpltTypeSearch.and("removed", tmpltTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - tmpltTypeSearch.and("templateType", tmpltTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); - - AccountIdSearch = createSearchBuilder(); - AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - AccountIdSearch.and("publicTemplate", AccountIdSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); - AccountIdSearch.done(); - - SearchBuilder tmpltZoneSearch = templateZoneDao.createSearchBuilder(); - tmpltZoneSearch.and("removed", tmpltZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - tmpltZoneSearch.and("zoneId", tmpltZoneSearch.entity().getZoneId(), 
SearchCriteria.Op.EQ); - - TmpltsInZoneSearch = createSearchBuilder(); - TmpltsInZoneSearch.and("removed", TmpltsInZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - TmpltsInZoneSearch.and().op("avoidtype", TmpltsInZoneSearch.entity().getTemplateType(), SearchCriteria.Op.NEQ); - TmpltsInZoneSearch.or("templateType", TmpltsInZoneSearch.entity().getTemplateType(), SearchCriteria.Op.NULL); - TmpltsInZoneSearch.cp(); - TmpltsInZoneSearch.join("tmpltzone", tmpltZoneSearch, tmpltZoneSearch.entity().getTemplateId(), TmpltsInZoneSearch.entity().getId(), JoinBuilder.JoinType.INNER); - tmpltZoneSearch.done(); - TmpltsInZoneSearch.done(); - - CountTemplatesByAccount = createSearchBuilder(Long.class); - CountTemplatesByAccount.select(null, Func.COUNT, null); - CountTemplatesByAccount.and("account", CountTemplatesByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); - CountTemplatesByAccount.and("removed", CountTemplatesByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); - CountTemplatesByAccount.done(); - - updateStateSearch = this.createSearchBuilder(); - updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); - updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); - updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); - updateStateSearch.done(); - return result; - } - - @Override - public String getRoutingTemplateUniqueName() { - return routerTmpltName; - } - - @Override - public Set> searchSwiftTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, Map tags) { - - StringBuilder builder = new StringBuilder(); - if (!permittedAccounts.isEmpty()) { - for (Account permittedAccount : permittedAccounts) { - builder.append(permittedAccount.getAccountId() + 
","); - } - } - - String permittedAccountsStr = builder.toString(); - - if (permittedAccountsStr.length() > 0) { - // chop the "," off - permittedAccountsStr = permittedAccountsStr.substring(0, permittedAccountsStr.length() - 1); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - Set> templateZonePairList = new HashSet>(); - PreparedStatement pstmt = null; - ResultSet rs = null; - String sql = SELECT_TEMPLATE_SWIFT_REF; - try { - String joinClause = ""; - String whereClause = " WHERE t.removed IS NULL"; - - if (isIso) { - whereClause += " AND t.format = 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - joinClause = " INNER JOIN guest_os guestOS on (guestOS.id = t.guest_os_id) INNER JOIN guest_os_hypervisor goh on ( goh.guest_os_id = guestOS.id) "; - whereClause += " AND goh.hypervisor_type = '" + hyperType.toString() + "'"; - } - } else { - whereClause += " AND t.format <> 'ISO'"; - if (hypers.isEmpty()) { - return templateZonePairList; - } else { - StringBuilder relatedHypers = new StringBuilder(); - for (HypervisorType hyper : hypers) { - relatedHypers.append("'"); - relatedHypers.append(hyper.toString()); - relatedHypers.append("'"); - relatedHypers.append(","); - } - relatedHypers.setLength(relatedHypers.length() - 1); - whereClause += " AND t.hypervisor_type IN (" + relatedHypers + ")"; - } - } - joinClause += " INNER JOIN template_swift_ref tsr on (t.id = tsr.template_id)"; - if (keyword != null) { - whereClause += " AND t.name LIKE \"%" + keyword + "%\""; - } else if (name != null) { - whereClause += " AND t.name LIKE \"%" + name + "%\""; - } - - if (bootable != null) { - whereClause += " AND t.bootable = " + bootable; - } - - if (!showDomr) { - whereClause += " AND t.type != '" + Storage.TemplateType.SYSTEM.toString() + "'"; - } - - if (templateFilter == TemplateFilter.featured) { - whereClause += " AND t.public = 1 AND t.featured = 1"; - } else if ((templateFilter == TemplateFilter.self || templateFilter == 
TemplateFilter.selfexecutable) && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - if (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { - joinClause += " INNER JOIN account a on (t.account_id = a.id) INNER JOIN domain d on (a.domain_id = d.id)"; - whereClause += " AND d.path LIKE '" + domain.getPath() + "%'"; - } else { - whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; - } - } else if (templateFilter == TemplateFilter.sharedexecutable && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) { - joinClause += " LEFT JOIN launch_permission lp ON t.id = lp.template_id WHERE" + " (t.account_id IN (" + permittedAccountsStr + ") OR" + " lp.account_id IN (" - + permittedAccountsStr + "))"; - } else { - joinClause += " INNER JOIN account a on (t.account_id = a.id) "; - } - } else if (templateFilter == TemplateFilter.executable && !permittedAccounts.isEmpty()) { - whereClause += " AND (t.public = 1 OR t.account_id IN (" + permittedAccountsStr + "))"; - } else if (templateFilter == TemplateFilter.community) { - whereClause += " AND t.public = 1 AND t.featured = 0"; - } else if (templateFilter == TemplateFilter.all && caller.getType() == Account.ACCOUNT_TYPE_ADMIN) { - } else if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - return templateZonePairList; - } - - sql += joinClause + whereClause + getOrderByLimit(pageSize, startIndex); - pstmt = txn.prepareStatement(sql); - rs = pstmt.executeQuery(); - while (rs.next()) { - Pair templateZonePair = new Pair(rs.getLong(1), -1L); - templateZonePairList.add(templateZonePair); - } - - } catch (Exception e) { - s_logger.warn("Error listing templates", e); - } finally { - try { - if (rs != null) { - rs.close(); - } - if (pstmt != null) { - pstmt.close(); - } - txn.commit(); - } catch (SQLException sqle) { - s_logger.warn("Error in cleaning up", sqle); - } - } - - return 
templateZonePairList; - } - - @Override - public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, - Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr, List permittedAccounts, Account caller, - ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags) { - StringBuilder builder = new StringBuilder(); - if (!permittedAccounts.isEmpty()) { - for (Account permittedAccount : permittedAccounts) { - builder.append(permittedAccount.getAccountId() + ","); - } - } - - String permittedAccountsStr = builder.toString(); - - if (permittedAccountsStr.length() > 0) { - // chop the "," off - permittedAccountsStr = permittedAccountsStr.substring(0, permittedAccountsStr.length() - 1); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - /* Use LinkedHashSet here to guarantee iteration order */ - Set> templateZonePairList = new LinkedHashSet>(); - PreparedStatement pstmt = null; - ResultSet rs = null; - StringBuilder relatedDomainIds = new StringBuilder(); - String sql = SELECT_TEMPLATE_ZONE_REF; - String groupByClause = ""; - try { - // short accountType; - // String accountId = null; - String guestOSJoin = ""; - StringBuilder templateHostRefJoin = new StringBuilder(); - String dataCenterJoin = "", lpjoin = ""; - String tagsJoin = ""; - - if (isIso && !hyperType.equals(HypervisorType.None)) { - guestOSJoin = " INNER JOIN guest_os guestOS on (guestOS.id = t.guest_os_id) INNER JOIN guest_os_hypervisor goh on ( goh.guest_os_id = guestOS.id) "; - } - if (onlyReady) { - templateHostRefJoin.append(" INNER JOIN template_host_ref thr on (t.id = thr.template_id) INNER JOIN host h on (thr.host_id = h.id)"); - sql = SELECT_TEMPLATE_HOST_REF; - groupByClause = " GROUP BY t.id, h.data_center_id "; - } - if ((templateFilter == TemplateFilter.featured) || (templateFilter == TemplateFilter.community)) { - dataCenterJoin = 
" INNER JOIN data_center dc on (h.data_center_id = dc.id)"; - } - - if (templateFilter == TemplateFilter.sharedexecutable) { - lpjoin = " INNER JOIN launch_permission lp ON t.id = lp.template_id "; - } - - if (tags != null && !tags.isEmpty()) { - tagsJoin = " INNER JOIN resource_tags r ON t.id = r.resource_id "; - } - - sql += guestOSJoin + templateHostRefJoin + dataCenterJoin + lpjoin + tagsJoin; - String whereClause = ""; - - // All joins have to be made before we start setting the condition - // settings - if ((listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources || (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.featured))) - && !(caller.getType() != Account.ACCOUNT_TYPE_NORMAL && templateFilter == TemplateFilter.all)) { - whereClause += " INNER JOIN account a on (t.account_id = a.id)"; - if ((templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) - && (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { - whereClause += " INNER JOIN domain d on (a.domain_id = d.id) WHERE d.path LIKE '" + domain.getPath() + "%'"; - if (listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { - whereClause += " AND a.type != " + Account.ACCOUNT_TYPE_PROJECT; - } - } else if (listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { - whereClause += " WHERE a.type != " + Account.ACCOUNT_TYPE_PROJECT; - } - } - - if (!permittedAccounts.isEmpty()) { - for (Account account : permittedAccounts) { - // accountType = account.getType(); - // accountId = Long.toString(account.getId()); - DomainVO accountDomain = domainDao.findById(account.getDomainId()); - - // get all parent domain ID's all the way till root domain - DomainVO domainTreeNode = accountDomain; - while (true) { - relatedDomainIds.append(domainTreeNode.getId()); - 
relatedDomainIds.append(","); - if (domainTreeNode.getParent() != null) { - domainTreeNode = domainDao.findById(domainTreeNode.getParent()); - } else { - break; - } - } - - // get all child domain ID's - if (isAdmin(account.getType())) { - List allChildDomains = domainDao.findAllChildren(accountDomain.getPath(), accountDomain.getId()); - for (DomainVO childDomain : allChildDomains) { - relatedDomainIds.append(childDomain.getId()); - relatedDomainIds.append(","); - } - } - relatedDomainIds.setLength(relatedDomainIds.length() - 1); - } - } - - String attr = " AND "; - if (whereClause.endsWith(" WHERE ")) { - attr += " WHERE "; - } - - if (!isIso) { - if (hypers.isEmpty()) { - return templateZonePairList; - } else { - StringBuilder relatedHypers = new StringBuilder(); - for (HypervisorType hyper : hypers) { - relatedHypers.append("'"); - relatedHypers.append(hyper.toString()); - relatedHypers.append("'"); - relatedHypers.append(","); - } - relatedHypers.setLength(relatedHypers.length() - 1); - whereClause += attr + " t.hypervisor_type IN (" + relatedHypers + ")"; - } - } - - if (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.featured || templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.executable) - && !isAdmin(caller.getType())) { - whereClause += attr + "t.account_id IN (" + permittedAccountsStr + ")"; - } - - if (templateFilter == TemplateFilter.featured) { - whereClause += attr + "t.public = 1 AND t.featured = 1"; - if (!permittedAccounts.isEmpty()) { - whereClause += attr + "(dc.domain_id IN (" + relatedDomainIds + ") OR dc.domain_id is NULL)"; - } - } else if (templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) { - whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; - } else if (templateFilter == TemplateFilter.sharedexecutable) { - whereClause += " AND " + " (t.account_id IN (" + permittedAccountsStr + ") OR" + " lp.account_id IN (" + permittedAccountsStr + 
"))"; - } else if (templateFilter == TemplateFilter.executable && !permittedAccounts.isEmpty()) { - whereClause += attr + "(t.public = 1 OR t.account_id IN (" + permittedAccountsStr + "))"; - } else if (templateFilter == TemplateFilter.community) { - whereClause += attr + "t.public = 1 AND t.featured = 0"; - if (!permittedAccounts.isEmpty()) { - whereClause += attr + "(dc.domain_id IN (" + relatedDomainIds + ") OR dc.domain_id is NULL)"; - } - } else if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN && !isIso) { - return templateZonePairList; - } - - if (tags != null && !tags.isEmpty()) { - whereClause += " AND ("; - boolean first = true; - for (String key : tags.keySet()) { - if (!first) { - whereClause += " OR "; - } - whereClause += "(r.key=\"" + key + "\" and r.value=\"" + tags.get(key) + "\")"; - first = false; - } - whereClause += ")"; - } - - if (whereClause.equals("")) { - whereClause += " WHERE "; - } else if (!whereClause.equals(" WHERE ")) { - whereClause += " AND "; - } - - sql += whereClause + getExtrasWhere(templateFilter, name, keyword, isIso, bootable, hyperType, zoneId, onlyReady, showDomr) + groupByClause + getOrderByLimit(pageSize, startIndex); - - pstmt = txn.prepareStatement(sql); - rs = pstmt.executeQuery(); - - while (rs.next()) { - Pair templateZonePair = new Pair(rs.getLong(1), rs.getLong(2)); - templateZonePairList.add(templateZonePair); - } - // for now, defaulting pageSize to a large val if null; may need to - // revisit post 2.2RC2 - if (isIso && - templateZonePairList.size() < (pageSize != null ? pageSize : 500) && - templateFilter != TemplateFilter.community && - !(templateFilter == TemplateFilter.self) /* TODO: Fix this! && !BaseCmd.isRootAdmin(caller.getType())*/) { // evaluates - // to - // true - // If - // root - // admin - // and - // filter=self - - List publicIsos = publicIsoSearch(bootable, false, tags); - List userIsos = userIsoSearch(false); - - // Listing the ISOs according to the page size.Restricting the - // total no. 
of ISOs on a page - // to be less than or equal to the pageSize parameter - - int i = 0; - - if (startIndex > userIsos.size()) { - i = (int) (startIndex - userIsos.size()); - } - - for (; i < publicIsos.size(); i++) { - if (templateZonePairList.size() >= pageSize) { - break; - } else { - if (keyword != null && publicIsos.get(i).getName().contains(keyword)) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - continue; - } else if (name != null && publicIsos.get(i).getName().contains(name)) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - continue; - } else if (keyword == null && name == null) { - templateZonePairList.add(new Pair(publicIsos.get(i).getId(), null)); - } - } - } - } - } catch (Exception e) { - s_logger.warn("Error listing templates", e); - } finally { - try { - if (rs != null) { - rs.close(); - } - if (pstmt != null) { - pstmt.close(); - } - txn.commit(); - } catch (SQLException sqle) { - s_logger.warn("Error in cleaning up", sqle); - } - } - - return templateZonePairList; - } - - private String getExtrasWhere(TemplateFilter templateFilter, String name, String keyword, boolean isIso, Boolean bootable, HypervisorType hyperType, Long zoneId, boolean onlyReady, - boolean showDomr) { - String sql = ""; - if (keyword != null) { - sql += " t.name LIKE \"%" + keyword + "%\" AND"; - } else if (name != null) { - sql += " t.name LIKE \"%" + name + "%\" AND"; - } - - if (isIso) { - sql += " t.format = 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - sql += " AND goh.hypervisor_type = '" + hyperType.toString() + "'"; - } - } else { - sql += " t.format <> 'ISO'"; - if (!hyperType.equals(HypervisorType.None)) { - sql += " AND t.hypervisor_type = '" + hyperType.toString() + "'"; - } - } - - if (bootable != null) { - sql += " AND t.bootable = " + bootable; - } - - if (onlyReady) { - sql += " AND thr.download_state = '" + Status.DOWNLOADED.toString() + "'" + " AND thr.destroyed=0 "; - if (zoneId != null) { - sql 
+= " AND h.data_center_id = " + zoneId; - } - } else if (zoneId != null) { - sql += " AND tzr.zone_id = " + zoneId + " AND tzr.removed is null"; - } else { - sql += " AND tzr.removed is null "; - } - if (!showDomr) { - sql += " AND t.type != '" + Storage.TemplateType.SYSTEM.toString() + "'"; - } - - sql += " AND t.removed IS NULL"; - - return sql; - } - - private String getOrderByLimit(Long pageSize, Long startIndex) { - Boolean isAscending = Boolean.parseBoolean(configDao.getValue("sortkey.algorithm")); - isAscending = (isAscending == null ? true : isAscending); - - String sql; - if (isAscending) { - sql = " ORDER BY t.sort_key ASC"; - } else { - sql = " ORDER BY t.sort_key DESC"; - } - - if ((pageSize != null) && (startIndex != null)) { - sql += " LIMIT " + startIndex.toString() + "," + pageSize.toString(); - } - return sql; - } - - @Override - @DB - public long addTemplateToZone(ImageDataVO tmplt, long zoneId) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - ImageDataVO tmplt2 = findById(tmplt.getId()); - if (tmplt2 == null) { - if (persist(tmplt) == null) { - throw new CloudRuntimeException("Failed to persist the template " + tmplt); - } - if (tmplt.getDetails() != null) { - templateDetailsDao.persist(tmplt.getId(), tmplt.getDetails()); - } - } - VMTemplateZoneVO tmpltZoneVO = templateZoneDao.findByZoneTemplate(zoneId, tmplt.getId()); - if (tmpltZoneVO == null) { - tmpltZoneVO = new VMTemplateZoneVO(zoneId, tmplt.getId(), new Date()); - templateZoneDao.persist(tmpltZoneVO); - } else { - tmpltZoneVO.setRemoved(null); - tmpltZoneVO.setLastUpdated(new Date()); - templateZoneDao.update(tmpltZoneVO.getId(), tmpltZoneVO); - } - txn.commit(); - - return tmplt.getId(); - } - - @Override - @DB - public List listAllInZone(long dataCenterId) { - SearchCriteria sc = TmpltsInZoneSearch.create(); - sc.setParameters("avoidtype", TemplateType.PERHOST.toString()); - sc.setJoinParameters("tmpltzone", "zoneId", dataCenterId); - return listBy(sc); - } - - @Override 
- public List listDefaultBuiltinTemplates() { - SearchCriteria sc = tmpltTypeSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.BUILTIN); - return listBy(sc); - } - - @Override - public ImageDataVO findSystemVMTemplate(long zoneId) { - SearchCriteria sc = tmpltTypeHyperSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - - // order by descending order of id and select the first (this is going - // to be the latest) - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, 1l)); - - if (tmplts.size() > 0) { - return tmplts.get(0); - } else { - return null; - } - } - - @Override - public ImageDataVO findSystemVMTemplate(long zoneId, HypervisorType hType) { - SearchCriteria sc = tmpltTypeHyperSearch.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - - // order by descending order of id - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, null)); - - for (ImageDataVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hType) { - return tmplt; - } - } - if (tmplts.size() > 0 && hType == HypervisorType.Any) { - return tmplts.get(0); - } - return null; - } - - @Override - public ImageDataVO findRoutingTemplate(HypervisorType hType) { - SearchCriteria sc = tmpltTypeHyperSearch2.create(); - sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - sc.setParameters("hypervisorType", hType); - - // order by descending order of id and select the first (this is going - // to be the latest) - List tmplts = listBy(sc, new Filter(ImageDataVO.class, "id", false, null, 1l)); - - if (tmplts.size() > 0) { - return tmplts.get(0); - } else { - return null; - } - } - - @Override - public Long countTemplatesForAccount(long 
accountId) { - SearchCriteria sc = CountTemplatesByAccount.create(); - sc.setParameters("account", accountId); - return customSearch(sc, null).get(0); - } - - @Override - @DB - public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - ImageDataVO template = createForUpdate(); - template.setRemoved(new Date()); - - ImageDataVO vo = findById(id); - if (vo != null) { - if (vo.getFormat().equalsIgnoreCase(new ISO().toString())) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.ISO); - } else { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Template); - } - } - - boolean result = update(id, template); - txn.commit(); - return result; - } - - private boolean isAdmin(short accountType) { - return ((accountType == Account.ACCOUNT_TYPE_ADMIN) || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); - } - - @Override - public boolean updateState(TemplateState currentState, TemplateEvent event, - TemplateState nextState, ImageDataVO vo, Object data) { - Long oldUpdated = vo.getUpdatedCount(); - Date oldUpdatedTime = vo.getUpdated(); - - - SearchCriteria sc = updateStateSearch.create(); - sc.setParameters("id", vo.getId()); - sc.setParameters("state", currentState); - sc.setParameters("updatedCount", vo.getUpdatedCount()); - - vo.incrUpdatedCount(); - - UpdateBuilder builder = getUpdateBuilder(vo); - builder.set(vo, "state", nextState); - builder.set(vo, "updated", new Date()); - - int rows = update((ImageDataVO) vo, sc); - if (rows == 0 && s_logger.isDebugEnabled()) { - ImageDataVO dbVol = findByIdIncludingRemoved(vo.getId()); - if (dbVol != null) { - StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); - str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") - 
.append(dbVol.getUpdated()); - str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount()) - .append("; updatedTime=").append(vo.getUpdated()); - str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) - .append("; updatedTime=").append(oldUpdatedTime); - } else { - s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); - } - } - return rows > 0; - } -} \ No newline at end of file diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java deleted file mode 100644 index e3ddaed721a..00000000000 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageDataVO.java +++ /dev/null @@ -1,450 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.cloudstack.storage.image.db; - -import java.util.Date; -import java.util.Map; -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.EnumType; -import javax.persistence.Enumerated; -import javax.persistence.Id; -import javax.persistence.Table; -import javax.persistence.TableGenerator; -import javax.persistence.Temporal; -import javax.persistence.TemporalType; -import javax.persistence.Transient; - -import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.storage.image.TemplateState; - -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.fsm.StateObject; - -@Entity -@Table(name = "vm_template") -public class ImageDataVO implements Identity, StateObject { - @Id - @TableGenerator(name = "vm_template_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_template_seq", allocationSize = 1) - @Column(name = "id", nullable = false) - private long id; - - @Column(name = "format") - private String format; - - @Column(name = "unique_name") - private String uniqueName; - - @Column(name = "name") - private String name = null; - - @Column(name = "public") - private boolean publicTemplate = true; - - @Column(name = "featured") - private boolean featured; - - @Column(name = "type") - private Storage.TemplateType templateType; - - @Column(name = "url") - private String url = null; - - @Column(name = "hvm") - private boolean requiresHvm; - - @Column(name = "bits") - private int bits; - - @Temporal(value = TemporalType.TIMESTAMP) - @Column(name = GenericDao.CREATED_COLUMN) - private Date created = null; - - @Column(name = GenericDao.REMOVED) - @Temporal(TemporalType.TIMESTAMP) - private Date removed; - - @Column(name = "account_id") - private long 
accountId; - - @Column(name = "checksum") - private String checksum; - - @Column(name = "display_text", length = 4096) - private String displayText; - - @Column(name = "enable_password") - private boolean enablePassword; - - @Column(name = "guest_os_id") - private long guestOSId; - - @Column(name = "bootable") - private boolean bootable = true; - - @Column(name = "prepopulate") - private boolean prepopulate = false; - - @Column(name = "cross_zones") - private boolean crossZones = false; - - @Column(name = "hypervisor_type") - @Enumerated(value = EnumType.STRING) - private HypervisorType hypervisorType; - - @Column(name = "extractable") - private boolean extractable = true; - - @Column(name = "source_template_id") - private Long sourceTemplateId; - - @Column(name = "template_tag") - private String templateTag; - - @Column(name = "uuid") - private String uuid; - - @Column(name = "sort_key") - private int sortKey; - - @Column(name = "enable_sshkey") - private boolean enableSshKey; - - @Column(name = "image_data_store_id") - private long imageDataStoreId; - - @Column(name = "size") - private Long size; - - @Column(name = "state") - private TemplateState state; - - @Column(name="update_count", updatable = true) - protected long updatedCount; - - @Column(name = "updated") - @Temporal(value = TemporalType.TIMESTAMP) - Date updated; - - @Transient - Map details; - - public String getUniqueName() { - return uniqueName; - } - - public void setUniqueName(String uniqueName) { - this.uniqueName = uniqueName; - } - - public ImageDataVO() { - this.uuid = UUID.randomUUID().toString(); - this.state = TemplateState.Allocated; - this.created = new Date(); - } - - public boolean getEnablePassword() { - return enablePassword; - } - - public String getFormat() { - return format; - } - - public void setEnablePassword(boolean enablePassword) { - this.enablePassword = enablePassword; - } - - public void setFormat(String format) { - this.format = format; - } - - public long getId() { - 
return id; - } - - public TemplateType getTemplateType() { - return templateType; - } - - public void setTemplateType(TemplateType type) { - this.templateType = type; - } - - public boolean requiresHvm() { - return requiresHvm; - } - - public void setRequireHvm(boolean hvm) { - this.requiresHvm = hvm; - } - - public int getBits() { - return bits; - } - - public void setBits(int bits) { - this.bits = bits; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Date getRemoved() { - return removed; - } - - public boolean isPublicTemplate() { - return publicTemplate; - } - - public void setPublicTemplate(boolean publicTemplate) { - this.publicTemplate = publicTemplate; - } - - public boolean isFeatured() { - return featured; - } - - public void setFeatured(boolean featured) { - this.featured = featured; - } - - public Date getCreated() { - return created; - } - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public long getAccountId() { - return accountId; - } - - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - public String getChecksum() { - return checksum; - } - - public void setChecksum(String checksum) { - this.checksum = checksum; - } - - public String getDisplayText() { - return displayText; - } - - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - - public long getGuestOSId() { - return guestOSId; - } - - public void setGuestOSId(long guestOSId) { - this.guestOSId = guestOSId; - } - - public boolean isBootable() { - return bootable; - } - - public void setBootable(boolean bootable) { - this.bootable = bootable; - } - - public void setPrepopulate(boolean prepopulate) { - this.prepopulate = prepopulate; - } - - public boolean isPrepopulate() { - return prepopulate; - } - - public void setCrossZones(boolean crossZones) { - this.crossZones = crossZones; - } - - 
public boolean isCrossZones() { - return crossZones; - } - - public HypervisorType getHypervisorType() { - return hypervisorType; - } - - public void setHypervisorType(HypervisorType hyperType) { - hypervisorType = hyperType; - } - - public boolean isExtractable() { - return extractable; - } - - public void setExtractable(boolean extractable) { - this.extractable = extractable; - } - - public Long getSourceTemplateId() { - return sourceTemplateId; - } - - public void setSourceTemplateId(Long sourceTemplateId) { - this.sourceTemplateId = sourceTemplateId; - } - - public String getTemplateTag() { - return templateTag; - } - - public void setTemplateTag(String templateTag) { - this.templateTag = templateTag; - } - - public long getDomainId() { - return -1; - } - - @Override - public String getUuid() { - return this.uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public Map getDetails() { - return this.details; - } - - public void setDetails(Map details) { - this.details = details; - } - - @Override - public boolean equals(Object that) { - if (this == that) { - return true; - } - if (!(that instanceof VMTemplateVO)) { - return false; - } - VMTemplateVO other = (VMTemplateVO) that; - - return ((this.getUniqueName().equals(other.getUniqueName()))); - } - - @Override - public int hashCode() { - return uniqueName.hashCode(); - } - - @Transient - String toString; - - @Override - public String toString() { - if (toString == null) { - toString = new StringBuilder("Tmpl[").append(id).append("-").append(format).append("-").append(uniqueName).toString(); - } - return toString; - } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public void setSortKey(int key) { - sortKey = key; - } - - public int getSortKey() { - return sortKey; - } - - public boolean getEnableSshKey() { - return enableSshKey; - } - - public void setEnableSshKey(boolean enable) { - enableSshKey = enable; - } - - public Long getImageDataStoreId() { - 
return this.imageDataStoreId; - } - - public void setImageDataStoreId(long dataStoreId) { - this.imageDataStoreId = dataStoreId; - } - - public void setSize(Long size) { - this.size = size; - } - - public Long getSize() { - return this.size; - } - - public TemplateState getState() { - return this.state; - } - - public long getUpdatedCount() { - return this.updatedCount; - } - - public void incrUpdatedCount() { - this.updatedCount++; - } - - public void decrUpdatedCount() { - this.updatedCount--; - } - - public Date getUpdated() { - return updated; - } - - public void setUpdated(Date updated) { - this.updated = updated; - } - -} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java index 422bc066211..908d6d52c20 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/motion/ImageMotionService.java @@ -19,9 +19,9 @@ package org.apache.cloudstack.storage.image.motion; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo; public interface ImageMotionService { diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java new file mode 100644 index 00000000000..cfd9f400839 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -0,0 +1,725 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + 
* or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.motion; + +import java.util.Date; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotAnswer; +import com.cloud.agent.api.BackupSnapshotCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; +import 
com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; +import com.cloud.agent.api.CreateVolumeFromSnapshotAnswer; +import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; +import com.cloud.agent.api.UpgradeSnapshotCommand; +import com.cloud.agent.api.storage.CopyVolumeAnswer; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; +import com.cloud.agent.api.to.S3TO; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.SwiftTO; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.swift.SwiftManager; +import com.cloud.template.TemplateManager; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import 
com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.DiskProfile; + +@Component +public class AncientDataMotionStrategy implements DataMotionStrategy { + private static final Logger s_logger = Logger + .getLogger(AncientDataMotionStrategy.class); + @Inject + TemplateManager templateMgr; + @Inject + VolumeHostDao volumeHostDao; + @Inject + HostDao hostDao; + @Inject + ConfigurationDao configDao; + @Inject + StorageManager storageMgr; + @Inject + VolumeDao volDao; + @Inject + VMTemplateDao templateDao; + @Inject + SnapshotManager snapshotMgr; + @Inject + SnapshotDao snapshotDao; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + DataStoreManager dataStoreMgr; + @Inject + VMTemplateHostDao templateHostDao; + @Inject DiskOfferingDao diskOfferingDao; + @Inject VMTemplatePoolDao templatePoolDao; + @Inject + VolumeManager volumeMgr; + @Inject + private SwiftManager _swiftMgr; + @Inject + private S3Manager _s3Mgr; + + @Override + public boolean canHandle(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return true; + } + + @DB + protected Answer copyVolumeFromImage(DataObject srcData, DataObject destData) { + String value = configDao.getValue(Config.RecreateSystemVmEnabled.key()); + int _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + VolumeHostVO volumeHostVO = volumeHostDao.findByVolumeId(srcData + .getId()); + HostVO secStorage = hostDao.findById(volumeHostVO.getHostId()); + String secondaryStorageURL = secStorage.getStorageUrl(); + String[] volumePath = volumeHostVO.getInstallPath().split("/"); + String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; + StoragePool destPool = (StoragePool) destData.getDataStore(); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(srcData.getId(), + volumeUUID, destPool, secondaryStorageURL, false, + _copyvolumewait); + CopyVolumeAnswer cvAnswer = null; + String errMsg = null; + try { + cvAnswer = 
(CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, + cvCmd); + } catch (StorageUnavailableException e1) { + s_logger.debug("Failed to copy volume " + srcData.getId() + " to " + + destData.getId(), e1); + errMsg = e1.toString(); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + errMsg = cvAnswer.getDetails(); + } + + VolumeVO vol = this.volDao.findById(destData.getId()); + Transaction txn = Transaction.currentTxn(); + txn.start(); + vol.setPath(cvAnswer.getVolumePath()); + vol.setFolder(destPool.getPath()); + vol.setPodId(destPool.getPodId()); + vol.setPoolId(destPool.getId()); + vol.setPodId(destPool.getPodId()); + + this.volDao.update(vol.getId(), vol); + volumeHostDao.remove(volumeHostVO.getId()); + txn.commit(); + return cvAnswer; + } + + private Answer copyTemplate(DataObject srcData, DataObject destData) { + VMTemplateVO template = this.templateDao.findById(srcData.getId()); + templateMgr.prepareTemplateForCreate(template, + (StoragePool) destData.getDataStore()); + return null; + } + + protected Answer copyFromSnapshot(DataObject snapObj, DataObject volObj) { + SnapshotVO snapshot = this.snapshotDao.findById(snapObj.getId()); + StoragePool pool = (StoragePool) volObj.getDataStore(); + String vdiUUID = null; + Long snapshotId = snapshot.getId(); + Long volumeId = snapshot.getVolumeId(); + Long dcId = snapshot.getDataCenterId(); + String secondaryStoragePoolUrl = this.snapshotMgr + .getSecondaryStorageURL(snapshot); + long accountId = snapshot.getAccountId(); + + String backedUpSnapshotUuid = snapshot.getBackupSnapshotId(); + snapshot = snapshotDao.findById(snapshotId); + if (snapshot.getVersion().trim().equals("2.1")) { + VolumeVO volume = this.volDao.findByIdIncludingRemoved(volumeId); + if (volume == null) { + throw new CloudRuntimeException("failed to upgrade snapshot " + + snapshotId + " due to unable to find orignal volume:" + + volumeId + ", try it later "); + } + if (volume.getTemplateId() == null) { + 
snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + VMTemplateVO template = templateDao + .findByIdIncludingRemoved(volume.getTemplateId()); + if (template == null) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to unalbe to find orignal template :" + + volume.getTemplateId() + + ", try it later "); + } + Long templateId = template.getId(); + Long tmpltAccountId = template.getAccountId(); + if (!snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to this snapshot is being used, try it later "); + } + UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, + secondaryStoragePoolUrl, dcId, accountId, volumeId, + templateId, tmpltAccountId, null, + snapshot.getBackupSnapshotId(), snapshot.getName(), + "2.1"); + Answer answer = null; + try { + answer = this.storageMgr.sendToPool(pool, cmd); + } catch (StorageUnavailableException e) { + } finally { + snapshotDao.unlockFromLockTable(snapshotId.toString()); + } + if ((answer != null) && answer.getResult()) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + throw new CloudRuntimeException("Unable to upgrade snapshot from 2.1 to 2.2 for " + + snapshot.getId()); + } + } + } + String basicErrMsg = "Failed to create volume from " + + snapshot.getName() + " on pool " + pool; + + try { + if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { + snapshotMgr.downloadSnapshotsFromSwift(snapshot); + } else if (snapshot.getS3Id() != null && snapshot.getS3Id() != 0) { + snapshotMgr.downloadSnapshotsFromS3(snapshot); + } + String value = configDao + .getValue(Config.CreateVolumeFromSnapshotWait.toString()); + int _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CreateVolumeFromSnapshotWait + .getDefaultValue())); + CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new 
CreateVolumeFromSnapshotCommand( + pool, secondaryStoragePoolUrl, dcId, accountId, volumeId, + backedUpSnapshotUuid, snapshot.getName(), + _createVolumeFromSnapshotWait); + CreateVolumeFromSnapshotAnswer answer; + if (!snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { + throw new CloudRuntimeException("failed to create volume from " + + snapshotId + + " due to this snapshot is being used, try it later "); + } + answer = (CreateVolumeFromSnapshotAnswer) this.storageMgr + .sendToPool(pool, createVolumeFromSnapshotCommand); + if (answer != null && answer.getResult()) { + vdiUUID = answer.getVdi(); + VolumeVO vol = this.volDao.findById(volObj.getId()); + vol.setPath(vdiUUID); + this.volDao.update(vol.getId(), vol); + return null; + } else { + s_logger.error(basicErrMsg + " due to " + + ((answer == null) ? "null" : answer.getDetails())); + throw new CloudRuntimeException(basicErrMsg); + } + } catch (StorageUnavailableException e) { + s_logger.error(basicErrMsg, e); + throw new CloudRuntimeException(basicErrMsg); + } finally { + if (snapshot.getSwiftId() != null) { + snapshotMgr.deleteSnapshotsDirForVolume( + secondaryStoragePoolUrl, dcId, accountId, volumeId); + } + } + } + + protected Answer cloneVolume(DataObject template, DataObject volume) { + VolumeInfo volInfo = (VolumeInfo)volume; + DiskOfferingVO offering = diskOfferingDao.findById(volInfo.getDiskOfferingId()); + VMTemplateStoragePoolVO tmpltStoredOn = templatePoolDao.findByPoolTemplate(template.getDataStore().getId(), template.getId()); + + DiskProfile diskProfile = new DiskProfile(volInfo, offering, + null); + CreateCommand cmd = new CreateCommand(diskProfile, + tmpltStoredOn.getLocalDownloadPath(), + new StorageFilerTO((StoragePool)template.getDataStore())); + Answer answer = null; + StoragePool pool = (StoragePool)volume.getDataStore(); + String errMsg = null; + try { + answer = storageMgr.sendToPool(pool, null, cmd); + } catch (StorageUnavailableException e) { + s_logger.debug("Failed to send to 
storage pool", e); + throw new CloudRuntimeException("Failed to send to storage pool", e); + } + + if (answer.getResult()) { + VolumeVO vol = this.volDao.findById(volume.getId()); + CreateAnswer createAnswer = (CreateAnswer) answer; + vol.setFolder(pool.getPath()); + vol.setPath(createAnswer.getVolume().getPath()); + vol.setSize(createAnswer.getVolume().getSize()); + vol.setPoolType(pool.getPoolType()); + vol.setPoolId(pool.getId()); + vol.setPodId(pool.getPodId()); + this.volDao.update(vol.getId(), vol); + + } else { + if (tmpltStoredOn != null + && (answer instanceof CreateAnswer) + && ((CreateAnswer) answer) + .templateReloadRequested()) { + if (!templateMgr + .resetTemplateDownloadStateOnPool(tmpltStoredOn + .getId())) { + + } + } + errMsg = answer.getDetails(); + } + + return answer; + } + + protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) { + VolumeInfo volume = (VolumeInfo)srcData; + VolumeInfo destVolume = (VolumeInfo)destData; + String secondaryStorageURL = this.templateMgr.getSecondaryStorageURL(volume + .getDataCenterId()); + StoragePool srcPool = (StoragePool)this.dataStoreMgr.getDataStore(volume + .getPoolId(), DataStoreRole.Primary); + + StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(destVolume.getPoolId(), DataStoreRole.Primary); + + String value = this.configDao.getValue(Config.CopyVolumeWait.toString()); + int _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), + volume.getPath(), srcPool, secondaryStorageURL, true, + _copyvolumewait); + CopyVolumeAnswer cvAnswer; + try { + cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage.", + e1); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + 
throw new CloudRuntimeException( + "Failed to copy the volume from the source primary storage pool to secondary storage."); + } + + String secondaryStorageVolumePath = cvAnswer.getVolumePath(); + + cvCmd = new CopyVolumeCommand(volume.getId(), + secondaryStorageVolumePath, destPool, + secondaryStorageURL, false, _copyvolumewait); + try { + cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd); + } catch (StorageUnavailableException e1) { + throw new CloudRuntimeException( + "Failed to copy the volume from secondary storage to the destination primary storage pool."); + } + + if (cvAnswer == null || !cvAnswer.getResult()) { + throw new CloudRuntimeException( + "Failed to copy the volume from secondary storage to the destination primary storage pool."); + } + + VolumeVO destVol = this.volDao.findById(destVolume.getId()); + destVol.setPath(cvAnswer.getVolumePath()); + this.volDao.update(destVol.getId(), destVol); + return cvAnswer; + } + + @Override + public Void copyAsync(DataObject srcData, DataObject destData, + AsyncCompletionCallback callback) { + Answer answer = null; + String errMsg = null; + try { + if (destData.getType() == DataObjectType.VOLUME + && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Image) { + answer = copyVolumeFromImage(srcData, destData); + } else if (destData.getType() == DataObjectType.TEMPLATE + && srcData.getType() == DataObjectType.TEMPLATE) { + answer = copyTemplate(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT + && destData.getType() == DataObjectType.VOLUME) { + answer = copyFromSnapshot(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT + && destData.getType() == DataObjectType.TEMPLATE) { + answer = createTemplateFromSnashot(srcData, destData); + } else if (srcData.getType() == DataObjectType.VOLUME + && destData.getType() == DataObjectType.TEMPLATE) { + answer = createTemplateFromVolume(srcData, destData); 
+ } else if (srcData.getType() == DataObjectType.TEMPLATE + && destData.getType() == DataObjectType.VOLUME) { + answer = cloneVolume(srcData, destData); + } else if (destData.getType() == DataObjectType.VOLUME + && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary) { + answer = copyVolumeBetweenPools(srcData, destData); + } else if (srcData.getType() == DataObjectType.SNAPSHOT && + destData.getType() == DataObjectType.SNAPSHOT) { + answer = copySnapshot(srcData, destData); + } + } catch (Exception e) { + s_logger.debug("copy failed", e); + errMsg = e.toString(); + } + CopyCommandResult result = new CopyCommandResult(null, answer); + result.setResult(errMsg); + callback.complete(result); + + return null; + } + + @DB + protected Answer createTemplateFromSnashot(DataObject srcData, + DataObject destData) { + long snapshotId = srcData.getId(); + SnapshotVO snapshot = snapshotDao.findById(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Unable to find Snapshot for Id " + + srcData.getId()); + } + Long zoneId = snapshot.getDataCenterId(); + HostVO secondaryStorageHost = this.templateMgr + .getSecondaryStorageHost(zoneId); + String secondaryStorageURL = snapshotMgr + .getSecondaryStorageURL(snapshot); + VMTemplateVO template = this.templateDao.findById(destData.getId()); + String name = template.getName(); + String backupSnapshotUUID = snapshot.getBackupSnapshotId(); + if (backupSnapshotUUID == null) { + throw new CloudRuntimeException( + "Unable to create private template from snapshot " + + snapshotId + + " due to there is no backupSnapshotUUID for this snapshot"); + } + + Long dcId = snapshot.getDataCenterId(); + Long accountId = snapshot.getAccountId(); + Long volumeId = snapshot.getVolumeId(); + + String origTemplateInstallPath = null; + List pools = this.storageMgr + .ListByDataCenterHypervisor(zoneId, + snapshot.getHypervisorType()); + if (pools == null || pools.size() == 0) { + throw 
new CloudRuntimeException( + "Unable to find storage pools in zone " + zoneId); + } + StoragePoolVO poolvo = pools.get(0); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + poolvo.getId(), DataStoreRole.Primary); + if (snapshot.getVersion() != null + && snapshot.getVersion().equalsIgnoreCase("2.1")) { + VolumeVO volume = this.volDao.findByIdIncludingRemoved(volumeId); + if (volume == null) { + throw new CloudRuntimeException("failed to upgrade snapshot " + + snapshotId + " due to unable to find orignal volume:" + + volumeId + ", try it later "); + } + if (volume.getTemplateId() == null) { + snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + template = templateDao.findByIdIncludingRemoved(volume + .getTemplateId()); + if (template == null) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + + snapshotId + + " due to unalbe to find orignal template :" + + volume.getTemplateId() + + ", try it later "); + } + Long origTemplateId = template.getId(); + Long origTmpltAccountId = template.getAccountId(); + if (!this.volDao.lockInLockTable(volumeId.toString(), 10)) { + throw new CloudRuntimeException( + "failed to upgrade snapshot " + snapshotId + + " due to volume:" + volumeId + + " is being used, try it later "); + } + UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, + secondaryStorageURL, dcId, accountId, volumeId, + origTemplateId, origTmpltAccountId, null, + snapshot.getBackupSnapshotId(), snapshot.getName(), + "2.1"); + if (!this.volDao.lockInLockTable(volumeId.toString(), 10)) { + throw new CloudRuntimeException( + "Creating template failed due to volume:" + + volumeId + + " is being used, try it later "); + } + Answer answer = null; + try { + answer = this.storageMgr.sendToPool(pool, cmd); + cmd = null; + } catch (StorageUnavailableException e) { + } finally { + this.volDao.unlockFromLockTable(volumeId.toString()); + } + if ((answer != null) && answer.getResult()) { + 
snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); + } else { + throw new CloudRuntimeException( + "Unable to upgrade snapshot"); + } + } + } + if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { + snapshotMgr.downloadSnapshotsFromSwift(snapshot); + } + String value = configDao + .getValue(Config.CreatePrivateTemplateFromSnapshotWait + .toString()); + int _createprivatetemplatefromsnapshotwait = NumbersUtil.parseInt( + value, Integer + .parseInt(Config.CreatePrivateTemplateFromSnapshotWait + .getDefaultValue())); + + CreatePrivateTemplateFromSnapshotCommand cmd = new CreatePrivateTemplateFromSnapshotCommand( + pool, secondaryStorageURL, dcId, accountId, + snapshot.getVolumeId(), backupSnapshotUUID, snapshot.getName(), + origTemplateInstallPath, template.getId(), name, + _createprivatetemplatefromsnapshotwait); + + return sendCommand(cmd, pool, template.getId(), dcId, + secondaryStorageHost.getId()); + } + + @DB + protected Answer sendCommand(Command cmd, StoragePool pool, + long templateId, long zoneId, long hostId) { + + CreatePrivateTemplateAnswer answer = null; + try { + answer = (CreatePrivateTemplateAnswer) this.storageMgr.sendToPool( + pool, cmd); + } catch (StorageUnavailableException e) { + throw new CloudRuntimeException( + "Failed to execute CreatePrivateTemplateFromSnapshotCommand", + e); + } + + if (answer == null || !answer.getResult()) { + return answer; + } + + VMTemplateVO privateTemplate = templateDao.findById(templateId); + String answerUniqueName = answer.getUniqueName(); + if (answerUniqueName != null) { + privateTemplate.setUniqueName(answerUniqueName); + } + ImageFormat format = answer.getImageFormat(); + if (format != null) { + privateTemplate.setFormat(format); + } else { + // This never occurs. + // Specify RAW format makes it unusable for snapshots. 
+ privateTemplate.setFormat(ImageFormat.RAW); + } + + String checkSum = this.templateMgr + .getChecksum(hostId, answer.getPath()); + + Transaction txn = Transaction.currentTxn(); + + txn.start(); + + privateTemplate.setChecksum(checkSum); + templateDao.update(privateTemplate.getId(), privateTemplate); + + // add template zone ref for this template + templateDao.addTemplateToZone(privateTemplate, zoneId); + VMTemplateHostVO templateHostVO = new VMTemplateHostVO(hostId, + privateTemplate.getId()); + templateHostVO.setDownloadPercent(100); + templateHostVO.setDownloadState(Status.DOWNLOADED); + templateHostVO.setInstallPath(answer.getPath()); + templateHostVO.setLastUpdated(new Date()); + templateHostVO.setSize(answer.getVirtualSize()); + templateHostVO.setPhysicalSize(answer.getphysicalSize()); + templateHostDao.persist(templateHostVO); + txn.close(); + return answer; + } + + private Answer createTemplateFromVolume(DataObject srcObj, + DataObject destObj) { + long volumeId = srcObj.getId(); + VolumeVO volume = this.volDao.findById(volumeId); + if (volume == null) { + throw new CloudRuntimeException("Unable to find volume for Id " + + volumeId); + } + long accountId = volume.getAccountId(); + + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + Long zoneId = volume.getDataCenterId(); + HostVO secondaryStorageHost = this.templateMgr + .getSecondaryStorageHost(zoneId); + if (secondaryStorageHost == null) { + throw new CloudRuntimeException( + "Can not find the secondary storage for zoneId " + zoneId); + } + String secondaryStorageURL = secondaryStorageHost.getStorageUrl(); + VMTemplateVO template = this.templateDao.findById(destObj.getId()); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + volume.getPoolId(), DataStoreRole.Primary); + String value = configDao + .getValue(Config.CreatePrivateTemplateFromVolumeWait.toString()); + int _createprivatetemplatefromvolumewait = NumbersUtil.parseInt(value, + 
Integer.parseInt(Config.CreatePrivateTemplateFromVolumeWait + .getDefaultValue())); + + CreatePrivateTemplateFromVolumeCommand cmd = new CreatePrivateTemplateFromVolumeCommand( + pool, secondaryStorageURL, destObj.getId(), accountId, + template.getName(), template.getUniqueName(), volume.getPath(), + vmName, _createprivatetemplatefromvolumewait); + + return sendCommand(cmd, pool, template.getId(), zoneId, + secondaryStorageHost.getId()); + } + + private HostVO getSecHost(long volumeId, long dcId) { + Long id = snapshotDao.getSecHostId(volumeId); + if ( id != null) { + return hostDao.findById(id); + } + return this.templateMgr.getSecondaryStorageHost(dcId); + } + + protected Answer copySnapshot(DataObject srcObject, DataObject destObject) { + SnapshotInfo srcSnapshot = (SnapshotInfo)srcObject; + VolumeInfo baseVolume = srcSnapshot.getBaseVolume(); + Long dcId = baseVolume.getDataCenterId(); + Long accountId = baseVolume.getAccountId(); + + HostVO secHost = getSecHost(baseVolume.getId(), baseVolume.getDataCenterId()); + + String secondaryStoragePoolUrl = secHost.getStorageUrl(); + String snapshotUuid = srcSnapshot.getPath(); + // In order to verify that the snapshot is not empty, + // we check if the parent of the snapshot is not the same as the parent of the previous snapshot. + // We pass the uuid of the previous snapshot to the plugin to verify this. 
+ SnapshotVO prevSnapshot = null; + String prevSnapshotUuid = null; + String prevBackupUuid = null; + + + SwiftTO swift = _swiftMgr.getSwiftTO(); + S3TO s3 = _s3Mgr.getS3TO(); + + long prevSnapshotId = srcSnapshot.getPrevSnapshotId(); + if (prevSnapshotId > 0) { + prevSnapshot = snapshotDao.findByIdIncludingRemoved(prevSnapshotId); + if ( prevSnapshot.getBackupSnapshotId() != null && swift == null) { + if (prevSnapshot.getVersion() != null && prevSnapshot.getVersion().equals("2.2")) { + prevBackupUuid = prevSnapshot.getBackupSnapshotId(); + prevSnapshotUuid = prevSnapshot.getPath(); + } + } else if ((prevSnapshot.getSwiftId() != null && swift != null) + || (prevSnapshot.getS3Id() != null && s3 != null)) { + prevBackupUuid = prevSnapshot.getBackupSnapshotId(); + prevSnapshotUuid = prevSnapshot.getPath(); + } + } + boolean isVolumeInactive = this.volumeMgr.volumeInactive(baseVolume); + String vmName = this.volumeMgr.getVmNameOnVolume(baseVolume); + StoragePool srcPool = (StoragePool)dataStoreMgr.getPrimaryDataStore(baseVolume.getPoolId()); + String value = configDao.getValue(Config.BackupSnapshotWait.toString()); + int _backupsnapshotwait = NumbersUtil.parseInt(value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue())); + BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(secondaryStoragePoolUrl, dcId, accountId, baseVolume.getId(), srcSnapshot.getId(), baseVolume.getPath(), srcPool, snapshotUuid, + srcSnapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); + + if ( swift != null ) { + backupSnapshotCommand.setSwift(swift); + } else if (s3 != null) { + backupSnapshotCommand.setS3(s3); + } + BackupSnapshotAnswer answer = (BackupSnapshotAnswer) this.snapshotMgr.sendToPool(baseVolume, backupSnapshotCommand); + if (answer != null && answer.getResult()) { + SnapshotVO snapshotVO = this.snapshotDao.findById(srcSnapshot.getId()); + if (backupSnapshotCommand.getSwift() != null ) { + 
snapshotVO.setSwiftId(swift.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } else if (backupSnapshotCommand.getS3() != null) { + snapshotVO.setS3Id(s3.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } else { + snapshotVO.setSecHostId(secHost.getId()); + snapshotVO.setBackupSnapshotId(answer.getBackupSnapshotName()); + } + if (answer.isFull()) { + snapshotVO.setPrevSnapshotId(0L); + } + this.snapshotDao.update(srcSnapshot.getId(), snapshotVO); + } + return answer; + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java index 6a7d78a972a..0a91186aaab 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotEntityImpl.java @@ -105,13 +105,6 @@ public class SnapshotEntityImpl implements SnapshotEntity { return null; } - @Override - public Type getType() { - // TODO Auto-generated method stub - return null; - } - - @Override public HypervisorType getHypervisorType() { // TODO Auto-generated method stub @@ -190,4 +183,10 @@ public class SnapshotEntityImpl implements SnapshotEntity { return null; } + @Override + public Type getRecurringType() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java index d50c9a0c8f3..f3e5c4aea50 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java +++ b/engine/storage/src/org/apache/cloudstack/storage/snapshot/SnapshotService.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.storage.snapshot; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; public interface SnapshotService { public SnapshotEntity getSnapshotEntity(long snapshotId); diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index cd67b97b02c..aa47e8f4977 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -21,12 +21,12 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; public class PrimaryDataStoreTO { private final String uuid; private final String name; - private final String type; + private String type; private final long id; public PrimaryDataStoreTO(PrimaryDataStoreInfo dataStore) { this.uuid = dataStore.getUuid(); this.name = dataStore.getName(); - this.type = dataStore.getType(); + // this.type = dataStore.getType(); this.id = dataStore.getId(); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java index ed5990986e5..bc55ea8c3ea 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/to/TemplateTO.java @@ -16,8 +16,8 @@ // under the License. 
package org.apache.cloudstack.storage.to; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.image.datastore.ImageDataStoreInfo; public class TemplateTO { diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java b/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java index 368c33a32bf..b8d0857d495 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/TemplateOnPrimaryDataStoreInfo.java @@ -18,8 +18,8 @@ */ package org.apache.cloudstack.storage.volume; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.image.TemplateInfo; public interface TemplateOnPrimaryDataStoreInfo { public String getPath(); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 20ceaa303fc..c6ca90d1641 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -23,40 +23,38 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; @Component public class PrimaryDataStoreHelper { @Inject private PrimaryDataStoreDao dataStoreDao; - public PrimaryDataStoreVO createPrimaryDataStore(Map params) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findPoolByUUID(params.get("uuid")); + public StoragePoolVO createPrimaryDataStore(Map params) { + StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID((String)params.get("uuid")); if (dataStoreVO != null) { throw new CloudRuntimeException("duplicate uuid: " + params.get("uuid")); } - dataStoreVO = new PrimaryDataStoreVO(); - dataStoreVO.setStorageProviderId(Long.parseLong(params.get("providerId"))); - dataStoreVO.setHostAddress(params.get("server")); - dataStoreVO.setPath(params.get("path")); - dataStoreVO.setPoolType(params.get("protocol")); - dataStoreVO.setPort(Integer.parseInt(params.get("port"))); - dataStoreVO.setName(params.get("name")); - dataStoreVO.setUuid(params.get("uuid")); + dataStoreVO = new StoragePoolVO(); + dataStoreVO.setStorageProviderId(Long.parseLong((String)params.get("providerId"))); + dataStoreVO.setHostAddress((String)params.get("server")); + dataStoreVO.setPath((String)params.get("path")); + dataStoreVO.setPoolType((StoragePoolType)params.get("protocol")); + dataStoreVO.setPort(Integer.parseInt((String)params.get("port"))); + dataStoreVO.setName((String)params.get("name")); + dataStoreVO.setUuid((String)params.get("uuid")); dataStoreVO = dataStoreDao.persist(dataStoreVO); return dataStoreVO; } public boolean deletePrimaryDataStore(long id) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(id); + StoragePoolVO dataStoreVO = dataStoreDao.findById(id); if (dataStoreVO == 
null) { throw new CloudRuntimeException("can't find store: " + id); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java index 45ff1ec2258..63cdb16c596 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDao.java @@ -18,7 +18,7 @@ */ package org.apache.cloudstack.storage.volume.db; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java index b47f08881e1..ad561502266 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java @@ -20,9 +20,9 @@ package org.apache.cloudstack.storage.volume.db; import java.util.Date; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; diff --git 
a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java index 2d355df7e2a..48a9f334a19 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreVO.java @@ -32,7 +32,9 @@ import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import org.apache.cloudstack.storage.volume.ObjectInDataStoreStateMachine; + +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.fsm.StateObject; diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java index 1e12498dff6..d8497e2f3da 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/VolumeDao2Impl.java @@ -24,6 +24,7 @@ import java.util.Date; import java.util.List; import javax.ejb.Local; +import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.type.RootDisk; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; @@ -36,6 +37,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; @@ -63,8 +65,7 @@ public class VolumeDao2Impl extends GenericDaoBase implements Vo protected final SearchBuilder InstanceStatesSearch; protected final SearchBuilder AllFieldsSearch; protected 
GenericSearchBuilder CountByAccount; - //ResourceTagsDaoImpl _tagsDao = ComponentLocator.inject(ResourceTagsDaoImpl.class); - ResourceTagsDaoImpl _tagsDao = null; + @Inject ResourceTagDao _tagsDao = null; protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? and v.mirror_state = ?"; protected static final String SELECT_HYPERTYPE_FROM_VOLUME = "SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?"; diff --git a/engine/storage/volume/pom.xml b/engine/storage/volume/pom.xml index e424cab5d0e..19357ab11e4 100644 --- a/engine/storage/volume/pom.xml +++ b/engine/storage/volume/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java index 9c009c95623..fbfade6c6aa 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/DefaultPrimaryDataStore.java @@ -18,45 +18,50 @@ package org.apache.cloudstack.storage.datastore; import java.io.File; import java.util.ArrayList; +import java.util.Date; import java.util.List; import javax.inject.Inject; -import org.apache.cloudstack.engine.datacenter.entity.api.DataCenterResourceEntity.State; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.snapshot.SnapshotDataFactory; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.apache.log4j.Logger; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.VMTemplateStoragePoolVO; +import 
com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.storage.encoding.EncodingType; public class DefaultPrimaryDataStore implements PrimaryDataStore { private static final Logger s_logger = Logger .getLogger(DefaultPrimaryDataStore.class); protected PrimaryDataStoreDriver driver; - protected PrimaryDataStoreVO pdsv; + protected StoragePoolVO pdsv; @Inject protected PrimaryDataStoreDao dataStoreDao; protected PrimaryDataStoreLifeCycle lifeCycle; @@ -67,15 +72,16 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Inject SnapshotDataFactory snapshotFactory; protected DataStoreProvider provider; - @Inject - private VolumeDao2 volumeDao; + VMTemplatePoolDao templatePoolDao; - protected DefaultPrimaryDataStore() { + private VolumeDao volumeDao; + + public DefaultPrimaryDataStore() { } - public void configure(PrimaryDataStoreVO pdsv, + public void configure(StoragePoolVO pdsv, PrimaryDataStoreDriver driver, DataStoreProvider provider) { this.pdsv = pdsv; this.driver = driver; @@ -83,7 +89,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { } public static DefaultPrimaryDataStore createDataStore( - PrimaryDataStoreVO pdsv, PrimaryDataStoreDriver driver, + StoragePoolVO pdsv, PrimaryDataStoreDriver driver, DataStoreProvider provider) { DefaultPrimaryDataStore dataStore = (DefaultPrimaryDataStore)ComponentContext.inject(DefaultPrimaryDataStore.class); dataStore.configure(pdsv, driver, provider); @@ -109,19 +115,16 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public DataStoreDriver getDriver() { - // TODO Auto-generated method stub return this.driver; } @Override public DataStoreRole getRole() { - // TODO Auto-generated method stub return DataStoreRole.Primary; } @Override public long getId() { - // TODO 
Auto-generated method stub return this.pdsv.getId(); } @@ -143,7 +146,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public Scope getScope() { - PrimaryDataStoreVO vo = dataStoreDao.findById(this.pdsv.getId()); + StoragePoolVO vo = dataStoreDao.findById(this.pdsv.getId()); if (vo.getScope() == ScopeType.CLUSTER) { return new ClusterScope(vo.getClusterId(), vo.getPodId(), vo.getDataCenterId()); @@ -156,7 +159,7 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public boolean isHypervisorSupported(HypervisorType hypervisor) { // TODO Auto-generated method stub - return false; + return true; } @Override @@ -171,28 +174,10 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return false; } - @Override - public long getCapacity() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public long getAvailableCapacity() { - // TODO Auto-generated method stub - return 0; - } @Override public String getUuid() { - // TODO Auto-generated method stub - return null; - } - - @Override - public State getManagedState() { - // TODO Auto-generated method stub - return null; + return this.pdsv.getUuid(); } @Override @@ -201,12 +186,6 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { return null; } - @Override - public String getType() { - // TODO Auto-generated method stub - return null; - } - @Override public PrimaryDataStoreLifeCycle getLifeCycle() { return this.lifeCycle; @@ -214,14 +193,13 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { @Override public boolean exists(DataObject data) { - return (objectInStoreMgr.findObject(data.getId(), data.getType(), - this.getId(), this.getRole()) != null) ? true : false; + return (objectInStoreMgr.findObject(data, data.getDataStore()) != null) ? 
true : false; } @Override public TemplateInfo getTemplate(long templateId) { - ObjectInDataStoreVO obj = objectInStoreMgr.findObject(templateId, DataObjectType.TEMPLATE, this.getId(), this.getRole()); - if (obj == null) { + VMTemplateStoragePoolVO template = templatePoolDao.findByPoolTemplate(this.getId(), templateId); + if (template == null || template.getState() != ObjectInDataStoreStateMachine.State.Ready) { return null; } return imageDataFactory.getTemplate(templateId, this); @@ -238,4 +216,117 @@ public class DefaultPrimaryDataStore implements PrimaryDataStore { // TODO Auto-generated method stub return null; } + + @Override + public DataObject create(DataObject obj) { + //create template on primary storage + if (obj.getType() == DataObjectType.TEMPLATE) { + VMTemplateStoragePoolVO templateStoragePoolRef = templatePoolDao.findByPoolTemplate(this.getId(), obj.getId()); + if (templateStoragePoolRef == null) { + try { + templateStoragePoolRef = new VMTemplateStoragePoolVO(this.getId(), obj.getId()); + templateStoragePoolRef = templatePoolDao.persist(templateStoragePoolRef); + } catch (Throwable t) { + templateStoragePoolRef = templatePoolDao.findByPoolTemplate(this.getId(), obj.getId()); + if (templateStoragePoolRef == null) { + throw new CloudRuntimeException("Failed to create template storage pool entry"); + } + } + } + + } + + return objectInStoreMgr.get(obj, this); + } + + @Override + public boolean delete(DataObject obj) { + // TODO Auto-generated method stub + return false; + } + + @Override + public long getDataCenterId() { + return this.pdsv.getDataCenterId(); + } + + @Override + public String getPath() { + return this.pdsv.getPath(); + } + + @Override + public StoragePoolType getPoolType() { + return this.pdsv.getPoolType(); + } + + @Override + public Date getCreated() { + return this.pdsv.getCreated(); + } + + @Override + public Date getUpdateTime() { + return this.pdsv.getUpdateTime(); + } + + @Override + public long getCapacityBytes() { + return 
this.pdsv.getCapacityBytes(); + } + + @Override + public long getAvailableBytes() { + return this.pdsv.getAvailableBytes(); + } + + @Override + public Long getClusterId() { + return this.pdsv.getClusterId(); + } + + @Override + public String getHostAddress() { + return this.pdsv.getHostAddress(); + } + + @Override + public String getUserInfo() { + return this.pdsv.getUserInfo(); + } + + @Override + public boolean isShared() { + return this.pdsv.getScope() == ScopeType.HOST ? false : true; + } + + @Override + public boolean isLocal() { + return !this.isShared(); + } + + @Override + public StoragePoolStatus getStatus() { + return this.pdsv.getStatus(); + } + + @Override + public int getPort() { + return this.pdsv.getPort(); + } + + @Override + public Long getPodId() { + return this.pdsv.getPodId(); + } + + @Override + public Long getStorageProviderId() { + return this.pdsv.getStorageProviderId(); + } + + @Override + public boolean isInMaintenance() { + return this.getStatus() == StoragePoolStatus.Maintenance ? true : false; + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java new file mode 100644 index 00000000000..440cb8c5ea0 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/AncientPrimaryDataStoreDriverImpl.java @@ -0,0 +1,362 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.driver; + +import java.util.Set; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ManageSnapshotAnswer; +import com.cloud.agent.api.ManageSnapshotCommand; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.ResizeVolumeAnswer; +import com.cloud.agent.api.storage.ResizeVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import 
com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.template.TemplateManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.dao.VMInstanceDao; + +public class AncientPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { + private static final Logger s_logger = Logger + .getLogger(AncientPrimaryDataStoreDriverImpl.class); + @Inject DiskOfferingDao diskOfferingDao; + @Inject VMTemplateDao templateDao; + @Inject VolumeDao volumeDao; + @Inject TemplateManager templateMgr; + @Inject HostDao hostDao; + @Inject StorageManager storageMgr; + @Inject VolumeManager volumeMgr; + @Inject VMInstanceDao vmDao; + @Inject SnapshotDao snapshotDao; + @Inject PrimaryDataStoreDao primaryStoreDao; + @Inject SnapshotManager snapshotMgr; + @Override + public String grantAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean revokeAccess(DataObject data, EndPoint ep) { + // TODO Auto-generated method stub + return false; + } + + @Override + public Set listObjects(DataStore store) { + // TODO Auto-generated method stub + return null; + } + + 
public boolean createVolume( + VolumeInfo volume) throws StorageUnavailableException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating volume: " + volume); + } + + DiskOfferingVO offering = diskOfferingDao.findById(volume.getDiskOfferingId()); + DiskProfile diskProfile = new DiskProfile(volume, offering, + null); + + VMTemplateVO template = null; + if (volume.getTemplateId() != null) { + template = templateDao.findById(volume.getTemplateId()); + } + + StoragePool pool = (StoragePool)volume.getDataStore(); + VolumeVO vol = volumeDao.findById(volume.getId()); + if (pool != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to create in " + pool); + } + vol.setPoolId(pool.getId()); + + CreateCommand cmd = null; + VMTemplateStoragePoolVO tmpltStoredOn = null; + + for (int i = 0; i < 2; i++) { + if (template != null + && template.getFormat() != Storage.ImageFormat.ISO) { + if (pool.getPoolType() == StoragePoolType.CLVM) { + // prepareISOForCreate does what we need, which is to + // tell us where the template is + VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + s_logger.debug("cannot find template " + + template.getId() + " " + + template.getName()); + throw new CloudRuntimeException("cannot find template" + + template.getId() + + template.getName()); + } + HostVO secondaryStorageHost = hostDao + .findById(tmpltHostOn.getHostId()); + String tmpltHostUrl = secondaryStorageHost + .getStorageUrl(); + String fullTmpltUrl = tmpltHostUrl + "/" + + tmpltHostOn.getInstallPath(); + cmd = new CreateCommand(diskProfile, fullTmpltUrl, + new StorageFilerTO(pool)); + } else { + tmpltStoredOn = templateMgr.prepareTemplateForCreate( + template, pool); + if (tmpltStoredOn == null) { + s_logger.debug("Cannot use this pool " + pool + + " because we can't propagate template " + + template); + throw new CloudRuntimeException("Cannot use this pool " + pool + + " because we can't propagate template 
" + + template); + } + cmd = new CreateCommand(diskProfile, + tmpltStoredOn.getLocalDownloadPath(), + new StorageFilerTO(pool)); + } + } else { + if (template != null + && Storage.ImageFormat.ISO == template.getFormat()) { + VMTemplateHostVO tmpltHostOn = templateMgr + .prepareISOForCreate(template, pool); + if (tmpltHostOn == null) { + throw new CloudRuntimeException( + "Did not find ISO in secondry storage in zone " + + pool.getDataCenterId()); + } + } + cmd = new CreateCommand(diskProfile, new StorageFilerTO( + pool)); + } + + Answer answer = storageMgr.sendToPool(pool, null, cmd); + if (answer.getResult()) { + CreateAnswer createAnswer = (CreateAnswer) answer; + vol.setFolder(pool.getPath()); + vol.setPath(createAnswer.getVolume().getPath()); + vol.setSize(createAnswer.getVolume().getSize()); + vol.setPoolType(pool.getPoolType()); + vol.setPoolId(pool.getId()); + vol.setPodId(pool.getPodId()); + this.volumeDao.update(vol.getId(), vol); + return true; + } else { + if (tmpltStoredOn != null + && (answer instanceof CreateAnswer) + && ((CreateAnswer) answer) + .templateReloadRequested()) { + if (!templateMgr + .resetTemplateDownloadStateOnPool(tmpltStoredOn + .getId())) { + break; // break out of template-redeploy retry loop + } + } else { + break; + } + } + } + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Unable to create volume " + volume.getId()); + } + return false; + } + + @Override + public void createAsync(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + String errMsg = null; + if (data.getType() == DataObjectType.VOLUME) { + try { + createVolume((VolumeInfo)data); + } catch (StorageUnavailableException e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } catch (Exception e) { + s_logger.debug("failed to create volume", e); + errMsg = e.toString(); + } + } + CreateCmdResult result = new CreateCmdResult(null, null); + if (errMsg != null) { + result.setResult(errMsg); + } + + 
callback.complete(result); + + } + + @Override + public void deleteAsync(DataObject data, + AsyncCompletionCallback callback) { + + String vmName = null; + VolumeVO vol = this.volumeDao.findById(data.getId()); + + + StoragePool pool = (StoragePool)data.getDataStore(); + + DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); + + CommandResult result = new CommandResult(); + try { + Answer answer = this.storageMgr.sendToPool(pool, cmd); + if (answer != null && !answer.getResult()) { + result.setResult(answer.getDetails()); + s_logger.info("Will retry delete of " + vol + " from " + pool.getId()); + } + } catch (StorageUnavailableException e) { + s_logger.error("Storage is unavailable currently. Will retry delete of " + + vol + " from " + pool.getId(), e); + result.setResult(e.toString()); + } catch (Exception ex) { + s_logger.debug("Unable to destoy volume" + vol + " from " + pool.getId(), ex); + result.setResult(ex.toString()); + } + callback.complete(result); + } + + @Override + public void copyAsync(DataObject srcdata, DataObject destData, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + // TODO Auto-generated method stub + return false; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + CreateCmdResult result = null; + try { + VolumeInfo volume = snapshot.getBaseVolume(); + String vmName = this.volumeMgr.getVmNameOnVolume(volume); + SnapshotVO preSnapshotVO = this.snapshotMgr.getParentSnapshot(volume, snapshot); + String parentSnapshotPath = null; + if (preSnapshotVO != null) { + parentSnapshotPath = preSnapshotVO.getPath(); + } + StoragePool srcPool = (StoragePool)volume.getDataStore(); + + ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshot.getId(), volume.getPath(), srcPool, parentSnapshotPath, snapshot.getName(), vmName); + + ManageSnapshotAnswer answer = 
(ManageSnapshotAnswer) this.snapshotMgr.sendToPool(volume, cmd); + + if ((answer != null) && answer.getResult()) { + result = new CreateCmdResult(answer.getSnapshotPath(), null); + } else { + result = new CreateCmdResult(null, null); + } + } catch (Exception e) { + s_logger.debug("Failed to take snapshot: " + snapshot.getId(), e); + result = new CreateCmdResult(null, null); + result.setResult(e.toString()); + } + callback.complete(result); + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + VolumeObject vol = (VolumeObject)data; + StoragePool pool = (StoragePool)data.getDataStore(); + ResizeVolumePayload resizeParameter = (ResizeVolumePayload)vol.getpayload(); + + ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand( + vol.getPath(), new StorageFilerTO(pool), vol.getSize(), + resizeParameter.newSize, resizeParameter.shrinkOk, resizeParameter.instanceName); + CreateCmdResult result = new CreateCmdResult(null, null); + try { + ResizeVolumeAnswer answer = (ResizeVolumeAnswer) this.storageMgr.sendToPool(pool, + resizeParameter.hosts, resizeCmd); + if (answer != null && answer.getResult()) { + long finalSize = answer.getNewSize(); + s_logger.debug("Resize: volume started at size " + vol.getSize() + + " and ended at size " + finalSize); + + vol.setSize(finalSize); + vol.update(); + } else if (answer != null) { + result.setResult(answer.getDetails()); + } else { + s_logger.debug("return a null answer, mark it as failed for unknown reason"); + result.setResult("return a null answer, mark it as failed for unknown reason"); + } + + } catch (Exception e) { + s_logger.debug("sending resize command failed", e); + result.setResult(e.toString()); + } + + callback.complete(result); + } + +} diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java index dfe4518edab..6d0c2c6862b 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/driver/DefaultPrimaryDataStoreDriverImpl.java @@ -27,6 +27,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcConext; @@ -35,8 +37,6 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -210,13 +210,6 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver return null; } - @Override - public void takeSnapshot(SnapshotInfo snapshot, - AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - @Override public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { @@ -238,5 
+231,19 @@ public class DefaultPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver // TODO Auto-generated method stub } + + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java new file mode 100644 index 00000000000..6154a666b24 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java @@ -0,0 +1,963 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.alert.AlertManager; +import com.cloud.capacity.Capacity; +import com.cloud.capacity.CapacityVO; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.storage.OCFS2Manager; 
+import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolDiscoverer; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.StoragePoolWorkVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.user.UserContext; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +public class AncientPrimaryDataStoreLifeCycleImpl implements + PrimaryDataStoreLifeCycle { + private static final Logger s_logger = Logger + .getLogger(AncientPrimaryDataStoreLifeCycleImpl.class); + @Inject + protected ResourceManager _resourceMgr; + protected List _discoverers; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + AgentManager agentMgr; + @Inject + StorageManager storageMgr; + @Inject + protected CapacityDao _capacityDao; + + @Inject + VolumeDao volumeDao; + @Inject + 
VMInstanceDao vmDao; + @Inject + ManagementServer server; + @Inject + protected VirtualMachineManager vmMgr; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + UserVmDao userVmDao; + @Inject + protected UserDao _userDao; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + + @Override + public DataStore initialize(Map dsInfos) { + Long clusterId = (Long) dsInfos.get("clusterId"); + Long podId = (Long) dsInfos.get("podId"); + Long zoneId = (Long) dsInfos.get("zoneId"); + String url = (String) dsInfos.get("url"); + Long providerId = (Long)dsInfos.get("providerId"); + if (clusterId != null && podId == null) { + throw new InvalidParameterValueException( + "Cluster id requires pod id"); + } + + URI uri = null; + try { + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null) { + throw new InvalidParameterValueException("scheme is null " + + url + ", add nfs:// as a prefix"); + } else if (uri.getScheme().equalsIgnoreCase("nfs")) { + String uriHost = uri.getHost(); + String uriPath = uri.getPath(); + if (uriHost == null || uriPath == null + || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { + throw new InvalidParameterValueException( + "host or path is null, should be nfs://hostname/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be sharedmountpoint://localhost/path"); + } + } else if (uri.getScheme().equalsIgnoreCase("rbd")) { + String uriPath = uri.getPath(); + if (uriPath == null) { + throw new InvalidParameterValueException( + "host or path is null, should be rbd://hostname/pool"); + } + } + } catch (URISyntaxException e) { + throw 
new InvalidParameterValueException(url + + " is not a valid uri"); + } + + String tags = (String) dsInfos.get("tags"); + Map details = (Map) dsInfos + .get("details"); + if (tags != null) { + String[] tokens = tags.split(","); + + for (String tag : tokens) { + tag = tag.trim(); + if (tag.length() == 0) { + continue; + } + details.put(tag, "true"); + } + } + + String scheme = uri.getScheme(); + String storageHost = uri.getHost(); + String hostPath = uri.getPath(); + Object localStorage = dsInfos.get("localStorage"); + if (localStorage != null) { + hostPath = hostPath.replace("/", ""); + } + String userInfo = uri.getUserInfo(); + int port = uri.getPort(); + StoragePoolVO pool = null; + if (s_logger.isDebugEnabled()) { + s_logger.debug("createPool Params @ scheme - " + scheme + + " storageHost - " + storageHost + " hostPath - " + + hostPath + " port - " + port); + } + if (scheme.equalsIgnoreCase("nfs")) { + if (port == -1) { + port = 2049; + } + pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, + storageHost, port, hostPath); + } else if (scheme.equalsIgnoreCase("file")) { + if (port == -1) { + port = 0; + } + pool = new StoragePoolVO(StoragePoolType.Filesystem, + "localhost", 0, hostPath); + } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { + pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, + storageHost, 0, hostPath); + } else if (scheme.equalsIgnoreCase("clvm")) { + pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, + hostPath.replaceFirst("/", "")); + } else if (scheme.equalsIgnoreCase("rbd")) { + if (port == -1) { + port = 6789; + } + pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, + port, hostPath.replaceFirst("/", "")); + pool.setUserInfo(userInfo); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + pool = new StoragePoolVO(StoragePoolType.PreSetup, + storageHost, 0, hostPath); + } else if (scheme.equalsIgnoreCase("iscsi")) { + String[] tokens = hostPath.split("/"); + int lun = 
NumbersUtil.parseInt(tokens[tokens.length - 1], -1); + if (port == -1) { + port = 3260; + } + if (lun != -1) { + if (clusterId == null) { + throw new IllegalArgumentException( + "IscsiLUN need to have clusters specified"); + } + hostPath.replaceFirst("/", ""); + pool = new StoragePoolVO(StoragePoolType.IscsiLUN, + storageHost, port, hostPath); + } else { + for (StoragePoolDiscoverer discoverer : _discoverers) { + Map> pools; + try { + pools = discoverer.find(zoneId, podId, uri, details); + } catch (DiscoveryException e) { + throw new IllegalArgumentException( + "Not enough information for discovery " + uri, + e); + } + if (pools != null) { + Map.Entry> entry = pools + .entrySet().iterator().next(); + pool = entry.getKey(); + details = entry.getValue(); + break; + } + } + } + } else if (scheme.equalsIgnoreCase("iso")) { + if (port == -1) { + port = 2049; + } + pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, + port, hostPath); + } else if (scheme.equalsIgnoreCase("vmfs")) { + pool = new StoragePoolVO(StoragePoolType.VMFS, + "VMFS datastore: " + hostPath, 0, hostPath); + } else if (scheme.equalsIgnoreCase("ocfs2")) { + port = 7777; + pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", + port, hostPath); + } else { + StoragePoolType type = Enum.valueOf(StoragePoolType.class, scheme); + + if (type != null) { + pool = new StoragePoolVO(type, storageHost, + 0, hostPath); + } else { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + } + + if (pool == null) { + s_logger.warn("Unable to figure out the scheme for URI: " + uri); + throw new IllegalArgumentException( + "Unable to figure out the scheme for URI: " + uri); + } + + if (localStorage == null) { + List pools = primaryDataStoreDao + .listPoolByHostPath(storageHost, hostPath); + if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { + Long oldPodId = 
pools.get(0).getPodId(); + throw new CloudRuntimeException("Storage pool " + uri + + " already in use by another pod (id=" + oldPodId + ")"); + } + } + + long poolId = primaryDataStoreDao.getNextInSequence(Long.class, "id"); + Object existingUuid = dsInfos.get("uuid"); + String uuid = null; + + if (existingUuid != null) { + uuid = (String)existingUuid; + } else if (scheme.equalsIgnoreCase("sharedmountpoint") + || scheme.equalsIgnoreCase("clvm")) { + uuid = UUID.randomUUID().toString(); + } else if (scheme.equalsIgnoreCase("PreSetup")) { + uuid = hostPath.replace("/", ""); + } else { + uuid = UUID.nameUUIDFromBytes( + new String(storageHost + hostPath).getBytes()).toString(); + } + + List spHandles = primaryDataStoreDao + .findIfDuplicatePoolsExistByUUID(uuid); + if ((spHandles != null) && (spHandles.size() > 0)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Another active pool with the same uuid already exists"); + } + throw new CloudRuntimeException( + "Another active pool with the same uuid already exists"); + } + + String poolName = (String) dsInfos.get("name"); + if (s_logger.isDebugEnabled()) { + s_logger.debug("In createPool Setting poolId - " + poolId + + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " + + podId + " poolName - " + poolName); + } + + pool.setId(poolId); + pool.setUuid(uuid); + pool.setDataCenterId(zoneId); + pool.setPodId(podId); + pool.setName(poolName); + pool.setClusterId(clusterId); + pool.setStorageProviderId(providerId); + pool.setStatus(StoragePoolStatus.Initialized); + pool = primaryDataStoreDao.persist(pool, details); + + return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + } + + protected boolean createStoragePool(long hostId, StoragePool pool) { + s_logger.debug("creating pool " + pool.getName() + " on host " + + hostId); + if (pool.getPoolType() != StoragePoolType.NetworkFilesystem + && pool.getPoolType() != StoragePoolType.Filesystem + && pool.getPoolType() != StoragePoolType.IscsiLUN + && 
pool.getPoolType() != StoragePoolType.Iscsi + && pool.getPoolType() != StoragePoolType.VMFS + && pool.getPoolType() != StoragePoolType.SharedMountPoint + && pool.getPoolType() != StoragePoolType.PreSetup + && pool.getPoolType() != StoragePoolType.OCFS2 + && pool.getPoolType() != StoragePoolType.RBD + && pool.getPoolType() != StoragePoolType.CLVM) { + s_logger.warn(" Doesn't support storage pool type " + + pool.getPoolType()); + return false; + } + CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + if (answer != null && answer.getResult()) { + return true; + } else { + primaryDataStoreDao.expunge(pool.getId()); + String msg = ""; + if (answer != null) { + msg = "Can not create storage pool through host " + hostId + + " due to " + answer.getDetails(); + s_logger.warn(msg); + } else { + msg = "Can not create storage pool through host " + hostId + + " due to CreateStoragePoolCommand returns null"; + s_logger.warn(msg); + } + throw new CloudRuntimeException(msg); + } + } + + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo) store; + // Check if there is host up in this cluster + List allHosts = _resourceMgr.listAllUpAndEnabledHosts( + Host.Type.Routing, primarystore.getClusterId(), + primarystore.getPodId(), primarystore.getDataCenterId()); + if (allHosts.isEmpty()) { + throw new CloudRuntimeException( + "No host up to associate a storage pool with in cluster " + + primarystore.getClusterId()); + } + + if (primarystore.getPoolType() == StoragePoolType.OCFS2 + && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { + s_logger.warn("Can not create storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } + + boolean success = false; + for (HostVO h : allHosts) { + success = createStoragePool(h.getId(), 
primarystore); + if (success) { + break; + } + } + + s_logger.debug("In createPool Adding the pool to each of the hosts"); + List poolHosts = new ArrayList(); + for (HostVO h : allHosts) { + try { + this.storageMgr.connectHostToSharedPool(h.getId(), + primarystore.getId()); + poolHosts.add(h); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + h + + " and " + primarystore, e); + } + } + + if (poolHosts.isEmpty()) { + s_logger.warn("No host can access storage pool " + primarystore + + " on cluster " + primarystore.getClusterId()); + primaryDataStoreDao.expunge(primarystore.getId()); + return false; + } else { + storageMgr.createCapacityEntry(primarystore.getId()); + } + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + pool.setScope(ScopeType.CLUSTER); + pool.setStatus(StoragePoolStatus.Up); + this.primaryDataStoreDao.update(pool.getId(), pool); + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope) { + List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); + for (HostVO host : hosts) { + try { + this.storageMgr.connectHostToSharedPool(host.getId(), + dataStore.getId()); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + + " and " + dataStore, e); + } + } + StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId()); + + pool.setScope(ScopeType.ZONE); + pool.setStatus(StoragePoolStatus.Up); + this.primaryDataStoreDao.update(pool.getId(), pool); + return true; + } + + @Override + public boolean dettach() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean unmanaged() { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean maintain(long storeId) { + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = 
UserContext.current().getCaller(); + StoragePoolVO pool = this.primaryDataStoreDao.findById(storeId); + try { + StoragePool storagePool = (StoragePool) this.dataStoreMgr + .getDataStore(storeId, DataStoreRole.Primary); + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + pool.setStatus(StoragePoolStatus.Maintenance); + primaryDataStoreDao.update(pool.getId(), pool); + return true; + } else { + // set the pool state to prepare for maintenance + pool.setStatus(StoragePoolStatus.PrepareForMaintenance); + primaryDataStoreDao.update(pool.getId(), pool); + } + // remove heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand( + false, storagePool); + final Answer answer = agentMgr.easySend(host.getId(), cmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool false secceeded"); + } + } + } + // check to see if other ps exist + // if they do, then we can migrate over the system vms to them + // if they dont, then just stop all vms on this one + List upPools = primaryDataStoreDao + .listByStatusInZone(pool.getDataCenterId(), + StoragePoolStatus.Up); + boolean restart = true; + if (upPools == null || upPools.size() == 0) { + restart = false; + } + + // 2. Get a list of all the ROOT volumes within this storage pool + List allVolumes = this.volumeDao.findByPoolId(pool + .getId()); + + // 3. 
Enqueue to the work queue + for (VolumeVO volume : allVolumes) { + VMInstanceVO vmInstance = vmDao + .findById(volume.getInstanceId()); + + if (vmInstance == null) { + continue; + } + + // enqueue sp work + if (vmInstance.getState().equals(State.Running) + || vmInstance.getState().equals(State.Starting) + || vmInstance.getState().equals(State.Stopping)) { + + try { + StoragePoolWorkVO work = new StoragePoolWorkVO( + vmInstance.getId(), pool.getId(), false, false, + server.getId()); + _storagePoolWorkDao.persist(work); + } catch (Exception e) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Work record already exists, re-using by re-setting values"); + } + StoragePoolWorkVO work = _storagePoolWorkDao + .findByPoolIdAndVmId(pool.getId(), + vmInstance.getId()); + work.setStartedAfterMaintenance(false); + work.setStoppedForMaintenance(false); + work.setManagementServerId(server.getId()); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // 4. Process the queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForPrepareForMaintenanceByPoolId(pool + .getId()); + + for (StoragePoolWorkVO work : pendingWork) { + // shut down the running vms + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + // call the consoleproxymanager + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(consoleProxy, true, user, account)) { + String errorMsg = "There was an error stopping the console proxy id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + + if 
(this.vmMgr.advanceStart(consoleProxy, null, user, + account) == null) { + String errorMsg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type uservm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(userVm, true, user, account)) { + String errorMsg = "There was an error stopping the user vm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type secondary storage vm, call the + // secondary storage vm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO secStrgVm = _secStrgDao + .findById(vmInstance.getId()); + if (!vmMgr.advanceStop(secStrgVm, true, user, account)) { + String errorMsg = "There was an error stopping the ssvm id: " + + vmInstance.getId() + + " ,cannot enable storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(secStrgVm, null, user, account) == null) { + String errorMsg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + 
_storagePoolWorkDao.update(work.getId(), work); + } + } + } + + // if the instance is of type domain router vm, call the network + // manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (!vmMgr.advanceStop(domR, true, user, account)) { + String errorMsg = "There was an error stopping the domain router id: " + + vmInstance.getId() + + " ,cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + throw new CloudRuntimeException(errorMsg); + } else { + // update work status + work.setStoppedForMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + + if (restart) { + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String errorMsg = "There was an error starting the domain router id: " + + vmInstance.getId() + + " on another storage pool, cannot enable primary storage maintenance"; + s_logger.warn(errorMsg); + } else { + // update work status + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } + } + + // 5. 
Update the status + pool.setStatus(StoragePoolStatus.Maintenance); + this.primaryDataStoreDao.update(pool.getId(), pool); + + return true; + } catch (Exception e) { + s_logger.error( + "Exception in enabling primary storage maintenance:", e); + setPoolStateToError(pool); + throw new CloudRuntimeException(e.getMessage()); + } + } + + private void setPoolStateToError(StoragePoolVO primaryStorage) { + primaryStorage.setStatus(StoragePoolStatus.ErrorInMaintenance); + this.primaryDataStoreDao.update(primaryStorage.getId(), primaryStorage); + } + + @Override + public boolean cancelMaintain(long storageId) { + // Change the storage state back to up + Long userId = UserContext.current().getCallerUserId(); + User user = _userDao.findById(userId); + Account account = UserContext.current().getCaller(); + StoragePoolVO poolVO = this.primaryDataStoreDao + .findById(storageId); + StoragePool pool = (StoragePool) this.dataStoreMgr.getDataStore( + storageId, DataStoreRole.Primary); + poolVO.setStatus(StoragePoolStatus.Up); + primaryDataStoreDao.update(storageId, poolVO); + + List hosts = _resourceMgr.listHostsInClusterByStatus( + pool.getClusterId(), Status.Up); + if (hosts == null || hosts.size() == 0) { + return true; + } + // add heartbeat + for (HostVO host : hosts) { + ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand( + true, pool); + final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd); + if (answer == null || !answer.getResult()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add failed due to " + + ((answer == null) ? "answer null" : answer + .getDetails())); + } + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("ModifyStoragePool add secceeded"); + } + } + } + + // 2. Get a list of pending work for this queue + List pendingWork = _storagePoolWorkDao + .listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId()); + + // 3. 
work through the queue + for (StoragePoolWorkVO work : pendingWork) { + try { + VMInstanceVO vmInstance = vmDao.findById(work.getVmId()); + + if (vmInstance == null) { + continue; + } + + // if the instance is of type consoleproxy, call the console + // proxy + if (vmInstance.getType().equals( + VirtualMachine.Type.ConsoleProxy)) { + + ConsoleProxyVO consoleProxy = _consoleProxyDao + .findById(vmInstance.getId()); + if (vmMgr.advanceStart(consoleProxy, null, user, account) == null) { + String msg = "There was an error starting the console proxy id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.SecondaryStorageVm)) { + SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance + .getId()); + if (vmMgr.advanceStart(ssVm, null, user, account) == null) { + String msg = "There was an error starting the ssvm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type ssvm, call the ssvm manager + if (vmInstance.getType().equals( + VirtualMachine.Type.DomainRouter)) { + DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); + if (vmMgr.advanceStart(domR, null, user, account) == null) { + String msg = "There was an error starting the domR id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + 
work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + + // if the instance is of type user vm, call the user vm manager + if (vmInstance.getType().equals(VirtualMachine.Type.User)) { + UserVmVO userVm = userVmDao.findById(vmInstance.getId()); + + if (vmMgr.advanceStart(userVm, null, user, account) == null) { + + String msg = "There was an error starting the user vm id: " + + vmInstance.getId() + + " on storage pool, cannot complete primary storage maintenance"; + s_logger.warn(msg); + throw new ExecutionException(msg); + } else { + // update work queue + work.setStartedAfterMaintenance(true); + _storagePoolWorkDao.update(work.getId(), work); + } + } + } catch (Exception e) { + s_logger.debug("Failed start vm", e); + throw new CloudRuntimeException(e.toString()); + } + } + return true; + } + + @DB + @Override + public boolean deleteDataStore(long storeId) { + // for the given pool id, find all records in the storage_pool_host_ref + List hostPoolRecords = this._storagePoolHostDao + .listByPoolId(storeId); + StoragePoolVO poolVO = this.primaryDataStoreDao.findById(storeId); + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(storeId, DataStoreRole.Primary); + boolean deleteFlag = false; + Transaction txn = Transaction.currentTxn(); + try { + // if not records exist, delete the given pool (base case) + if (hostPoolRecords.size() == 0) { + + txn.start(); + poolVO.setUuid(null); + this.primaryDataStoreDao.update(poolVO.getId(), poolVO); + primaryDataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + txn.commit(); + + deleteFlag = true; + return true; + } else { + // Remove the SR associated with the Xenserver + for (StoragePoolHostVO host : hostPoolRecords) { + DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand( + pool); + final Answer answer = agentMgr.easySend(host.getHostId(), + deleteCmd); + + if (answer != null && answer.getResult()) { + deleteFlag = true; + break; + } + } + 
} + } finally { + if (deleteFlag) { + // now delete the storage_pool_host_ref and storage_pool records + txn.start(); + for (StoragePoolHostVO host : hostPoolRecords) { + _storagePoolHostDao.deleteStoragePoolHostDetails( + host.getHostId(), host.getPoolId()); + } + poolVO.setUuid(null); + this.primaryDataStoreDao.update(poolVO.getId(), poolVO); + primaryDataStoreDao.remove(poolVO.getId()); + deletePoolStats(poolVO.getId()); + // Delete op_host_capacity entries + this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, + null, null, null, poolVO.getId()); + txn.commit(); + + s_logger.debug("Storage pool id=" + poolVO.getId() + + " is removed successfully"); + return true; + } else { + // alert that the storage cleanup is required + s_logger.warn("Failed to Delete storage pool id: " + poolVO.getId()); + _alertMgr + .sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, + poolVO.getDataCenterId(), poolVO.getPodId(), + "Unable to delete storage pool id= " + poolVO.getId(), + "Delete storage pool command failed. 
Please check logs."); + } + } + return false; + } + + @DB + private boolean deletePoolStats(Long poolId) { + CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE); + CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, + CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); + Transaction txn = Transaction.currentTxn(); + txn.start(); + if (capacity1 != null) { + _capacityDao.remove(capacity1.getId()); + } + + if (capacity2 != null) { + _capacityDao.remove(capacity2.getId()); + } + + txn.commit(); + return true; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId()); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath()); + _storagePoolHostDao.persist(poolHost); + } + + StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId()); + pool.setScope(scope.getScopeType()); + pool.setAvailableBytes(existingInfo.getAvailableBytes()); + pool.setCapacityBytes(existingInfo.getCapacityBytes()); + pool.setStatus(StoragePoolStatus.Up); + this.primaryDataStoreDao.update(pool.getId(), pool); + this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); + + return true; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java index ffe7efdcda7..5e8727a316a 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/DefaultPrimaryDataStoreLifeCycleImpl.java @@ -26,22 +26,22 @@ import 
javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd; -import org.apache.cloudstack.storage.datastore.DataStoreStatus; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.endpoint.EndPointSelector; -import org.apache.cloudstack.storage.image.datastore.ImageDataStoreHelper; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.StoragePoolStatus; public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -58,9 +58,9 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif } @Override - public DataStore initialize(Map dsInfos) { + public DataStore initialize(Map dsInfos) { - PrimaryDataStoreVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); + StoragePoolVO storeVO = primaryStoreHelper.createPrimaryDataStore(dsInfos); return providerMgr.getPrimaryDataStore(storeVO.getId()); } @@ -83,11 +83,11 @@ public class 
DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStore.getId()); + StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStore.getId()); dataStoreVO.setDataCenterId(scope.getZoneId()); dataStoreVO.setPodId(scope.getPodId()); dataStoreVO.setClusterId(scope.getScopeId()); - dataStoreVO.setStatus(DataStoreStatus.Attaching); + dataStoreVO.setStatus(StoragePoolStatus.Attaching); dataStoreVO.setScope(scope.getScopeType()); dataStoreDao.update(dataStoreVO.getId(), dataStoreVO); @@ -95,7 +95,7 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif attachCluster(dataStore); dataStoreVO = dataStoreDao.findById(dataStore.getId()); - dataStoreVO.setStatus(DataStoreStatus.Up); + dataStoreVO.setStatus(StoragePoolStatus.Up); dataStoreDao.update(dataStoreVO.getId(), dataStoreVO); return true; @@ -114,19 +114,19 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif } @Override - public boolean maintain() { + public boolean maintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean cancelMaintain() { + public boolean cancelMaintain(long storeId) { // TODO Auto-generated method stub return false; } @Override - public boolean deleteDataStore() { + public boolean deleteDataStore(long storeId) { // TODO Auto-generated method stub return false; } @@ -139,4 +139,11 @@ public class DefaultPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLif return false; } + @Override + public boolean attachHost(DataStore store, HostScope scope, + StoragePoolInfo existingInfo) { + // TODO Auto-generated method stub + return false; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java index 1a24d87346e..e181adabb5b 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/DefaultPrimaryDataStoreProviderManagerImpl.java @@ -21,20 +21,22 @@ package org.apache.cloudstack.storage.datastore.manager; import java.util.HashMap; import java.util.Map; +import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.DefaultPrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.db.DataStoreProviderDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreVO; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProvider; -import org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManager; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; -import com.cloud.utils.component.ComponentContext; +import com.cloud.storage.StorageManager; @Component public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProviderManager { @@ -44,16 +46,19 @@ public class DefaultPrimaryDataStoreProviderManagerImpl 
implements PrimaryDataSt DataStoreProviderManager providerManager; @Inject PrimaryDataStoreDao dataStoreDao; - Map driverMaps = new HashMap(); + Map driverMaps; + @Inject StorageManager storageMgr; + @PostConstruct + public void config() { + driverMaps = new HashMap(); + } + @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { - PrimaryDataStoreVO dataStoreVO = dataStoreDao.findById(dataStoreId); + StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId); long providerId = dataStoreVO.getStorageProviderId(); DataStoreProvider provider = providerManager.getDataStoreProviderById(providerId); - /*DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, - driverMaps.get(provider.getUuid()), - provider);*/ DefaultPrimaryDataStore dataStore = DefaultPrimaryDataStore.createDataStore(dataStoreVO, driverMaps.get(provider.getUuid()), provider); return dataStore; } @@ -66,4 +71,15 @@ public class DefaultPrimaryDataStoreProviderManagerImpl implements PrimaryDataSt driverMaps.put(uuid, driver); return true; } + + @Override + public PrimaryDataStore getPrimaryDataStore(String uuid) { + StoragePoolVO dataStoreVO = dataStoreDao.findByUuid(uuid); + return getPrimaryDataStore(dataStoreVO.getId()); + } + + @Override + public boolean registerHostListener(String uuid, HypervisorHostListener listener) { + return storageMgr.registerHostListener(uuid, listener); + } } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls index 9386454efb3..8d7a696957d 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/data model.ucls @@ -1,20 +1,20 @@ @@ -36,18 +36,18 @@ - + - + diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java new file mode 100644 index 00000000000..09e78e45659 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.provider; + +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl; +import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl; +import org.springframework.stereotype.Component; + +import com.cloud.utils.component.ComponentContext; + +@Component +public class AncientPrimaryDataStoreProviderImpl implements + PrimaryDataStoreProvider { + + private final String providerName = "ancient primary data store provider"; + protected PrimaryDataStoreDriver driver; + @Inject + PrimaryDataStoreProviderManager storeMgr; + protected DataStoreLifeCycle lifecyle; + protected String uuid; + protected long id; + @Override + public String getName() { + return providerName; + } + + @Override + public DataStoreLifeCycle getLifeCycle() { + return this.lifecyle; + } + + @Override + public boolean configure(Map params) { + lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class); + driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class); + uuid = (String)params.get("uuid"); + id = (Long)params.get("id"); + storeMgr.registerDriver(uuid, this.driver); + HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); + storeMgr.registerHostListener(uuid, listener); + return true; + } + + @Override + public String getUuid() { + return this.uuid; + } + + @Override + public long getId() { + return this.id; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java new file mode 100644 index 00000000000..f2cb1c45c82 --- /dev/null +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.provider; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +public class DefaultHostListener implements HypervisorHostListener { + private static final Logger s_logger = Logger + .getLogger(DefaultHostListener.class); + @Inject AgentManager agentMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject AlertManager alertMgr; + @Inject StoragePoolHostDao storagePoolHostDao; + @Inject PrimaryDataStoreDao primaryStoreDao; + @Override + public boolean hostConnect(long hostId, long poolId) { + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); + final Answer answer = agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command" + pool.getId()); + } + + if (!answer.getResult()) { + String msg = "Add host failed due to ModifyStoragePoolCommand failed" + answer.getDetails(); + alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); + throw new CloudRuntimeException("Unable establish 
connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails() + pool.getId()); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; + ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; + + StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId); + if (poolHost == null) { + poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + storagePoolHostDao.persist(poolHost); + } else { + poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); + } + + StoragePoolVO poolVO = this.primaryStoreDao.findById(poolId); + poolVO.setAvailableBytes(mspAnswer.getPoolInfo().getAvailableBytes()); + poolVO.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes()); + primaryStoreDao.update(pool.getId(), poolVO); + + s_logger.info("Connection established between " + pool + " host + " + hostId); + return true; + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + // TODO Auto-generated method stub + return false; + } + +} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java index 540ea6381fa..a1402c13b3d 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/DefaultPrimaryDatastoreProviderImpl.java @@ -21,10 +21,11 @@ import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import 
org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; import org.apache.cloudstack.storage.datastore.driver.DefaultPrimaryDataStoreDriverImpl; import org.apache.cloudstack.storage.datastore.lifecycle.DefaultPrimaryDataStoreLifeCycleImpl; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentContext; @@ -35,6 +36,7 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv protected PrimaryDataStoreDriver driver; @Inject PrimaryDataStoreProviderManager storeMgr; + protected DataStoreLifeCycle lifecyle; protected String uuid; protected long id; @@ -52,9 +54,11 @@ public class DefaultPrimaryDatastoreProviderImpl implements PrimaryDataStoreProv public boolean configure(Map params) { lifecyle = ComponentContext.inject(DefaultPrimaryDataStoreLifeCycleImpl.class); driver = ComponentContext.inject(DefaultPrimaryDataStoreDriverImpl.class); + HypervisorHostListener listener = ComponentContext.inject(DefaultHostListener.class); uuid = (String)params.get("uuid"); id = (Long)params.get("id"); storeMgr.registerDriver(uuid, this.driver); + storeMgr.registerHostListener(uuid, listener); return true; } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java index 7679bb3e729..99b34cbcf18 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategy.java @@ -18,9 +18,9 @@ */ package org.apache.cloudstack.storage.volume; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import 
org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; public interface TemplateInstallStrategy { diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java index 80e098d769a..5f1735c180a 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/TemplateInstallStrategyImpl.java @@ -20,24 +20,16 @@ package org.apache.cloudstack.storage.volume; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcConext; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.image.ImageDataFactory; -import org.apache.cloudstack.storage.image.TemplateInfo; import org.apache.cloudstack.storage.motion.DataMotionService; import org.apache.cloudstack.storage.volume.VolumeServiceImpl.CreateBaseImageResult; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.utils.exception.CloudRuntimeException; -import 
com.cloud.utils.fsm.NoTransitionException; - @Component public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { private static final Logger s_logger = Logger @@ -50,7 +42,7 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { ImageDataFactory imageFactory; protected long waitingTime = 1800; // half an hour protected long waitingRetries = 10; - +/* protected TemplateInfo waitingForTemplateDownload(TemplateInfo template, PrimaryDataStore dataStore) { long retries = this.waitingRetries; @@ -106,8 +98,8 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { boolean freshNewTemplate = false; if (obj == null) { try { - /*templateOnPrimaryStoreObj = objectInDataStoreMgr.create( - template, store);*/ + templateOnPrimaryStoreObj = objectInDataStoreMgr.create( + template, store); freshNewTemplate = true; } catch (Throwable e) { obj = objectInDataStoreMgr.findObject(template.getId(), @@ -264,13 +256,10 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { res.setResult(result.getResult()); context.getParentCallback().complete(res); } - ObjectInDataStoreVO obj = objectInDataStoreMgr.findObject( - templateOnPrimaryStoreObj.getId(), templateOnPrimaryStoreObj - .getType(), templateOnPrimaryStoreObj.getDataStore() - .getId(), templateOnPrimaryStoreObj.getDataStore() - .getRole()); + DataObjectInStore obj = objectInDataStoreMgr.findObject( + templateOnPrimaryStoreObj, templateOnPrimaryStoreObj.getDataStore()); + - obj.setInstallPath(result.getPath()); CreateBaseImageResult res = new CreateBaseImageResult( templateOnPrimaryStoreObj); try { @@ -289,6 +278,12 @@ public class TemplateInstallStrategyImpl implements TemplateInstallStrategy { } context.getParentCallback().complete(res); return null; + }*/ + @Override + public Void installAsync(TemplateInfo template, PrimaryDataStore store, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + return null; } } diff 
--git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java index 64af097bb32..e0ecd165d7f 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java @@ -20,21 +20,23 @@ package org.apache.cloudstack.storage.volume; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.storage.datastore.DataStoreManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.VolumeDataFactory; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.springframework.stereotype.Component; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; + @Component public class VolumeDataFactoryImpl implements VolumeDataFactory { @Inject - VolumeDao2 volumeDao; + VolumeDao volumeDao; @Inject ObjectInDataStoreManager objMap; @Inject @@ -42,12 +44,30 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory { @Override public VolumeInfo getVolume(long volumeId, DataStore store) { VolumeVO volumeVO = volumeDao.findById(volumeId); - ObjectInDataStoreVO obj = objMap.findObject(volumeId, 
DataObjectType.VOLUME, store.getId(), store.getRole()); - if (obj == null) { - VolumeObject vol = VolumeObject.getVolumeObject(null, volumeVO); - return vol; - } + VolumeObject vol = VolumeObject.getVolumeObject(store, volumeVO); + + return vol; + } + + @Override + public VolumeInfo getVolume(long volumeId) { + VolumeVO volumeVO = volumeDao.findById(volumeId); + VolumeObject vol = null; + if (volumeVO.getPoolId() == null) { + DataStore store = objMap.findStore(volumeVO.getUuid(), DataObjectType.VOLUME, DataStoreRole.Image); + vol = VolumeObject.getVolumeObject(store, volumeVO); + } else { + DataStore store = this.storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); + vol = VolumeObject.getVolumeObject(store, volumeVO); + } + return vol; + } + + @Override + public VolumeInfo getVolume(DataObject volume, DataStore store) { + VolumeInfo vol = (VolumeObject)getVolume(volume.getId(), store); + vol.addPayload(((VolumeInfo)volume).getpayload()); return vol; } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java index 14d741707b5..d3e8c543b54 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeEntityImpl.java @@ -22,27 +22,24 @@ import java.lang.reflect.Method; import java.util.Date; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import org.apache.cloudstack.engine.cloud.entity.api.SnapshotEntity; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.datacenter.entity.api.StorageEntity; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import 
org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; -import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreEntityImpl; -import org.apache.cloudstack.storage.volume.VolumeService.VolumeApiResult; - -import com.cloud.utils.exception.CloudRuntimeException; public class VolumeEntityImpl implements VolumeEntity { private VolumeInfo volumeInfo; private final VolumeService vs; private VolumeApiResult result; - protected VolumeEntityImpl() { + public VolumeEntityImpl() { this.vs = null; } @@ -167,7 +164,7 @@ public class VolumeEntityImpl implements VolumeEntity { @Override public void destroy() { - AsyncCallFuture future = vs.deleteVolumeAsync(volumeInfo); + /*AsyncCallFuture future = vs.deleteVolumeAsync(volumeInfo); try { result = future.get(); if (!result.isSuccess()) { @@ -177,7 +174,7 @@ public class VolumeEntityImpl implements VolumeEntity { throw new CloudRuntimeException("wait to delete volume info failed", e); } catch (ExecutionException e) { throw new CloudRuntimeException("wait to delete volume failed", e); - } + }*/ } @Override diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java deleted file mode 100644 index bcff312626f..00000000000 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeManagerImpl.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.cloudstack.storage.volume; - -import javax.inject.Inject; - -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeProfile; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; -import org.springframework.stereotype.Component; - -import com.cloud.storage.Volume; -import com.cloud.storage.Volume.Event; -import com.cloud.storage.Volume.State; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; - -@Component -public class VolumeManagerImpl implements VolumeManager { - @Inject - protected VolumeDao2 _volumeDao; - private final StateMachine2 s_fsm = new StateMachine2(); - public VolumeManagerImpl() { - initStateMachine(); - } - - @Override - public VolumeVO allocateDuplicateVolume(VolumeVO oldVol) { - /* - VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize()); - newVol.setTemplateId(oldVol.getTemplateId()); - newVol.setDeviceId(oldVol.getDeviceId()); - newVol.setInstanceId(oldVol.getInstanceId()); - newVol.setRecreatable(oldVol.isRecreatable()); - newVol.setReservationId(oldVol.getReservationId()); - */ - return null; - // return _volumeDao.persist(newVol); - } - - private void initStateMachine() { - 
s_fsm.addTransition(Volume.State.Allocated, Event.CreateRequested, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Allocated, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, Event.OperationRetry, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Creating, Event.OperationFailed, Volume.State.Allocated); - s_fsm.addTransition(Volume.State.Creating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Creating, Event.CreateRequested, Volume.State.Creating); - s_fsm.addTransition(Volume.State.Allocated, Event.UploadRequested, Volume.State.UploadOp); - s_fsm.addTransition(Volume.State.UploadOp, Event.CopyRequested, Volume.State.Creating);// CopyRequested for volume from sec to primary storage - s_fsm.addTransition(Volume.State.Creating, Event.CopySucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Creating, Event.CopyFailed, Volume.State.UploadOp);// Copying volume from sec to primary failed. 
- s_fsm.addTransition(Volume.State.UploadOp, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Ready, Event.DestroyRequested, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroy, Event.ExpungingRequested, Volume.State.Expunging); - s_fsm.addTransition(Volume.State.Ready, Event.SnapshotRequested, Volume.State.Snapshotting); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Snapshotting, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Ready, Event.MigrationRequested, Volume.State.Migrating); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationSucceeded, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Migrating, Event.OperationFailed, Volume.State.Ready); - s_fsm.addTransition(Volume.State.Destroy, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationSucceeded, Volume.State.Destroy); - s_fsm.addTransition(Volume.State.Destroying, Event.OperationFailed, Volume.State.Destroying); - s_fsm.addTransition(Volume.State.Destroying, Event.DestroyRequested, Volume.State.Destroying); - } - - @Override - public StateMachine2 getStateMachine() { - return s_fsm; - } - - @Override - public VolumeVO processEvent(Volume vol, Volume.Event event) throws NoTransitionException { - // _volStateMachine.transitTo(vol, event, null, _volumeDao); - return _volumeDao.findById(vol.getId()); - } - - @Override - public VolumeProfile getProfile(long volumeId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public VolumeVO getVolume(long volumeId) { - // TODO Auto-generated method stub - return null; - } - - @Override - public VolumeVO updateVolume(VolumeVO volume) { - // TODO Auto-generated method stub - return null; - } -} diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java 
b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java index 9e04909135e..ceadb253976 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -16,19 +16,24 @@ // under the License. package org.apache.cloudstack.storage.volume; +import java.util.Date; + import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.disktype.DiskFormat; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.db.ObjectInDataStoreVO; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; import org.apache.log4j.Logger; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; @@ -38,17 +43,16 @@ import com.cloud.utils.storage.encoding.EncodingType; public class VolumeObject implements VolumeInfo { private static final Logger s_logger = Logger.getLogger(VolumeObject.class); protected VolumeVO volumeVO; - private StateMachine2 _volStateMachine; + private StateMachine2 _volStateMachine; protected DataStore dataStore; @Inject - VolumeDao2 volumeDao; - @Inject - VolumeManager volumeMgr; + 
VolumeDao volumeDao; @Inject ObjectInDataStoreManager ojbectInStoreMgr; + private Object payload; - protected VolumeObject() { - + public VolumeObject() { + _volStateMachine = Volume.State.getStateMachine(); } protected void configure(DataStore dataStore, VolumeVO volumeVO) { @@ -70,6 +74,10 @@ public class VolumeObject implements VolumeInfo { public void setPath(String uuid) { volumeVO.setPath(uuid); } + + public void setSize(Long size) { + volumeVO.setSize(size); + } public Volume.State getState() { return volumeVO.getState(); @@ -88,12 +96,11 @@ public class VolumeObject implements VolumeInfo { public long getVolumeId() { return volumeVO.getId(); } - public boolean stateTransit(Volume.Event event) { boolean result = false; - _volStateMachine = volumeMgr.getStateMachine(); try { result = _volStateMachine.transitTo(volumeVO, event, null, volumeDao); + volumeVO = volumeDao.findById(volumeVO.getId()); } catch (NoTransitionException e) { String errorMessage = "Failed to transit volume: " + this.getVolumeId() + ", due to: " + e.toString(); s_logger.debug(errorMessage); @@ -122,7 +129,7 @@ public class VolumeObject implements VolumeInfo { if (this.dataStore == null) { throw new CloudRuntimeException("datastore must be set before using this object"); } - ObjectInDataStoreVO obj = ojbectInStoreMgr.findObject(this.volumeVO.getId(), DataObjectType.VOLUME, this.dataStore.getId(), this.dataStore.getRole()); + DataObjectInStore obj = ojbectInStoreMgr.findObject(this.volumeVO.getUuid(), DataObjectType.VOLUME, this.dataStore.getUuid(), this.dataStore.getRole()); if (obj.getState() != ObjectInDataStoreStateMachine.State.Ready) { return this.dataStore.getUri() + "&" + EncodingType.OBJTYPE + "=" + DataObjectType.VOLUME + @@ -145,4 +152,179 @@ public class VolumeObject implements VolumeInfo { // TODO Auto-generated method stub return null; } + + @Override + public void processEvent( + ObjectInDataStoreStateMachine.Event event) { + if (this.dataStore == null) { + return; + } + try { 
+ Volume.Event volEvent = null; + if (this.dataStore.getRole() == DataStoreRole.Image) { + ojbectInStoreMgr.update(this, event); + if (event == ObjectInDataStoreStateMachine.Event.CreateRequested) { + volEvent = Volume.Event.UploadRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationSuccessed) { + volEvent = Volume.Event.CopySucceeded; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationFailed) { + volEvent = Volume.Event.CopyFailed; + } + } else { + if (event == ObjectInDataStoreStateMachine.Event.CreateRequested || + event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { + volEvent = Volume.Event.CreateRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.CopyingRequested) { + volEvent = Volume.Event.CopyRequested; + } + } + + if (event == ObjectInDataStoreStateMachine.Event.DestroyRequested) { + volEvent = Volume.Event.DestroyRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.ExpungeRequested) { + volEvent = Volume.Event.ExpungingRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationSuccessed) { + volEvent = Volume.Event.OperationSucceeded; + } else if (event == ObjectInDataStoreStateMachine.Event.OperationFailed) { + volEvent = Volume.Event.OperationFailed; + } else if (event == ObjectInDataStoreStateMachine.Event.ResizeRequested) { + volEvent = Volume.Event.ResizeRequested; + } + this.stateTransit(volEvent); + } catch (Exception e) { + s_logger.debug("Failed to update state", e); + throw new CloudRuntimeException("Failed to update state:" + e.toString()); + } + + } + + @Override + public String getName() { + return this.volumeVO.getName(); + } + + @Override + public Long getInstanceId() { + return this.volumeVO.getInstanceId(); + } + + @Override + public String getFolder() { + return this.volumeVO.getFolder(); + } + + @Override + public String getPath() { + return this.volumeVO.getPath(); + } + + @Override + public Long getPodId() { + return 
this.volumeVO.getPodId(); + } + + @Override + public long getDataCenterId() { + return this.volumeVO.getDataCenterId(); + } + + @Override + public Type getVolumeType() { + return this.volumeVO.getVolumeType(); + } + + @Override + public Long getPoolId() { + return this.volumeVO.getPoolId(); + } + + @Override + public Date getAttached() { + return this.volumeVO.getAttached(); + } + + @Override + public Long getDeviceId() { + return this.volumeVO.getDeviceId(); + } + + @Override + public Date getCreated() { + return this.volumeVO.getCreated(); + } + + @Override + public long getDiskOfferingId() { + return this.volumeVO.getDiskOfferingId(); + } + + @Override + public String getChainInfo() { + return this.volumeVO.getChainInfo(); + } + + @Override + public boolean isRecreatable() { + return this.volumeVO.isRecreatable(); + } + + @Override + public long getUpdatedCount() { + return this.volumeVO.getUpdatedCount(); + } + + @Override + public void incrUpdatedCount() { + this.volumeVO.incrUpdatedCount(); + } + + @Override + public Date getUpdated() { + return this.volumeVO.getUpdated(); + } + + @Override + public String getReservationId() { + return this.volumeVO.getReservationId(); + } + + @Override + public void setReservationId(String reserv) { + this.volumeVO.setReservationId(reserv); + } + + @Override + public long getAccountId() { + return this.volumeVO.getAccountId(); + } + + @Override + public long getDomainId() { + return this.volumeVO.getDomainId(); + } + + @Override + public Long getTemplateId() { + return this.volumeVO.getTemplateId(); + } + + @Override + public void addPayload(Object data) { + this.payload = data; + } + + @Override + public Object getpayload() { + return this.payload; + } + + @Override + public HypervisorType getHypervisorType() { + return this.volumeDao.getHypervisorType(this.volumeVO.getId()); + } + + @Override + public Long getLastPoolId() { + return this.volumeVO.getLastPoolId(); + } } diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 8cfbae455e7..32e7d274f01 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -22,12 +22,16 @@ import javax.inject.Inject; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.type.VolumeType; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -36,22 +40,28 @@ import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; 
-import org.apache.cloudstack.storage.image.TemplateInfo; -import org.apache.cloudstack.storage.image.motion.ImageMotionService; -import org.apache.cloudstack.storage.volume.db.VolumeDao2; -import org.apache.cloudstack.storage.volume.db.VolumeVO; +import org.apache.cloudstack.storage.motion.DataMotionService; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.db.DB; - -//1. change volume state -//2. orchestrator of volume, control most of the information of volume, storage pool id, voluem state, scope etc. +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; @Component public class VolumeServiceImpl implements VolumeService { + private static final Logger s_logger = Logger + .getLogger(VolumeServiceImpl.class); @Inject - VolumeDao2 volDao; + VolumeDao volDao; @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject @@ -59,27 +69,31 @@ public class VolumeServiceImpl implements VolumeService { @Inject DataObjectManager dataObjectMgr; @Inject - ImageMotionService imageMotion; + DataMotionService motionSrv; @Inject TemplateInstallStrategy templateInstallStrategy; + @Inject + VolumeDataFactory volFactory; + @Inject SnapshotManager snapshotMgr; + @Inject VMInstanceDao vmDao; public VolumeServiceImpl() { } private class CreateVolumeContext extends AsyncRpcConext { - private VolumeObject volume; + private DataObject volume; private AsyncCallFuture future; /** * @param callback */ - public CreateVolumeContext(AsyncCompletionCallback callback, VolumeObject volume, AsyncCallFuture future) { + public CreateVolumeContext(AsyncCompletionCallback 
callback, DataObject volume, AsyncCallFuture future) { super(callback); this.volume = volume; this.future = future; } - public VolumeObject getVolume() { + public DataObject getVolume() { return this.volume; } @@ -89,49 +103,35 @@ public class VolumeServiceImpl implements VolumeService { } - - @Override - public AsyncCallFuture createVolumeAsync(VolumeInfo volume, long dataStoreId) { - PrimaryDataStore dataStore = dataStoreMgr.getPrimaryDataStore(dataStoreId); + public AsyncCallFuture createVolumeAsync(VolumeInfo volume, DataStore dataStore) { AsyncCallFuture future = new AsyncCallFuture(); - VolumeApiResult result = new VolumeApiResult(volume); - - if (dataStore == null) { - result.setResult("Can't find dataStoreId: " + dataStoreId); - future.complete(result); - return future; - } + DataObject volumeOnStore = dataStore.create(volume); + volumeOnStore.processEvent(Event.CreateOnlyRequested); - if (dataStore.exists(volume)) { - result.setResult("Volume: " + volume.getId() + " already exists on primary data store: " + dataStoreId); - future.complete(result); - return future; - } - - VolumeObject vo = (VolumeObject) volume; - vo.stateTransit(Volume.Event.CreateRequested); - - CreateVolumeContext context = new CreateVolumeContext(null, vo, future); + CreateVolumeContext context = new CreateVolumeContext(null, volumeOnStore, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createVolumeCallback(null, null)) .setContext(context); - dataObjectMgr.createAsync(volume, dataStore, caller, true); + dataStore.getDriver().createAsync(volumeOnStore, caller); return future; } protected Void createVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { CreateCmdResult result = callback.getResult(); - VolumeObject vo = context.getVolume(); - VolumeApiResult volResult = new VolumeApiResult(vo); + DataObject vo = context.getVolume(); + String errMsg = null; if (result.isSuccess()) { - 
vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); } else { - vo.stateTransit(Volume.Event.OperationFailed); - volResult.setResult(result.getResult()); + vo.processEvent(Event.OperationFailed); + errMsg = result.getResult(); + } + VolumeApiResult volResult = new VolumeApiResult((VolumeObject)vo); + if (errMsg != null) { + volResult.setResult(errMsg); } - context.getFuture().complete(volResult); return null; } @@ -159,26 +159,47 @@ public class VolumeServiceImpl implements VolumeService { @DB @Override - public AsyncCallFuture deleteVolumeAsync(VolumeInfo volume) { - VolumeObject vo = (VolumeObject)volume; + public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult result = new VolumeApiResult(volume); - - DataStore dataStore = vo.getDataStore(); - vo.stateTransit(Volume.Event.DestroyRequested); - if (dataStore == null) { - vo.stateTransit(Volume.Event.OperationSucceeded); - volDao.remove(vo.getId()); + if (volume.getDataStore() == null) { + this.volDao.remove(volume.getId()); future.complete(result); return future; } + String vmName = null; + VolumeVO vol = this.volDao.findById(volume.getId()); + if (vol.getVolumeType() == Type.ROOT && vol.getInstanceId() != null) { + VirtualMachine vm = vmDao.findByIdIncludingRemoved(vol + .getInstanceId()); + if (vm != null) { + vmName = vm.getInstanceName(); + } + } + + String volumePath = vol.getPath(); + Long poolId = vol.getPoolId(); + if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Marking volume that was never created as destroyed: " + + vol); + } + this.volDao.remove(vol.getId()); + future.complete(result); + return future; + } + VolumeObject vo = (VolumeObject)volume; + + volume.processEvent(Event.ExpungeRequested); + + DeleteVolumeContext context = new DeleteVolumeContext(null, vo, future); AsyncCallbackDispatcher caller = 
AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteVolumeCallback(null, null)) .setContext(context); - dataObjectMgr.deleteAsync(volume, caller); + volume.getDataStore().getDriver().deleteAsync(volume, caller); return future; } @@ -187,10 +208,10 @@ public class VolumeServiceImpl implements VolumeService { VolumeObject vo = context.getVolume(); VolumeApiResult apiResult = new VolumeApiResult(vo); if (result.isSuccess()) { - vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); volDao.remove(vo.getId()); } else { - vo.stateTransit(Volume.Event.OperationFailed); + vo.processEvent(Event.OperationFailed); apiResult.setResult(result.getResult()); } context.getFuture().complete(apiResult); @@ -203,24 +224,6 @@ public class VolumeServiceImpl implements VolumeService { return false; } - @Override - public boolean createVolumeFromSnapshot(long volumeId, long snapshotId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public boolean rokeAccess(long volumeId, long endpointId) { - // TODO Auto-generated method stub - return false; - } - - @Override - public VolumeEntity allocateVolumeInDb(long size, VolumeType type, String volName, Long templateId) { - VolumeVO vo = volDao.allocVolume(size, type, volName, templateId); - return new VolumeEntityImpl(VolumeObject.getVolumeObject(null, vo), this); - } - @Override public VolumeEntity getVolumeEntity(long volumeId) { VolumeVO vo = volDao.findById(volumeId); @@ -236,25 +239,21 @@ public class VolumeServiceImpl implements VolumeService { } } - @Override - public String grantAccess(VolumeInfo volume, EndPoint endpointId) { - // TODO Auto-generated method stub - return null; - } - class CreateBaseImageContext extends AsyncRpcConext { private final VolumeInfo volume; private final PrimaryDataStore dataStore; private final TemplateInfo srcTemplate; private final AsyncCallFuture future; + final DataObject destObj; public 
CreateBaseImageContext(AsyncCompletionCallback callback, VolumeInfo volume, PrimaryDataStore datastore, TemplateInfo srcTemplate, - AsyncCallFuture future) { + AsyncCallFuture future, DataObject destObj) { super(callback); this.volume = volume; this.dataStore = datastore; this.future = future; this.srcTemplate = srcTemplate; + this.destObj = destObj; } public VolumeInfo getVolume() { @@ -285,33 +284,45 @@ public class VolumeServiceImpl implements VolumeService { @DB protected void createBaseImageAsync(VolumeInfo volume, PrimaryDataStore dataStore, TemplateInfo template, AsyncCallFuture future) { + + DataObject templateOnPrimaryStoreObj = dataStore.create(template); CreateBaseImageContext context = new CreateBaseImageContext(null, volume, dataStore, template, - future); - - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + future, templateOnPrimaryStoreObj); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallback(null, null)) .setContext(context); - DataObject templateOnPrimaryStoreObj = dataObjectMgr.createInternalStateOnly(template, dataStore); + + templateOnPrimaryStoreObj.processEvent(Event.CreateOnlyRequested); - dataObjectMgr.copyAsync(context.srcTemplate, templateOnPrimaryStoreObj, caller); + try { + motionSrv.copyAsync(template, templateOnPrimaryStoreObj, caller); + } catch (Exception e) { + s_logger.debug("failed to create template on storage", e); + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); + VolumeApiResult result = new VolumeApiResult(volume); + result.setResult(e.toString()); + caller.complete(result); + } return; } @DB - protected Void copyBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { - CreateCmdResult result = callback.getResult(); + protected Void copyBaseImageCallback(AsyncCallbackDispatcher callback, CreateBaseImageContext context) { + CopyCommandResult result = callback.getResult(); 
VolumeApiResult res = new VolumeApiResult(context.getVolume()); AsyncCallFuture future = context.getFuture(); + DataObject templateOnPrimaryStoreObj = context.destObj; if (!result.isSuccess()) { + templateOnPrimaryStoreObj.processEvent(Event.OperationFailed); res.setResult(result.getResult()); future.complete(res); return null; } - DataObject templateOnPrimaryStoreObj = objectInDataStoreMgr.get(context.srcTemplate, context.dataStore); - + + templateOnPrimaryStoreObj.processEvent(Event.OperationSuccessed); createVolumeFromBaseImageAsync(context.volume, templateOnPrimaryStoreObj, context.dataStore, future); return null; } @@ -332,10 +343,7 @@ public class VolumeServiceImpl implements VolumeService { this.templateOnStore = templateOnStore; } - public VolumeObject getVolumeObject() { - return this.vo; - } - + public AsyncCallFuture getFuture() { return this.future; } @@ -343,39 +351,32 @@ public class VolumeServiceImpl implements VolumeService { @DB protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject templateOnPrimaryStore, PrimaryDataStore pd, AsyncCallFuture future) { - VolumeObject vo = (VolumeObject) volume; - try { - vo.stateTransit(Volume.Event.CreateRequested); - } catch (Exception e) { - VolumeApiResult result = new VolumeApiResult(volume); - result.setResult(e.toString()); - future.complete(result); - return; - } - + VolumeObject vo = (VolumeObject)volume; CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, vo, pd, templateOnPrimaryStore, future); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().copyBaseImageCallBack(null, null)) .setContext(context); - DataObject volumeOnPrimaryStorage = dataObjectMgr.createInternalStateOnly(volume, pd); - dataObjectMgr.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); + DataObject volumeOnPrimaryStorage = 
pd.create(volume); + volume.processEvent(Event.CreateOnlyRequested); + + motionSrv.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller); return; } @DB - public Void copyBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { - VolumeObject vo = context.getVolumeObject(); - CreateCmdResult result = callback.getResult(); + public Void copyBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { + VolumeObject vo = context.vo; + CopyCommandResult result = callback.getResult(); VolumeApiResult volResult = new VolumeApiResult(vo); if (result.isSuccess()) { if (result.getPath() != null) { vo.setPath(result.getPath()); } - vo.stateTransit(Volume.Event.OperationSucceeded); + vo.processEvent(Event.OperationSuccessed); } else { - vo.stateTransit(Volume.Event.OperationFailed); + vo.processEvent(Event.OperationFailed); volResult.setResult(result.getResult()); } @@ -397,13 +398,252 @@ public class VolumeServiceImpl implements VolumeService { return future; } - createVolumeFromBaseImageAsync(volume, template, pd, future); + createVolumeFromBaseImageAsync(volume, templateOnPrimaryStore, pd, future); return future; } @Override - public TemplateOnPrimaryDataStoreInfo grantAccess(TemplateOnPrimaryDataStoreInfo template, EndPoint endPoint) { - // TODO Auto-generated method stub + @DB + public boolean destroyVolume(long volumeId) + throws ConcurrentOperationException { + + VolumeInfo vol = this.volFactory.getVolume(volumeId); + vol.processEvent(Event.DestroyRequested); + this.snapshotMgr.deletePoliciesForVolume(volumeId); + + vol.processEvent(Event.OperationSuccessed); + + return true; + } + + @Override + public AsyncCallFuture createVolumeFromSnapshot( + VolumeInfo volume, DataStore store, SnapshotInfo snapshot) { + AsyncCallFuture future = new AsyncCallFuture(); + + try { + DataObject volumeOnStore = store.create(volume); + volume.processEvent(Event.CreateOnlyRequested); + 
CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext(null, + (VolumeObject)volume, store, volumeOnStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().createVolumeFromSnapshotCallback(null, null)) + .setContext(context); + this.motionSrv.copyAsync(snapshot, volumeOnStore, caller); + } catch (Exception e) { + s_logger.debug("create volume from snapshot failed", e); + VolumeApiResult result = new VolumeApiResult(volume); + result.setResult(e.toString()); + future.complete(result); + } + + return future; + } + + protected Void createVolumeFromSnapshotCallback(AsyncCallbackDispatcher callback, + CreateVolumeFromBaseImageContext context) { + CopyCommandResult result = callback.getResult(); + VolumeInfo volume = context.vo; + VolumeApiResult apiResult = new VolumeApiResult(volume); + Event event = null; + if (result.isFailed()) { + apiResult.setResult(result.getResult()); + event = Event.OperationFailed; + } else { + event = Event.OperationSuccessed; + } + + try { + volume.processEvent(event); + } catch (Exception e) { + s_logger.debug("create volume from snapshot failed", e); + apiResult.setResult(e.toString()); + } + + AsyncCallFuture future = context.future; + future.complete(apiResult); + return null; + } + + protected VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePool pool) { + Long lastPoolId = volume.getPoolId(); + VolumeVO newVol = new VolumeVO(volume); + newVol.setPoolId(pool.getId()); + newVol.setFolder(pool.getPath()); + newVol.setPodId(pool.getPodId()); + newVol.setPoolId(pool.getId()); + newVol.setLastPoolId(lastPoolId); + newVol.setPodId(pool.getPodId()); + return this.volDao.persist(newVol); + } + + + private class CopyVolumeContext extends AsyncRpcConext { + final VolumeInfo srcVolume; + final VolumeInfo destVolume; + final DataStore destStore; + final AsyncCallFuture future; + /** + * @param callback + */ + public 
CopyVolumeContext(AsyncCompletionCallback callback, AsyncCallFuture future, VolumeInfo srcVolume, VolumeInfo destVolume, + DataStore destStore) { + super(callback); + this.srcVolume = srcVolume; + this.destVolume = destVolume; + this.destStore = destStore; + this.future = future; + } + + } + @Override + public AsyncCallFuture copyVolume(VolumeInfo srcVolume, + DataStore destStore) { + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult res = new VolumeApiResult(srcVolume); + try { + if (!this.snapshotMgr.canOperateOnVolume(srcVolume)) { + s_logger.debug( + "There are snapshots creating on this volume, can not move this volume"); + + res.setResult("There are snapshots creating on this volume, can not move this volume"); + future.complete(res); + return future; + } + + VolumeVO destVol = duplicateVolumeOnAnotherStorage(srcVolume, (StoragePool)destStore); + VolumeInfo destVolume = this.volFactory.getVolume(destVol.getId(), destStore); + destVolume.processEvent(Event.CreateOnlyRequested); + srcVolume.processEvent(Event.CopyingRequested); + + CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, + destVolume, + destStore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().copyVolumeCallBack(null, null)) + .setContext(context); + this.motionSrv.copyAsync(srcVolume, destVolume, caller); + } catch (Exception e) { + s_logger.debug("Failed to copy volume", e); + res.setResult(e.toString()); + future.complete(res); + } + return future; + } + + protected Void copyVolumeCallBack(AsyncCallbackDispatcher callback, CopyVolumeContext context) { + VolumeInfo srcVolume = context.srcVolume; + VolumeInfo destVolume = context.destVolume; + CopyCommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + VolumeApiResult res = new VolumeApiResult(destVolume); + try { + if (result.isFailed()) { + res.setResult(result.getResult()); + 
destVolume.processEvent(Event.OperationFailed); + srcVolume.processEvent(Event.OperationFailed); + AsyncCallFuture destroyFuture = this.expungeVolumeAsync(destVolume); + destroyFuture.get(); + future.complete(res); + return null; + } + srcVolume.processEvent(Event.OperationSuccessed); + destVolume.processEvent(Event.OperationSuccessed); + AsyncCallFuture destroyFuture = this.expungeVolumeAsync(srcVolume); + destroyFuture.get(); + future.complete(res); + return null; + } catch (Exception e) { + s_logger.debug("Failed to process copy volume callback",e); + res.setResult(e.toString()); + future.complete(res); + } + return null; } + + @Override + public AsyncCallFuture registerVolume(VolumeInfo volume, DataStore store) { + + AsyncCallFuture future = new AsyncCallFuture(); + VolumeObject vo = (VolumeObject) volume; + + CreateVolumeContext context = new CreateVolumeContext(null, vo, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)) + .setContext(context); + + dataObjectMgr.createAsync(volume, store, caller, true); + return future; + } + + protected Void registerVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + CreateCmdResult result = callback.getResult(); + VolumeObject vo = (VolumeObject)context.volume; + /*if (result.isFailed()) { + vo.stateTransit(Volume.Event.OperationFailed); + } else { + vo.stateTransit(Volume.Event.OperationSucceeded); + }*/ + VolumeApiResult res = new VolumeApiResult(vo); + context.future.complete(res); + return null; + } + + + @Override + public AsyncCallFuture resize(VolumeInfo volume) { + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult result = new VolumeApiResult(volume); + try { + volume.processEvent(Event.ResizeRequested); + } catch (Exception e) { + s_logger.debug("Failed to change state to resize", e); + result.setResult(e.toString()); + future.complete(result); + return future; + } 
+ CreateVolumeContext context = new CreateVolumeContext(null, volume, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().registerVolumeCallback(null, null)) + .setContext(context); + volume.getDataStore().getDriver().resize(volume, caller); + return future; + } + + protected Void resizeVolumeCallback(AsyncCallbackDispatcher callback, CreateVolumeContext context) { + CreateCmdResult result = callback.getResult(); + AsyncCallFuture future = context.future; + VolumeInfo volume = (VolumeInfo)context.volume; + + if (result.isFailed()) { + try { + volume.processEvent(Event.OperationFailed); + } catch (Exception e) { + s_logger.debug("Failed to change state", e); + } + VolumeApiResult res = new VolumeApiResult(volume); + res.setResult(result.getResult()); + future.complete(res); + return null; + } + + try { + volume.processEvent(Event.OperationSuccessed); + } catch(Exception e) { + s_logger.debug("Failed to change state", e); + VolumeApiResult res = new VolumeApiResult(volume); + res.setResult(result.getResult()); + future.complete(res); + return null; + } + + VolumeApiResult res = new VolumeApiResult(volume); + future.complete(res); + + return null; + } + + + } diff --git a/framework/api/pom.xml b/framework/api/pom.xml new file mode 100644 index 00000000000..5260ebc4bf6 --- /dev/null +++ b/framework/api/pom.xml @@ -0,0 +1,42 @@ + + + 4.0.0 + cloud-framework-api + + org.apache.cloudstack + cloudstack-framework + 4.2.0-SNAPSHOT + ../pom.xml + + + + + org.apache.cloudstack + cloud-utils + 4.2.0-SNAPSHOT + + + + + + install + src + ${project.basedir}/test + + + ${project.basedir}/test/resources + + + + diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java b/framework/api/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java similarity index 100% rename from framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java rename to 
framework/api/src/org/apache/cloudstack/framework/async/AsyncCallFuture.java diff --git a/framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java b/framework/api/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java similarity index 100% rename from framework/ipc/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java rename to framework/api/src/org/apache/cloudstack/framework/async/AsyncCompletionCallback.java diff --git a/framework/events/pom.xml b/framework/events/pom.xml index d21275a6744..7c788c35bbd 100644 --- a/framework/events/pom.xml +++ b/framework/events/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java b/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java index c16ee6f96f4..b83e3b28a7a 100644 --- a/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java +++ b/framework/events/src/org/apache/cloudstack/framework/events/EventBus.java @@ -19,15 +19,13 @@ package org.apache.cloudstack.framework.events; -import com.cloud.utils.component.Adapter; - import java.util.UUID; /** * Interface to publish and subscribe to CloudStack events * */ -public interface EventBus extends Adapter{ +public interface EventBus { /** * publish an event on to the event bus diff --git a/framework/ipc/pom.xml b/framework/ipc/pom.xml index 6e01b7ec5d2..b7f4fcc78ce 100644 --- a/framework/ipc/pom.xml +++ b/framework/ipc/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml @@ -25,13 +25,13 @@ org.apache.cloudstack cloud-core - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT org.apache.cloudstack cloud-utils - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/framework/jobs/pom.xml b/framework/jobs/pom.xml index 8b12f5d4bb5..56490216f16 100644 --- a/framework/jobs/pom.xml +++ b/framework/jobs/pom.xml 
@@ -26,4 +26,4 @@ quartz 2.1.6
- \ No newline at end of file + diff --git a/framework/pom.xml b/framework/pom.xml index dafc0eb5a2d..4633dab2b30 100644 --- a/framework/pom.xml +++ b/framework/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install @@ -33,5 +33,6 @@ ipc rest events + api diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml index e8322e03e59..a783bc8de2f 100644 --- a/framework/rest/pom.xml +++ b/framework/rest/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-framework - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml cloud-framework-rest @@ -67,6 +67,10 @@ org.eclipse.jetty jetty-server + + org.apache.geronimo.specs + geronimo-servlet_3.0_spec +
diff --git a/packaging/centos63/cloud-agent.rc b/packaging/centos63/cloud-agent.rc index acf81316479..6d534732528 100755 --- a/packaging/centos63/cloud-agent.rc +++ b/packaging/centos63/cloud-agent.rc @@ -31,7 +31,8 @@ whatami=cloudstack-agent SHORTNAME="$whatami" PIDFILE=/var/run/"$whatami".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloudstack/agent/agent.log +LOGDIR=/var/log/cloudstack/agent +LOGFILE=${LOGDIR}/agent.log PROGNAME="Cloud Agent" CLASS="com.cloud.agent.AgentShell" JSVC=`which jsvc 2>/dev/null`; @@ -46,7 +47,7 @@ unset OPTIONS [ -r /etc/sysconfig/"$SHORTNAME" ] && source /etc/sysconfig/"$SHORTNAME" # The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT) -JDK_DIRS="/usr/lib/jvm/jre /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun /usr/lib/jvm/java-1.5.0-sun /usr/lib/j2sdk1.5-sun /usr/lib/j2sdk1.5-ibm" +JDK_DIRS="/usr/lib/jvm/jre /usr/lib/jvm/java-7-openjdk /usr/lib/jvm/java-7-openjdk-i386 /usr/lib/jvm/java-7-openjdk-amd64 /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun" for jdir in $JDK_DIRS; do if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then @@ -55,67 +56,64 @@ for jdir in $JDK_DIRS; do done export JAVA_HOME -SCP="" -DCP="" -ACP=`ls /usr/share/cloudstack-agent/cloud-plugin-hypervisor-kvm-*.jar`":"`ls /usr/share/cloudstack-agent/lib/* | tr '\n' ':'` -JCP="/usr/share/java/jna.jar:/usr/share/java/commons-daemon.jar" +ACP=`ls /usr/share/cloudstack-agent/lib/*.jar | tr '\n' ':' | sed s'/.$//'` +PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | sed s'/.$//'` # We need to append the JSVC daemon JAR to the classpath # AgentShell implements the JSVC daemon methods -export CLASSPATH="$SCP:$DCP:$ACP:$JCP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts" +# We also need JNA in the classpath (from the distribution) for the 
Libvirt Java bindings +export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts" start() { - echo -n $"Starting $PROGNAME: " - if hostname --fqdn >/dev/null 2>&1 ; then - $JSVC -cp "$CLASSPATH" -pidfile "$PIDFILE" $CLASS - RETVAL=$? - echo - else - failure - echo - echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr - RETVAL=9 - fi - [ $RETVAL = 0 ] && touch ${LOCKFILE} - return $RETVAL + echo -n $"Starting $PROGNAME: " + if hostname --fqdn >/dev/null 2>&1 ; then + $JSVC -cp "$CLASSPATH" -pidfile "$PIDFILE" -errfile SYSLOG $CLASS + RETVAL=$? + echo + else + failure + echo + echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr + RETVAL=9 + fi + [ $RETVAL = 0 ] && touch ${LOCKFILE} + return $RETVAL } stop() { - echo -n $"Stopping $PROGNAME: " - $JSVC -pidfile "$PIDFILE" -stop $CLASS - RETVAL=$? - echo - [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} + echo -n $"Stopping $PROGNAME: " + $JSVC -pidfile "$PIDFILE" -stop $CLASS + RETVAL=$? + echo + [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} } - -# See how we were called. case "$1" in - start) - start - ;; - stop) - stop - ;; - status) + start) + start + ;; + stop) + stop + ;; + status) status -p ${PIDFILE} $SHORTNAME - RETVAL=$? - ;; - restart) - stop - sleep 3 - start - ;; - condrestart) - if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then - stop - sleep 3 - start - fi - ;; - *) - echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}" - RETVAL=3 + RETVAL=$? 
+ ;; + restart) + stop + sleep 3 + start + ;; + condrestart) + if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then + stop + sleep 3 + start + fi + ;; + *) + echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}" + RETVAL=3 esac exit $RETVAL diff --git a/packaging/centos63/cloud-ipallocator.rc b/packaging/centos63/cloud-ipallocator.rc index ffeffa342c8..08cf9c88a0f 100755 --- a/packaging/centos63/cloud-ipallocator.rc +++ b/packaging/centos63/cloud-ipallocator.rc @@ -30,7 +30,7 @@ whatami=cloud-external-ipallocator SHORTNAME="$whatami" PIDFILE=/var/run/"$whatami".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloud/ipallocator/ipallocator.log +LOGFILE=/var/log/cloudstack/ipallocator/ipallocator.log PROGNAME="External IPAllocator" unset OPTIONS diff --git a/packaging/centos63/cloud-usage.rc b/packaging/centos63/cloud-usage.rc index 8bee5aeb6a0..76f0e06fdfe 100755 --- a/packaging/centos63/cloud-usage.rc +++ b/packaging/centos63/cloud-usage.rc @@ -35,7 +35,8 @@ SHORTNAME="cloudstack-usage" PIDFILE=/var/run/"$SHORTNAME".pid LOCKFILE=/var/lock/subsys/"$SHORTNAME" -LOGFILE=/var/log/cloudstack/usage/usage.log +LOGDIR=/var/log/cloudstack/usage +LOGFILE=${LOGDIR}/usage.log PROGNAME="CloudStack Usage Monitor" CLASS="com.cloud.usage.UsageServer" PROG="jsvc" @@ -62,7 +63,7 @@ JCP="/usr/share/java/commons-daemon.jar" # We need to append the JSVC daemon JAR to the classpath # AgentShell implements the JSVC daemon methods -export CLASSPATH="$SCP:$DCP:$UCP:$JCP:/etc/sysconfig" +export CLASSPATH="$SCP:$DCP:$UCP:$JCP:/etc/cloudstack/usage" start() { if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then @@ -79,7 +80,8 @@ start() { echo -n "Starting $PROGNAME" "$SHORTNAME" - if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" -errfile SYSLOG -Dpid=$$ $CLASS + if daemon --pidfile $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" \ + -errfile $LOGDIR/cloudstack-usage.err -outfile 
$LOGDIR/cloudstack-usage.out -Dpid=$$ $CLASS RETVAL=$? then rc=0 diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 5a15ce40929..16c36020b45 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -78,7 +78,8 @@ Requires: mkisofs Requires: MySQL-python Requires: python-paramiko Requires: ipmitool -Requires: %{name}-common = 4.1.0 +Requires: %{name}-common = %{_ver} +Requires: %{name}-awsapi = %{_ver} Obsoletes: cloud-client < 4.1.0 Obsoletes: cloud-client-ui < 4.1.0 Obsoletes: cloud-daemonize < 4.1.0 @@ -108,14 +109,15 @@ The Apache CloudStack files shared between agent and management server %package agent Summary: CloudStack Agent for KVM hypervisors Requires: java >= 1.6.0 +Requires: jna >= 3.2.4 Requires: %{name}-common = %{_ver} Requires: libvirt Requires: bridge-utils Requires: ebtables Requires: jsvc -Requires: jna Requires: jakarta-commons-daemon Requires: jakarta-commons-daemon-jsvc +Requires: perl Provides: cloud-agent Obsoletes: cloud-agent < 4.1.0 Obsoletes: cloud-test < 4.1.0 @@ -143,13 +145,16 @@ Apache CloudStack command line interface %package awsapi Summary: Apache CloudStack AWS API compatibility wrapper +Requires: %{name}-management = %{_ver} +Obsoletes: cloud-aws-api < 4.1.0 +Provides: cloud-aws-api %description awsapi Apache Cloudstack AWS API compatibility wrapper -%package docs -Summary: Apache CloudStack documentation -%description docs -Apache CloudStack documentations +#%package docs +#Summary: Apache CloudStack documentation +#%description docs +#Apache CloudStack documentations %prep echo Doing CloudStack build @@ -160,7 +165,7 @@ echo Doing CloudStack build cp packaging/centos63/replace.properties build/replace.properties echo VERSION=%{_maventag} >> build/replace.properties echo PACKAGE=%{name} >> build/replace.properties -mvn package -Dsystemvm +mvn -P awsapi package -Dsystemvm %install [ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} @@ -215,7 +220,7 @@ install -D 
client/target/utilities/bin/cloud-sysvmadm ${RPM_BUILD_ROOT}%{_bindir install -D client/target/utilities/bin/cloud-update-xenserver-licenses ${RPM_BUILD_ROOT}%{_bindir}/%{name}-update-xenserver-licenses cp -r client/target/utilities/scripts/db/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/setup -cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client +cp -r client/target/cloud-client-ui-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client # Don't package the scripts in the management webapp rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/scripts @@ -226,6 +231,10 @@ for name in db.properties log4j-cloud.xml tomcat6-nonssl.conf tomcat6-ssl.conf s mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/$name \ ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name done + +ln -s %{_sysconfdir}/%{name}/management/log4j-cloud.xml \ + ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/log4j-cloud.xml + mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/webapps/client/WEB-INF/classes/context.xml \ ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/Catalina/localhost/client @@ -251,16 +260,18 @@ chmod 770 ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/agent mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/plugins install -D packaging/centos63/cloud-agent.rc ${RPM_BUILD_ROOT}%{_sysconfdir}/init.d/%{name}-agent install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/agent.properties install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties install -D agent/target/transformed/log4j-cloud.xml 
${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/log4j-cloud.xml install -D agent/target/transformed/cloud-setup-agent ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-agent install -D agent/target/transformed/cloud-ssh ${RPM_BUILD_ROOT}%{_bindir}/%{name}-ssh -install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/cloud-plugin-hypervisor-kvm-%{_maventag}.jar +install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib # Usage server +mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/usage mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib install -D usage/target/cloud-usage-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/cloud-usage-%{_maventag}.jar cp usage/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-usage/lib/ @@ -271,6 +282,14 @@ mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}/usage/ cp -r cloud-cli/cloudtool ${RPM_BUILD_ROOT}%{_libdir}/python2.6/site-packages/ install cloud-cli/cloudapis/cloud.py ${RPM_BUILD_ROOT}%{_libdir}/python2.6/site-packages/cloudapis.py +# AWS API +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/bridge +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup +cp -r awsapi/target/cloud-awsapi-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/bridge +install -D awsapi-setup/setup/cloud-setup-bridge ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-setup-bridge +install -D awsapi-setup/setup/cloudstack-aws-api-register ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-aws-api-register +cp -r awsapi-setup/db/mysql/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup + %clean [ ${RPM_BUILD_ROOT} != "/" ] && rm -rf ${RPM_BUILD_ROOT} @@ -300,16 +319,30 @@ if [ "$1" == "1" ] ; then /sbin/chkconfig --level 345 cloud-management 
on > /dev/null 2>&1 || true fi -if [ ! -f %{_datadir}/cloudstack/management/webapps/client/WEB-INF/classes/scripts/scripts/vm/hypervisor/xenserver/vhd-util ] ; then - echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in - echo %{_datadir}/cloudstack/management/webapps/client/WEB-INF/classes/scripts/vm/hypervisor/xenserver/ +if [ -d "%{_datadir}/%{name}-management" ] ; then + ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080 fi +if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then + echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in + echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/ +fi + +# change cloud user's home to 4.1+ version if needed. Would do this via 'usermod', but it +# requires that cloud user not be in use, so RPM could not be installed while management is running +if getent passwd cloud | grep -q /var/lib/cloud; then + sed -i 's/\/var\/lib\/cloud\/management/\/var\/cloudstack\/management/g' /etc/passwd +fi + + +#%post awsapi +#if [ -d "%{_datadir}/%{name}-management" ] ; then +# ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080 +#fi + #No default permission as the permission setup is complex %files management %defattr(-,root,root,-) -%doc LICENSE -%doc NOTICE %dir %attr(0770,root,cloud) %{_sysconfdir}/%{name}/management/Catalina %dir %attr(0770,root,cloud) %{_sysconfdir}/%{name}/management/Catalina/localhost %dir %attr(0770,root,cloud) %{_sysconfdir}/%{name}/management/Catalina/localhost/client @@ -371,8 +404,8 @@ fi %attr(0755,root,root) %{_sysconfdir}/init.d/%{name}-agent %config(noreplace) %{_sysconfdir}/%{name}/agent %dir %{_localstatedir}/log/%{name}/agent -%attr(0644,root,root) %{_datadir}/%{name}-agent/*.jar %attr(0644,root,root) %{_datadir}/%{name}-agent/lib/*.jar +%dir %{_datadir}/%{name}-agent/plugins %doc 
LICENSE %doc NOTICE @@ -393,6 +426,7 @@ fi %attr(0644,root,root) %{_datadir}/%{name}-usage/*.jar %attr(0644,root,root) %{_datadir}/%{name}-usage/lib/*.jar %dir /var/log/%{name}/usage +%dir %{_sysconfdir}/%{name}/usage %doc LICENSE %doc NOTICE @@ -403,11 +437,16 @@ fi %doc LICENSE %doc NOTICE -%files docs -%doc LICENSE -%doc NOTICE +#%files docs +#%doc LICENSE +#%doc NOTICE %files awsapi +%defattr(0644,cloud,cloud,0755) +%{_datadir}/%{name}-bridge/webapps/bridge +%attr(0644,root,root) %{_datadir}/%{name}-bridge/setup/* +%attr(0755,root,root) %{_bindir}/cloudstack-aws-api-register +%attr(0755,root,root) %{_bindir}/cloudstack-setup-bridge %doc LICENSE %doc NOTICE diff --git a/packaging/centos63/package.sh b/packaging/centos63/package.sh index 5b1bab49b61..2515ecba11f 100755 --- a/packaging/centos63/package.sh +++ b/packaging/centos63/package.sh @@ -18,10 +18,10 @@ CWD=`pwd` RPMDIR=$CWD/../../dist/rpmbuild +PACK_PROJECT=cloudstack - -VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep -v '^\['` +VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` if echo $VERSION | grep SNAPSHOT ; then REALVER=`echo $VERSION | cut -d '-' -f 1` DEFVER="-D_ver $REALVER" @@ -34,12 +34,12 @@ else fi mkdir -p $RPMDIR/SPECS -mkdir -p $RPMDIR/SOURCES/cloudstack-$VERSION +mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -(cd ../../; tar -c --exclude .git --exclude dist . | tar -C $RPMDIR/SOURCES/cloudstack-$VERSION -x ) -(cd $RPMDIR/SOURCES/; tar -czf cloudstack-$VERSION.tgz cloudstack-$VERSION) +(cd ../../; tar -c --exclude .git --exclude dist . 
| tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x ) +(cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) cp cloud.spec $RPMDIR/SPECS -(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE" ) +(cd $RPMDIR; rpmbuild -ba SPECS/cloud.spec "-D_topdir $RPMDIR" "$DEFVER" "$DEFREL" "$DEFPRE") diff --git a/packaging/debian/init/cloud-agent b/packaging/debian/init/cloud-agent new file mode 100755 index 00000000000..c87a5c09f81 --- /dev/null +++ b/packaging/debian/init/cloud-agent @@ -0,0 +1,171 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: cloud agent +# Required-Start: $network $local_fs +# Required-Stop: $network $local_fs +# Default-Start: 3 4 5 +# Default-Stop: 0 1 2 6 +# Short-Description: Start/stop Apache CloudStack Agent +# Description: This scripts Starts/Stops the Apache CloudStack agent +## The CloudStack Agent is a part of the Apache CloudStack project and is used +## for managing KVM-based Hypervisors and performing secondary storage tasks inside +## the Secondary Storage System Virtual Machine. +## JSVC (Java daemonizing) is used for starting and stopping the agent +### END INIT INFO + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +. 
/lib/lsb/init-functions + +SHORTNAME="cloud-agent" +PIDFILE=/var/run/"$SHORTNAME".pid +LOCKFILE=/var/lock/subsys/"$SHORTNAME" +PROGNAME="CloudStack Agent" +CLASS="com.cloud.agent.AgentShell" +PROG="jsvc" +DAEMON="/usr/bin/jsvc" +SHUTDOWN_WAIT="30" + +unset OPTIONS +[ -r /etc/default/"$SHORTNAME" ] && source /etc/default/"$SHORTNAME" + +# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT) +JDK_DIRS="/usr/lib/jvm/java-7-openjdk-amd64 /usr/lib/jvm/java-7-openjdk-i386 /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun" + +for jdir in $JDK_DIRS; do + if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then + JAVA_HOME="$jdir" + fi +done +export JAVA_HOME + +ACP=`ls /usr/share/cloudstack-agent/lib/*.jar | tr '\n' ':' | sed s'/.$//'` +PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | sed s'/.$//'` + +# We need to append the JSVC daemon JAR to the classpath +# AgentShell implements the JSVC daemon methods +# We also need JNA in the classpath (from the distribution) for the Libvirt Java bindings +export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent" + +wait_for_network() { + i=1 + while [ $i -lt 10 ] + do + # Under Ubuntu and Debian libvirt by default creates a bridge called virbr0. + # That's why we want more then 3 lines back from brctl, so that there is a manually created bridge + if [ "$(brctl show|wc -l)" -gt 2 ]; then + break + else + sleep 1 + let i=$i+1 + continue + fi + done +} + +start() { + if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_daemon_msg "$PROGNAME apparently already running" + log_end_msg 0 + exit 0 + fi + + log_daemon_msg "Starting $PROGNAME" "$SHORTNAME" + if hostname --fqdn >/dev/null 2>&1 ; then + true + else + log_failure_msg "The host name does not resolve properly to an IP address. 
Cannot start $PROGNAME" + log_end_msg 1 + exit 1 + fi + + wait_for_network + + if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -errfile SYSLOG $CLASS + RETVAL=$? + then + rc=0 + sleep 1 + if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_failure_msg "$PROG failed to start" + rc=1 + fi + else + rc=1 + fi + + if [ $rc -eq 0 ]; then + log_end_msg 0 + else + log_end_msg 1 + rm -f "$PIDFILE" + fi +} + +stop() { + count="0" + + log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME" + killproc -p $PIDFILE $DAEMON + + until [ "$count" -gt "$SHUTDOWN_WAIT" ] + do + agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME) + if [ "$?" -gt "0" ];then + break + fi + sleep 1 + let count="${count}+1" + done + + agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME) + if [ "$?" -eq "0" ]; then + agentPid=$(ps aux|grep [j]svc|awk '{print $2}') + if [ "$agentPid" != "" ]; then + log_warning_msg "$PROG still running, forcing kill" + kill -9 $agentPid + fi + fi + + log_end_msg $? + rm -f "$PIDFILE" +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME" + RETVAL=$? + ;; + restart | force-reload) + stop + sleep 3 + start + ;; + *) + echo "Usage: $0 {start|stop|restart|force-reload|status}" + RETVAL=3 +esac + +exit $RETVAL diff --git a/packaging/debian/init/cloud-management b/packaging/debian/init/cloud-management new file mode 100755 index 00000000000..490bf1e8e68 --- /dev/null +++ b/packaging/debian/init/cloud-management @@ -0,0 +1,244 @@ +#!/bin/sh +# +# /etc/init.d/tomcat6 -- startup script for the Tomcat 6 servlet engine + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +### BEGIN INIT INFO +# Provides: tomcat-vmops +# Required-Start: $local_fs $remote_fs $network +# Required-Stop: $local_fs $remote_fs $network +# Should-Start: $named +# Should-Stop: $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start Tomcat (CloudStack). +# Description: Start the Tomcat servlet engine that runs the CloudStack Management Server. +### END INIT INFO + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +NAME=cloud-management +DESC="CloudStack-specific Tomcat servlet engine" +DAEMON=/usr/bin/jsvc +CATALINA_HOME=/usr/share/cloud/management +DEFAULT=/etc/cloud/management/tomcat6.conf +JVM_TMP=/tmp/$NAME-temp + +# We have to explicitly set the HOME variable to the homedir from the user "cloud" +# This is because various scripts run by the management server read the HOME variable +# and fail when this init script is run manually. +HOME=$(echo ~cloud) + +if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 1 +fi + +# Make sure tomcat is started with system locale +if [ -r /etc/default/locale ]; then + . /etc/default/locale + export LANG +fi + +. /lib/lsb/init-functions +. 
/etc/default/rcS + + +# The following variables can be overwritten in $DEFAULT + +# Run Tomcat 6 as this user ID +TOMCAT6_USER=tomcat6 + +# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not +# defined in $DEFAULT) +JDK_DIRS="/usr/lib/jvm/java-1.6.0-openjdk-amd64/ /usr/lib/jvm/java-1.6.0-openjdk-i386/ /usr/lib/jvm/java-1.6.0-openjdk/ /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-sun" + +# Look for the right JVM to use +for jdir in $JDK_DIRS; do + if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then + JAVA_HOME="$jdir" + fi +done +export JAVA_HOME + +# Directory for per-instance configuration files and webapps +CATALINA_BASE=/usr/share/cloud/management + +# Use the Java security manager? (yes/no) +TOMCAT6_SECURITY=no + +# Default Java options +# Set java.awt.headless=true if JAVA_OPTS is not set so the +# Xalan XSL transformer can work without X11 display on JDK 1.4+ +# It also looks like the default heap size of 64M is not enough for most cases +# so the maximum heap size is set to 128M +if [ -z "$JAVA_OPTS" ]; then + JAVA_OPTS="-Djava.awt.headless=true -Xmx128M" +fi + +# End of variables that can be overwritten in $DEFAULT + +# overwrite settings from default file +if [ -f "$DEFAULT" ]; then + . "$DEFAULT" +fi + +if [ ! 
-f "$CATALINA_HOME/bin/bootstrap.jar" ]; then + log_failure_msg "$NAME is not installed" + exit 1 +fi + +[ -f "$DAEMON" ] || exit 0 + +POLICY_CACHE="$CATALINA_BASE/work/catalina.policy" + +JAVA_OPTS="$JAVA_OPTS -Djava.endorsed.dirs=$CATALINA_HOME/endorsed -Dcatalina.base=$CATALINA_BASE -Dcatalina.home=$CATALINA_HOME -Djava.io.tmpdir=$JVM_TMP" + +# Set the JSP compiler if set in the tomcat6.default file +if [ -n "$JSP_COMPILER" ]; then + JAVA_OPTS="$JAVA_OPTS -Dbuild.compiler=$JSP_COMPILER" +fi + +if [ "$TOMCAT6_SECURITY" = "yes" ]; then + JAVA_OPTS="$JAVA_OPTS -Djava.security.manager -Djava.security.policy=$POLICY_CACHE" +fi + +# Set juli LogManager if logging.properties is provided +if [ -r "$CATALINA_BASE"/conf/logging.properties ]; then + JAVA_OPTS="$JAVA_OPTS "-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager" "-Djava.util.logging.config.file="$CATALINA_BASE/conf/logging.properties" +fi + +# Define other required variables +CATALINA_PID="/var/run/$NAME.pid" +BOOTSTRAP_CLASS=org.apache.catalina.startup.Bootstrap +JSVC_CLASSPATH="/usr/share/java/commons-daemon.jar:$CATALINA_HOME/bin/bootstrap.jar" +JSVC_CLASSPATH=$CLASSPATH:$JSVC_CLASSPATH + +# Look for Java Secure Sockets Extension (JSSE) JARs +if [ -z "${JSSE_HOME}" -a -r "${JAVA_HOME}/jre/lib/jsse.jar" ]; then + JSSE_HOME="${JAVA_HOME}/jre/" +fi +export JSSE_HOME + +case "$1" in + start) + if [ -z "$JAVA_HOME" ]; then + log_failure_msg "no JDK found - please set JAVA_HOME" + exit 1 + fi + + if [ ! 
-d "$CATALINA_BASE/conf" ]; then + log_failure_msg "invalid CATALINA_BASE: $CATALINA_BASE" + exit 1 + fi + + log_daemon_msg "Starting $DESC" "$NAME" + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + + # Regenerate POLICY_CACHE file + umask 022 + echo "// AUTO-GENERATED FILE from /etc/tomcat6/policy.d/" \ + > "$POLICY_CACHE" + echo "" >> "$POLICY_CACHE" + if ls $CATALINA_BASE/conf/policy.d/*.policy > /dev/null 2>&1 ; then + cat $CATALINA_BASE/conf/policy.d/*.policy \ + >> "$POLICY_CACHE" + fi + + # Remove / recreate JVM_TMP directory + rm -rf "$JVM_TMP" + mkdir "$JVM_TMP" || { + log_failure_msg "could not create JVM temporary directory" + exit 1 + } + chown $TOMCAT6_USER "$JVM_TMP" + cd "$JVM_TMP" + + + # fix storage issues on nfs mounts + umask 000 + $DAEMON -user "$TOMCAT6_USER" -cp "$JSVC_CLASSPATH" \ + -outfile SYSLOG -errfile SYSLOG \ + -pidfile "$CATALINA_PID" $JAVA_OPTS "$BOOTSTRAP_CLASS" + + sleep 5 + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + log_end_msg 1 + else + log_end_msg 0 + fi + else + log_progress_msg "(already running)" + log_end_msg 0 + fi + ;; + stop) + log_daemon_msg "Stopping $DESC" "$NAME" + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user "$TOMCAT6_USER" --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + log_progress_msg "(not running)" + else + $DAEMON -cp "$JSVC_CLASSPATH" -pidfile "$CATALINA_PID" \ + -stop "$BOOTSTRAP_CLASS" + fi + rm -rf "$JVM_TMP" + log_end_msg 0 + ;; + status) + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + + if [ -f "$CATALINA_PID" ]; then + log_success_msg "$DESC is not running, but pid file exists." + exit 1 + else + log_success_msg "$DESC is not running." 
+ exit 3 + fi + else + log_success_msg "$DESC is running with pid `cat $CATALINA_PID`" + fi + ;; + restart|force-reload) + if start-stop-daemon --test --stop --pidfile "$CATALINA_PID" \ + --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + $0 stop + sleep 1 + fi + $0 start + ;; + try-restart) + if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \ + --user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \ + >/dev/null; then + $0 start + fi + ;; + *) + log_success_msg "Usage: $0 {start|stop|restart|try-restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/packaging/debian/init/cloud-usage b/packaging/debian/init/cloud-usage new file mode 100755 index 00000000000..1cc75cc03f8 --- /dev/null +++ b/packaging/debian/init/cloud-usage @@ -0,0 +1,127 @@ +#!/bin/bash + +### BEGIN INIT INFO +# Provides: cloudstack usage +# Required-Start: $network $local_fs +# Required-Stop: $network $local_fs +# Default-Start: 3 4 5 +# Default-Stop: 0 1 2 6 +# Short-Description: Start/stop Apache CloudStack Usage Monitor +# Description: This scripts Starts/Stops the Apache CloudStack Usage Monitor +## The CloudStack Usage Monitor is a part of the Apache CloudStack project and is used +## for storing usage statistics from instances. +## JSVC (Java daemonizing) is used for starting and stopping the usage monitor. +### END INIT INFO + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +. /lib/lsb/init-functions + +SHORTNAME="cloudstack-usage" +PIDFILE=/var/run/"$SHORTNAME".pid +PROGNAME="CloudStack Usage Monitor" +CLASS="com.cloud.usage.UsageServer" +PROG="jsvc" +DAEMON="/usr/bin/jsvc" + +unset OPTIONS +[ -r /etc/default/"$SHORTNAME" ] && source /etc/default/"$SHORTNAME" + +# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT) +JDK_DIRS="/usr/lib/jvm/java-7-openjdk-amd64 /usr/lib/jvm/java-7-openjdk-i386 /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun" + +for jdir in $JDK_DIRS; do + if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then + JAVA_HOME="$jdir" + fi +done +export JAVA_HOME + +UCP=`ls /usr/share/cloudstack-usage/lib/*.jar | tr '\n' ':' | sed s'/.$//'` +PCP=`ls /usr/share/cloudstack-usage/plugins/*.jar 2>/dev/null | tr '\n' ':' | sed s'/.$//'` + +# We need to append the JSVC daemon JAR to the classpath +# AgentShell implements the JSVC daemon methods +export CLASSPATH="/usr/share/java/commons-daemon.jar:$UCP:$PCP:/etc/cloudstack/usage" + +start() { + if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_daemon_msg "$PROGNAME apparently already running" + log_end_msg 0 + exit 0 + fi + + log_daemon_msg "Starting $PROGNAME" "$SHORTNAME" + if hostname --fqdn >/dev/null 2>&1 ; then + true + else + log_failure_msg "The host name does not resolve properly to an IP address. 
Cannot start $PROGNAME" + log_end_msg 1 + exit 1 + fi + + if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -outfile SYSLOG -errfile SYSLOG -Dpid=$$ $CLASS + RETVAL=$? + then + rc=0 + sleep 1 + if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_failure_msg "$PROG failed to start" + rc=1 + fi + else + rc=1 + fi + + if [ $rc -eq 0 ]; then + log_end_msg 0 + else + log_end_msg 1 + rm -f "$PIDFILE" + fi +} + +stop() { + log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME" + killproc -p $PIDFILE $DAEMON + log_end_msg $? + rm -f "$PIDFILE" +} + +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME" + RETVAL=$? + ;; + restart | force-reload) + stop + sleep 3 + start + ;; + *) + echo "Usage: $0 {start|stop|restart|force-reload|status}" + RETVAL=3 +esac + +exit $RETVAL diff --git a/packaging/debian/replace.properties b/packaging/debian/replace.properties index 6520f63e682..fee1defd357 100644 --- a/packaging/debian/replace.properties +++ b/packaging/debian/replace.properties @@ -26,37 +26,37 @@ COMPONENTS-SPEC=components-premium.xml AWSAPILOG=awsapi.log REMOTEHOST=localhost AGENTCLASSPATH= -AGENTLOG=/var/log/cloud/agent/agent.log -AGENTLOGDIR=/var/log/cloud/agent/ -AGENTSYSCONFDIR=/etc/cloud/agent -APISERVERLOG=/var/log/cloud/management/apilog.log -AWSAPILOG=/var/log/cloud/awsapi/awsapi.log +AGENTLOG=/var/log/cloudstack/agent/agent.log +AGENTLOGDIR=/var/log/cloudstack/agent/ +AGENTSYSCONFDIR=/etc/cloudstack/agent +APISERVERLOG=/var/log/cloudstack/management/apilog.log +AWSAPILOG=/var/log/cloudstack/awsapi/awsapi.log BINDIR=/usr/bin -COMMONLIBDIR=/usr/share/java +COMMONLIBDIR=/usr/share/cloudstack-common CONFIGUREVARS= DEPSCLASSPATH= DOCDIR= -IPALOCATORLOG=/var/log/cloud/management/ipallocator.log -JAVADIR=/usr/share/java +IPALOCATORLOG=/var/log/cloudstack/management/ipallocator.log +JAVADIR=/usr/share/cloudstack-management/webapps/client/WEB-INF/lib LIBEXECDIR=/usr/libexec 
LOCKDIR=/var/lock MSCLASSPATH= -MSCONF=/etc/cloud/management -MSENVIRON=/usr/share/cloud/management -MSLOG=/var/log/cloud/management/management-server.log -MSLOGDIR=/var/log/cloud/management/ +MSCONF=/etc/cloudstack/management +MSENVIRON=/usr/share/cloudstack-management +MSLOG=/var/log/cloudstack/management/management-server.log +MSLOGDIR=/var/log/cloudstack/management/ MSMNTDIR=/var/lib/cloud/mnt MSUSER=cloud PIDDIR=/var/run PLUGINJAVADIR= PREMIUMJAVADIR= PYTHONDIR=/usr/lib/python2.6/site-packages/ -SERVERSYSCONFDIR=/etc/cloud/server -SETUPDATADIR=/usr/share/cloud/setup +SERVERSYSCONFDIR=/etc/cloudstack/server +SETUPDATADIR=/usr/share/cloudstack-management/setup SYSCONFDIR=/etc SYSTEMCLASSPATH= SYSTEMJARS= USAGECLASSPATH= -USAGELOG=/var/log/cloud/usage -USAGESYSCONFDIR=/etc/cloud/usage +USAGELOG=/var/log/cloudstack/usage +USAGESYSCONFDIR=/etc/cloudstack/usage PACKAGE=cloud diff --git a/patches/pom.xml b/patches/pom.xml index a662bcb9791..00eec02ddc9 100644 --- a/patches/pom.xml +++ b/patches/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/patches/systemvm/debian/config/etc/init.d/cloud-early-config index 78231fbfd44..408264b478a 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/patches/systemvm/debian/config/etc/init.d/cloud-early-config @@ -108,14 +108,17 @@ get_boot_params() { sed -i "s/%/ /g" /var/cache/cloud/cmdline ;; kvm) - # KVM needs to mount another disk, to get cmdline - mkdir -p $EXTRA_MOUNT - mount /dev/vdb $EXTRA_MOUNT - cp -f $EXTRA_MOUNT/cmdline /var/cache/cloud/cmdline - cp -f $EXTRA_MOUNT/authorized_keys /var/cache/cloud/authorized_keys - privkey=/var/cache/cloud/authorized_keys - umount $EXTRA_MOUNT - cp -f $privkey /root/.ssh/ && chmod go-rwx /root/.ssh/authorized_keys + while read line; do + if [[ $line == cmdline:* ]]; then + cmd=${line//cmdline:/} + echo $cmd > /var/cache/cloud/cmdline 
+ elif [[ $line == pubkey:* ]]; then + pubkey=${line//pubkey:/} + echo $pubkey > /var/cache/cloud/authorized_keys + echo $pubkey > /root/.ssh/authorized_keys + fi + done < /dev/vport0p1 + chmod go-rwx /root/.ssh/authorized_keys ;; vmware) vmtoolsd --cmd 'machine.id.get' > /var/cache/cloud/cmdline @@ -363,6 +366,18 @@ setup_common() { echo "nameserver $NS2" >> /etc/dnsmasq-resolv.conf echo "nameserver $NS2" >> /etc/resolv.conf fi + + if [ -n "$IP6_NS1" ] + then + echo "nameserver $IP6_NS1" >> /etc/dnsmasq-resolv.conf + echo "nameserver $IP6_NS1" >> /etc/resolv.conf + fi + if [ -n "$IP6_NS2" ] + then + echo "nameserver $IP6_NS2" >> /etc/dnsmasq-resolv.conf + echo "nameserver $IP6_NS2" >> /etc/resolv.conf + fi + if [ -n "$MGMTNET" -a -n "$LOCAL_GW" ] then ip route add $MGMTNET via $LOCAL_GW dev eth1 @@ -410,7 +425,6 @@ setup_dnsmasq() { sed -r -i s/^[#]?domain=.*$/domain=$DOMAIN/ /etc/dnsmasq.conf #answer all local domain queries sed -i -e "s/^[#]*local=.*$/local=\/$DOMAIN\//" /etc/dnsmasq.conf - fi if [ -n "$DNS_SEARCH_ORDER" ] @@ -432,6 +446,9 @@ setup_dnsmasq() { if [ $DHCP_RANGE_IP6 ] then sed -i -e "s/^dhcp-range_ip6=.*$/dhcp-range=$DHCP_RANGE_IP6,static/" /etc/dnsmasq.conf + # For nondefault6 tagged host, don't send dns-server information + sed -i /nondefault6/d /etc/dnsmasq.conf + echo "dhcp-option=nondefault6,option6:dns-server" >> /etc/dnsmasq.conf else sed -i -e "s/^dhcp-range_ip6=.*$//" /etc/dnsmasq.conf fi @@ -453,17 +470,23 @@ setup_dnsmasq() { fi sed -i -e "/^[#]*dhcp-option=option:router.*$/d" /etc/dnsmasq.conf [ $DEFAULT_GW ] && echo "dhcp-option=option:router,$DEFAULT_GW" >> /etc/dnsmasq.conf - NS=$NS1 - [ -n "$NS2" ] && NS=$NS1,$NS2 - #for now set up ourself as the dns server as well - sed -i -e "/^[#]*dhcp-option=6.*$/d" /etc/dnsmasq.conf - if [ "$USE_EXTERNAL_DNS" == "true" ] + + [ $ETH0_IP ] && [ $NS1 ] && NS="$NS1," + [ $ETH0_IP ] && [ $NS2 ] && NS="$NS$NS2," + [ $ETH0_IP6 ] && [ $IP6_NS1 ] && NS6="[$IP6_NS1]," + [ $ETH0_IP6 ] && [ $IP6_NS2 ] && 
NS6="$NS6[$IP6_NS2]," + #for now set up ourself as the dns server as well + sed -i -e "/^[#]*dhcp-option=6,.*$/d" /etc/dnsmasq.conf + sed -i -e "/^[#]*dhcp-option=option6:dns-server,.*$/d" /etc/dnsmasq.conf + if [ "$USE_EXTERNAL_DNS" != "true" ] then - [ $ETH0_IP ] && echo "dhcp-option=6,$NS" >> /etc/dnsmasq.conf - else - [ $ETH0_IP ] && echo "dhcp-option=6,$INTERNAL_DNS,$NS" >> /etc/dnsmasq.conf + [ $ETH0_IP ] && NS="$INTERNAL_DNS,$NS" + [ $ETH0_IP6 ] && NS6="[::],$NS6" fi - [ $ETH0_IP6 ] && echo "dhcp-option=option6:dns-server,[::]" >> /etc/dnsmasq.conf + NS=${NS%?} + NS6=${NS6%?} + [ $ETH0_IP ] && echo "dhcp-option=6,$NS" >> /etc/dnsmasq.conf + [ $ETH0_IP6 ] && echo "dhcp-option=option6:dns-server,$NS6" >> /etc/dnsmasq.conf } setup_sshd(){ @@ -570,9 +593,20 @@ setup_redundant_router() { fi } +setup_aesni() { + if [ `grep aes /proc/cpuinfo | wc -l` -gt 0 ] + then + modprobe aesni_intel + if [ `lsmod | grep aesni_intel | wc -l` -gt 0 ] + then + echo aesni_intel >> /etc/modules + fi + fi +} + setup_router() { log_it "Setting up virtual router system vm" - + oldmd5= [ -f "/etc/udev/rules.d/70-persistent-net.rules" ] && oldmd5=$(md5sum "/etc/udev/rules.d/70-persistent-net.rules" | awk '{print $1}') @@ -620,10 +654,8 @@ setup_router() { fi fi - - + setup_aesni setup_dnsmasq - setup_apache2 $ETH0_IP sed -i /gateway/d /etc/hosts @@ -960,6 +992,12 @@ for i in $CMDLINE dns2) NS2=$VALUE ;; + ip6dns1) + IP6_NS1=$VALUE + ;; + ip6dns2) + IP6_NS2=$VALUE + ;; domain) DOMAIN=$VALUE ;; diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh b/patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh index c909cf796d1..31003454ec0 100755 --- a/patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh +++ b/patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh @@ -129,6 +129,18 @@ desetup_passwdsvcs() { } create_guest_network() { + # need to wait for eth device to appear before configuring it + timer=0 + while ! 
`grep -q $dev /proc/net/dev` ; do + logger -t cloud "$(basename $0):Waiting for interface $dev to appear, $timer seconds" + sleep 1; + if [ $timer -gt 15 ]; then + logger -t cloud "$(basename $0):interface $dev never appeared" + break + fi + timer=$[timer + 1] + done + logger -t cloud " $(basename $0): Create network on interface $dev, gateway $gw, network $ip/$mask " # setup ip configuration sudo ip addr add dev $dev $ip/$mask brd + diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh b/patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh index 53e739d02d6..f2f8a49339e 100755 --- a/patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh +++ b/patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh @@ -56,6 +56,18 @@ remove_routing() { } add_an_ip () { + # need to wait for eth device to appear before configuring it + timer=0 + while ! `grep -q $ethDev /proc/net/dev` ; do + logger -t cloud "$(basename $0):Waiting for interface $ethDev to appear, $timer seconds" + sleep 1; + if [ $timer -gt 15 ]; then + logger -t cloud "$(basename $0):interface $ethDev never appeared" + break + fi + timer=$[timer + 1] + done + logger -t cloud "$(basename $0):Adding ip $pubIp on interface $ethDev" sudo ip link show $ethDev | grep "state DOWN" > /dev/null local old_state=$? 
diff --git a/patches/systemvm/debian/config/root/edithosts.sh b/patches/systemvm/debian/config/root/edithosts.sh index 257de926724..9f21f206172 100755 --- a/patches/systemvm/debian/config/root/edithosts.sh +++ b/patches/systemvm/debian/config/root/edithosts.sh @@ -27,7 +27,7 @@ # $6 : comma separated static routes usage() { - printf "Usage: %s: -m -4 -6 -h -d -n -s -u \n" $(basename $0) >&2 + printf "Usage: %s: -m -4 -6 -h -d -n -s -u [-N]\n" $(basename $0) >&2 } mac= @@ -38,8 +38,9 @@ dflt= dns= routes= duid= +nondefault= -while getopts 'm:4:h:d:n:s:6:u:' OPTION +while getopts 'm:4:h:d:n:s:6:u:N' OPTION do case $OPTION in m) mac="$OPTARG" @@ -58,6 +59,8 @@ do ;; s) routes="$OPTARG" ;; + N) nondefault=1 + ;; ?) usage exit 2 ;; @@ -120,7 +123,12 @@ then fi if [ $ipv6 ] then - echo "id:$duid,[$ipv6],$host,infinite" >>$DHCP_HOSTS + if [ $nondefault ] + then + echo "id:$duid,set:nondefault6,[$ipv6],$host,infinite" >>$DHCP_HOSTS + else + echo "id:$duid,[$ipv6],$host,infinite" >>$DHCP_HOSTS + fi fi #delete leases to supplied mac and ip addresses @@ -176,8 +184,8 @@ then if [ "$dflt" == "0.0.0.0" ] then logger -t cloud "$0: unset default router for $ipv4" + logger -t cloud "$0: unset dns server for $ipv4" echo "$tag,3" >> $DHCP_OPTS - logger -t cloud "$0: setting dns server for $ipv4 to $dns" echo "$tag,6" >> $DHCP_OPTS echo "$tag,15" >> $DHCP_OPTS fi diff --git a/patches/systemvm/debian/config/root/func.sh b/patches/systemvm/debian/config/root/func.sh index 86317a06843..8cc96082cc2 100644 --- a/patches/systemvm/debian/config/root/func.sh +++ b/patches/systemvm/debian/config/root/func.sh @@ -23,7 +23,8 @@ # $2 timeout seconds getLockFile() { __locked=0 - __LOCKFILE="/tmp/$1-$$.lock" + __TS=`date +%s%N` + __LOCKFILE="/tmp/$__TS-$$-$1.lock" if [ $2 ] then __TIMEOUT=$2 @@ -49,7 +50,7 @@ getLockFile() { for i in `seq 1 $(($__TIMEOUT * 10))` do - currlock=`ls -tr /tmp/$1-*.lock | head -n1` + currlock=`ls /tmp/*-$1.lock | head -n1` if [ $currlock -ef $__LOCKFILE ] then 
__locked=1 @@ -77,7 +78,7 @@ getLockFile() { # $1 lock filename # $2 locked(1) or not(0) releaseLockFile() { - __LOCKFILE="/tmp/$1-$$.lock" + __LOCKFILE="/tmp/*-$$-$1.lock" __locked=$2 if [ "$__locked" == "1" ] then diff --git a/patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ b/patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ index 176bce22559..931c95901c8 100644 --- a/patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ +++ b/patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ @@ -22,6 +22,8 @@ do while read line do ip=`echo $line|cut -d " " -f 2|cut -d "/" -f 1` - arping -I $i -A $ip -c 2 >> [RROUTER_LOG] 2>&1 + arping -I $i -A $ip -c 1 >> [RROUTER_LOG] 2>&1 + arping -I $i -A $ip -c 1 >> [RROUTER_LOG] 2>&1 done < /tmp/iplist_$i done < /tmp/iflist +sleep 1 diff --git a/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ b/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ index 03111b557a5..7a1bd44584a 100644 --- a/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ +++ b/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ @@ -28,6 +28,7 @@ fi echo To backup called >> [RROUTER_LOG] [RROUTER_BIN_PATH]/disable_pubip.sh >> [RROUTER_LOG] 2>&1 echo Disable public ip $? >> [RROUTER_LOG] +[RROUTER_BIN_PATH]/services.sh stop >> [RROUTER_LOG] 2>&1 [RROUTER_BIN_PATH]/primary-backup.sh backup >> [RROUTER_LOG] 2>&1 echo Switch conntrackd mode backup $? 
>> [RROUTER_LOG] echo Status: BACKUP >> [RROUTER_LOG] diff --git a/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ b/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ index 908c0d8f06f..7a980bdfb8c 100755 --- a/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ +++ b/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ @@ -22,7 +22,7 @@ then lasttime=$(cat [RROUTER_BIN_PATH]/keepalived.ts2) thistime=$(cat [RROUTER_BIN_PATH]/keepalived.ts) diff=$(($thistime - $lasttime)) - if [ $diff -gt 100 ] + if [ $diff -lt 30 ] then echo Keepalived process is dead! >> [RROUTER_LOG] service keepalived stop >> [RROUTER_LOG] 2>&1 diff --git a/plugins/acl/static-role-based/pom.xml b/plugins/acl/static-role-based/pom.xml index a2e8d05d48e..e40cecb9d65 100644 --- a/plugins/acl/static-role-based/pom.xml +++ b/plugins/acl/static-role-based/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/discovery/pom.xml b/plugins/api/discovery/pom.xml index 1cfc5c2eaf2..5d9ad75ea3a 100644 --- a/plugins/api/discovery/pom.xml +++ b/plugins/api/discovery/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java old mode 100644 new mode 100755 index 30123c79df2..b3714883964 --- a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -69,8 +69,11 @@ public class ApiDiscoveryServiceImpl implements ApiDiscoveryService { long startTime = System.nanoTime(); s_apiNameDiscoveryResponseMap = new HashMap(); Set> cmdClasses = new HashSet>(); -
for(PluggableService service: _services) - cmdClasses.addAll(service.getCommands()); + for(PluggableService service: _services) { + s_logger.debug(String.format("getting api commands of service: %s", service.getClass().getName())); + cmdClasses.addAll(service.getCommands()); + } + cmdClasses.addAll(this.getCommands()); cacheResponseMap(cmdClasses); long endTime = System.nanoTime(); s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); diff --git a/plugins/api/rate-limit/pom.xml b/plugins/api/rate-limit/pom.xml index 1f0330916a9..5645f0b3a32 100644 --- a/plugins/api/rate-limit/pom.xml +++ b/plugins/api/rate-limit/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java b/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java index 5a7ac863abc..7ec53163c91 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/admin/ratelimit/ResetApiLimitCmd.java @@ -29,6 +29,8 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.ratelimit.ApiRateLimitService; import org.apache.log4j.Logger; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.user.Account; import com.cloud.user.UserContext; @@ -43,6 +45,9 @@ public class ResetApiLimitCmd extends BaseCmd { @Inject ApiRateLimitService _apiLimitService; + @Inject + ConfigurationDao _configDao; + ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @@ -89,6 +94,10 @@ public class ResetApiLimitCmd extends BaseCmd { 
@Override public void execute(){ + boolean apiLimitEnabled = Boolean.parseBoolean(_configDao.getValue(Config.ApiLimitEnabled.key())); + if ( !apiLimitEnabled ){ + throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "This api is only available when api.throttling.enabled = true."); + } boolean result = _apiLimitService.resetApiLimit(this.accountId); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java b/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java index 1afa9322d75..ba92e8b60c8 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/api/command/user/ratelimit/GetApiLimitCmd.java @@ -21,6 +21,7 @@ import java.util.List; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; @@ -35,6 +36,9 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.ratelimit.ApiRateLimitService; + +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; @@ -55,6 +59,9 @@ public class GetApiLimitCmd extends BaseCmd { @Inject ApiRateLimitService _apiLimitService; + @Inject + ConfigurationDao _configDao; + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// 
@@ -76,6 +83,10 @@ public class GetApiLimitCmd extends BaseCmd { @Override public void execute(){ + boolean apiLimitEnabled = Boolean.parseBoolean(_configDao.getValue(Config.ApiLimitEnabled.key())); + if ( !apiLimitEnabled ){ + throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "This api is only available when api.throttling.enabled = true."); + } Account caller = UserContext.current().getCaller(); ApiLimitResponse response = _apiLimitService.searchApiLimit(caller); response.setResponseName(getCommandName()); diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java index c5b715019b6..ad421b673cb 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitService.java @@ -22,7 +22,6 @@ import com.cloud.utils.component.PluggableService; /** * Provide API rate limit service - * @author minc * */ public interface ApiRateLimitService extends PluggableService{ @@ -34,4 +33,6 @@ public interface ApiRateLimitService extends PluggableService{ public void setTimeToLive(int timeToLive); public void setMaxAllowed(int max); + + public void setEnabled(boolean enabled); } diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java index 1f84ca18bbb..7d1b43ae6d5 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/ApiRateLimitServiceImpl.java @@ -49,6 +49,11 @@ import org.springframework.stereotype.Component; public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, ApiRateLimitService { private static final Logger s_logger = 
Logger.getLogger(ApiRateLimitServiceImpl.class); + /** + * True if api rate limiting is enabled + */ + private boolean enabled = false; + /** * Fixed time duration where api rate limit is set, in seconds */ @@ -73,6 +78,10 @@ public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, if (_store == null) { // get global configured duration and max values + String isEnabled = _configDao.getValue(Config.ApiLimitEnabled.key()); + if ( isEnabled != null ){ + enabled = Boolean.parseBoolean(isEnabled); + } String duration = _configDao.getValue(Config.ApiLimitInterval.key()); if (duration != null) { timeToLive = Integer.parseInt(duration); @@ -139,7 +148,11 @@ public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, @Override - public boolean checkAccess(User user, String apiCommandName) throws PermissionDeniedException, RequestLimitException { + public boolean checkAccess(User user, String apiCommandName) throws PermissionDeniedException { + // check if api rate limiting is enabled or not + if (!enabled){ + return true; + } Long accountId = user.getAccountId(); Account account = _accountService.getAccount(accountId); if ( _accountService.isRootAdmin(account.getType())){ @@ -192,5 +205,11 @@ public class ApiRateLimitServiceImpl extends AdapterBase implements APIChecker, } + @Override + public void setEnabled(boolean enabled) { + this.enabled = enabled; + + } + } diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java index 659cf81b0e6..ee7c528bd07 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/EhcacheLimitStore.java @@ -23,7 +23,6 @@ import net.sf.ehcache.constructs.blocking.LockTimeoutException; /** * A Limit store implementation using Ehcache. 
- * @author minc * */ public class EhcacheLimitStore implements LimitStore { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java index a5e086b3029..373d9652ee9 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/LimitStore.java @@ -20,7 +20,6 @@ import com.cloud.user.Account; /** * Interface to define how an api limit store should work. - * @author minc * */ public interface LimitStore { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java index 76e8a2d9281..05a7029dcb0 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntry.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.ratelimit; /** * Interface for each entry in LimitStore. - * @author minc * */ public interface StoreEntry { diff --git a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java index e8143e52370..9f10fe68a41 100644 --- a/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java +++ b/plugins/api/rate-limit/src/org/apache/cloudstack/ratelimit/StoreEntryImpl.java @@ -20,7 +20,6 @@ import java.util.concurrent.atomic.AtomicInteger; /** * Implementation of limit store entry. 
- * @author minc * */ public class StoreEntryImpl implements StoreEntry { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/ApiRateLimitTest.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/ApiRateLimitTest.java index 1a77a4ef3a6..3c6cadfc33c 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/ApiRateLimitTest.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/ApiRateLimitTest.java @@ -55,6 +55,7 @@ public class ApiRateLimitTest { when(_configDao.getValue(Config.ApiLimitInterval.key())).thenReturn(null); when(_configDao.getValue(Config.ApiLimitMax.key())).thenReturn(null); when(_configDao.getValue(Config.ApiLimitCacheSize.key())).thenReturn(null); + when(_configDao.getValue(Config.ApiLimitEnabled.key())).thenReturn("true"); // enable api rate limiting _limitService._configDao = _configDao; _limitService.configure("ApiRateLimitTest", Collections. emptyMap()); @@ -106,6 +107,8 @@ public class ApiRateLimitTest { + " accesses take less than a second to perform", isUnderLimit(key)); } + + @Test public void canDoReasonableNumberOfApiAccessPerSecond() throws Exception { int allowedRequests = 200; @@ -232,4 +235,26 @@ public class ApiRateLimitTest { } + @Test + public void disableApiLimit() throws Exception { + try { + int allowedRequests = 200; + _limitService.setMaxAllowed(allowedRequests); + _limitService.setTimeToLive(1); + _limitService.setEnabled(false); + + User key = createFakeUser(); + + for (int i = 0; i < allowedRequests + 1; i++) { + assertTrue("We should allow more than " + allowedRequests + " requests per second when api throttling is disabled.", + isUnderLimit(key)); + } + } finally { + _limitService.setEnabled(true); // enable api throttling to avoid + // impacting other testcases + } + + } + + } diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java 
b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java index 7701b1515b0..e75e852f0b7 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/APITest.java @@ -37,8 +37,6 @@ import com.google.gson.Gson; /** * Base class for API Test * - * @author Min Chen - * */ public abstract class APITest { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java index 719f39c0a5e..61a178033af 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/LoginResponse.java @@ -24,8 +24,6 @@ import com.google.gson.annotations.SerializedName; /** * Login Response object * - * @author Min Chen - * */ public class LoginResponse extends BaseResponse { diff --git a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java index 72d354c6c77..f9352333d12 100644 --- a/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java +++ b/plugins/api/rate-limit/test/org/apache/cloudstack/ratelimit/integration/RateLimitIntegrationTest.java @@ -34,9 +34,6 @@ import com.cloud.utils.exception.CloudRuntimeException; /** * Test fixture to do integration rate limit test. * Currently we commented out this test suite since it requires a real MS and Db running. 
- * - * @author Min Chen - * */ public class RateLimitIntegrationTest extends APITest { diff --git a/plugins/deployment-planners/user-concentrated-pod/pom.xml b/plugins/deployment-planners/user-concentrated-pod/pom.xml index 78829356170..df7c660630e 100644 --- a/plugins/deployment-planners/user-concentrated-pod/pom.xml +++ b/plugins/deployment-planners/user-concentrated-pod/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/deployment-planners/user-dispersing/pom.xml b/plugins/deployment-planners/user-dispersing/pom.xml index 33f6582e72f..0e5dbd58eb6 100644 --- a/plugins/deployment-planners/user-dispersing/pom.xml +++ b/plugins/deployment-planners/user-dispersing/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/rabbitmq/pom.xml b/plugins/event-bus/rabbitmq/pom.xml index 6a47983a9b5..bd4d0977c04 100644 --- a/plugins/event-bus/rabbitmq/pom.xml +++ b/plugins/event-bus/rabbitmq/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java index ce0930d115d..1c0c6bef6f2 100644 --- a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java +++ b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java @@ -40,13 +40,17 @@ import java.util.concurrent.Executors; public class RabbitMQEventBus extends ManagerBase implements EventBus { // details of AMQP server - private static String _amqpHost; - private static Integer _port; - private static String _username; - private static String _password; + private static String amqpHost; + private static Integer port; + private static String username; + 
private static String password; // AMQP exchange name where all CloudStack events will be published - private static String _amqpExchangeName; + private static String amqpExchangeName; + + private String name; + + private static Integer retryInterval; // hashmap to book keep the registered subscribers private static ConcurrentHashMap> _subscribers; @@ -58,59 +62,76 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { private static boolean _autoAck = true; private ExecutorService executorService; - private String _name; private static DisconnectHandler disconnectHandler; - private static Integer _retryInterval; private static final Logger s_logger = Logger.getLogger(RabbitMQEventBus.class); @Override public boolean configure(String name, Map params) throws ConfigurationException { - _amqpHost = (String) params.get("server"); - if (_amqpHost == null || _amqpHost.isEmpty()) { - throw new ConfigurationException("Unable to get the AMQP server details"); - } - - _username = (String) params.get("username"); - if (_username == null || _username.isEmpty()) { - throw new ConfigurationException("Unable to get the username details"); - } - - _password = (String) params.get("password"); - if (_password == null || _password.isEmpty()) { - throw new ConfigurationException("Unable to get the password details"); - } - - _amqpExchangeName = (String) params.get("exchangename"); - if (_amqpExchangeName == null || _amqpExchangeName.isEmpty()) { - throw new ConfigurationException("Unable to get the _exchange details on the AMQP server"); - } - try { - String portStr = (String) params.get("port"); - if (portStr == null || portStr.isEmpty()) { + if (amqpHost == null || amqpHost.isEmpty()) { + throw new ConfigurationException("Unable to get the AMQP server details"); + } + + if (username == null || username.isEmpty()) { + throw new ConfigurationException("Unable to get the username details"); + } + + if (password == null || password.isEmpty()) { + throw new 
ConfigurationException("Unable to get the password details"); + } + + if (amqpExchangeName == null || amqpExchangeName.isEmpty()) { + throw new ConfigurationException("Unable to get the _exchange details on the AMQP server"); + } + + if (port == null) { throw new ConfigurationException("Unable to get the port details of AMQP server"); } - _port = Integer.parseInt(portStr); - String retryIntervalStr = (String) params.get("retryinterval"); - if (retryIntervalStr == null || retryIntervalStr.isEmpty()) { - // default to 10s to try out reconnect - retryIntervalStr = "10000"; + if (retryInterval == null) { + retryInterval = 10000;// default to 10s to try out reconnect } - _retryInterval = Integer.parseInt(retryIntervalStr); + } catch (NumberFormatException e) { throw new ConfigurationException("Invalid port number/retry interval"); } _subscribers = new ConcurrentHashMap>(); - executorService = Executors.newCachedThreadPool(); disconnectHandler = new DisconnectHandler(); - _name = name; + return true; } + public void setServer(String amqpHost) { + this.amqpHost = amqpHost; + } + + public void setUsername(String username) { + this.username = username; + } + + public void setPassword(String password) { + this.password = password; + } + + public void setPort(Integer port) { + this.port = port; + } + + public void setName(String name) { + this.name = name; + } + + public void setExchange(String exchange) { + this.amqpExchangeName = exchange; + } + + public void setRetryInterval(Integer retryInterval) { + this.retryInterval = retryInterval; + } + /** Call to subscribe to interested set of events * * @param topic defines category and type of the events being subscribed to @@ -141,9 +162,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { Channel channel = createChannel(connection); // create a queue and bind it to the exchange with binding key formed from event topic - createExchange(channel, _amqpExchangeName); + createExchange(channel, 
amqpExchangeName); channel.queueDeclare(queueName, false, false, false, null); - channel.queueBind(queueName, _amqpExchangeName, bindingKey); + channel.queueBind(queueName, amqpExchangeName, bindingKey); // register a callback handler to receive the events that a subscriber subscribed to channel.basicConsume(queueName, _autoAck, queueName, @@ -216,8 +237,8 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { try { Connection connection = getConnection(); Channel channel = createChannel(connection); - createExchange(channel, _amqpExchangeName); - publishEventToExchange(channel, _amqpExchangeName, routingKey, eventDescription); + createExchange(channel, amqpExchangeName); + publishEventToExchange(channel, amqpExchangeName, routingKey, eventDescription); channel.close(); } catch (AlreadyClosedException e) { closeConnection(); @@ -315,11 +336,11 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { private synchronized Connection createConnection() throws Exception { try { ConnectionFactory factory = new ConnectionFactory(); - factory.setUsername(_username); - factory.setPassword(_password); + factory.setUsername(username); + factory.setPassword(password); factory.setVirtualHost("/"); - factory.setHost(_amqpHost); - factory.setPort(_port); + factory.setHost(amqpHost); + factory.setPort(port); Connection connection = factory.newConnection(); connection.addShutdownListener(disconnectHandler); _connection = connection; @@ -481,7 +502,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { while (!connected) { try { - Thread.sleep(_retryInterval); + Thread.sleep(retryInterval); } catch (InterruptedException ie) { // ignore timer interrupts } @@ -504,9 +525,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus { * with binding key formed from event topic */ Channel channel = createChannel(connection); - createExchange(channel, _amqpExchangeName); + createExchange(channel, amqpExchangeName); 
channel.queueDeclare(subscriberId, false, false, false, null); - channel.queueBind(subscriberId, _amqpExchangeName, bindingKey); + channel.queueBind(subscriberId, amqpExchangeName, bindingKey); // register a callback handler to receive the events that a subscriber subscribed to channel.basicConsume(subscriberId, _autoAck, subscriberId, diff --git a/plugins/file-systems/netapp/pom.xml b/plugins/file-systems/netapp/pom.xml index e1c8866d15d..0e6f427da36 100644 --- a/plugins/file-systems/netapp/pom.xml +++ b/plugins/file-systems/netapp/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/host-allocators/random/pom.xml b/plugins/host-allocators/random/pom.xml index ba7e1ae1e65..6fc76fe8dad 100644 --- a/plugins/host-allocators/random/pom.xml +++ b/plugins/host-allocators/random/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml index 600eedb1440..328bd963c91 100755 --- a/plugins/hypervisors/baremetal/pom.xml +++ b/plugins/hypervisors/baremetal/pom.xml @@ -21,7 +21,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml cloud-plugin-hypervisor-baremetal diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbVO.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbVO.java deleted file mode 100755 index ee3848a5e9d..00000000000 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbVO.java +++ /dev/null @@ -1,104 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// -// Automatically generated by addcopyright.py at 01/29/2013 -package com.cloud.baremetal.database; - -import java.util.UUID; - -import javax.persistence.Column; -import javax.persistence.Entity; -import javax.persistence.GeneratedValue; -import javax.persistence.GenerationType; -import javax.persistence.Id; -import javax.persistence.Table; - -@Entity -@Table(name="baremetal_cmdb") -public class BaremetalCmdbVO { - @Id - @GeneratedValue(strategy = GenerationType.IDENTITY) - @Column(name = "id") - private long id; - - @Column(name="uuid") - private String uuid; - - @Column(name="zone_id") - private long zoneId; - - @Column(name="url") - private String url; - - @Column(name="password") - private String password; - - @Column(name="username") - private String username; - - public BaremetalCmdbVO() { - uuid = UUID.randomUUID().toString(); - } - - public long getId() { - return id; - } - - public void setId(long id) { - this.id = id; - } - - public String getUuid() { - return uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public long getZoneId() { - return zoneId; - } - - public void setZoneId(long zoneId) { - this.zoneId = zoneId; - } - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password 
= password; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } -} diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/AddBaremetalHostCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/AddBaremetalHostCmd.java index 5222d103699..f07b212173f 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/AddBaremetalHostCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/AddBaremetalHostCmd.java @@ -18,17 +18,24 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.manager; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.command.admin.host.AddHostCmd; - +import org.apache.cloudstack.api.response.HostResponse; +@APICommand(name="addBaremetalHost", description="add a baremetal host", responseObject = HostResponse.class) public class AddBaremetalHostCmd extends AddHostCmd { @Parameter(name=ApiConstants.IP_ADDRESS, type=CommandType.STRING, description="ip address intentionally allocated to this host after provisioning") private String vmIpAddress; public AddBaremetalHostCmd() { + } + + @Override + public void execute(){ this.getFullUrlParams().put(ApiConstants.BAREMETAL_DISCOVER_NAME, BareMetalDiscoverer.class.getName()); + super.execute(); } public String getVmIpAddress() { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java index 9b0a5104889..28c83753c09 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalDiscoverer.java @@ -276,5 +276,13 @@ public class BareMetalDiscoverer extends 
DiscovererBase implements Discoverer, R return new DeleteHostAnswer(true); } + + @Override + protected HashMap buildConfigParams(HostVO host) { + HashMap params = super.buildConfigParams(host); + params.put("hostId", host.getId()); + params.put("ipaddress", host.getPrivateIpAddress()); + return params; + } } diff --git a/server/src/com/cloud/deploy/BareMetalPlanner.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java similarity index 85% rename from server/src/com/cloud/deploy/BareMetalPlanner.java rename to plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java index 829a4662e12..97b2840f419 100755 --- a/server/src/com/cloud/deploy/BareMetalPlanner.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BareMetalPlanner.java @@ -14,7 +14,7 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.deploy; +package com.cloud.baremetal.manager; import java.util.List; import java.util.Map; @@ -23,17 +23,20 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.*; +import com.cloud.dc.ClusterDetailsDao; import org.apache.log4j.Logger; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenter; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.deploy.DeploymentPlanner; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -57,16 +60,14 @@ public class BareMetalPlanner 
extends AdapterBase implements DeploymentPlanner { @Inject protected ConfigurationDao _configDao; @Inject protected CapacityManager _capacityMgr; @Inject protected ResourceManager _resourceMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; @Override public DeployDestination plan(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException { VirtualMachine vm = vmProfile.getVirtualMachine(); - ServiceOffering offering = vmProfile.getServiceOffering(); + ServiceOffering offering = vmProfile.getServiceOffering(); String hostTag = null; - - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - + String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag); if (vm.getLastHostId() != null && haVmTag == null) { @@ -122,7 +123,13 @@ public class BareMetalPlanner extends AdapterBase implements DeploymentPlanner { return null; } for (HostVO h : hosts) { - if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOverprovisioningFactor, true)) { + long cluster_id = h.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio") ; + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + + if (_capacityMgr.checkIfHostHasCapacity(h.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true)) { s_logger.debug("Find host " + h.getId() + " has enough capacity"); DataCenter dc = _dcDao.findById(h.getDataCenterId()); Pod pod = _podDao.findById(h.getPodId()); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManager.java 
b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManager.java index 1599050453a..6467c945795 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManager.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManager.java @@ -20,8 +20,9 @@ package com.cloud.baremetal.manager; import com.cloud.network.Network.Provider; import com.cloud.utils.component.Manager; +import com.cloud.utils.component.PluggableService; -public interface BaremetalManager extends Manager { +public interface BaremetalManager extends Manager, PluggableService { public static final String EchoSecurityGroupAgent = "EchoSecurityGroupAgent"; public static final String ExternalBaremetalSystemUrl = "ExternalBaremetalSystemUrl"; public static final String DO_PXE = "doPxe"; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java index b07a6bbf273..b41d6ca0426 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalManagerImpl.java @@ -18,6 +18,8 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.manager; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import javax.ejb.Local; @@ -110,4 +112,11 @@ public class BaremetalManagerImpl extends ManagerBase implements BaremetalManage return true; } + + @Override + public List> getCommands() { + List> cmds = new ArrayList>(); + cmds.add(AddBaremetalHostCmd.class); + return cmds; + } } diff --git a/server/src/com/cloud/vm/dao/RandomlyIncreasingVMInstanceDaoImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java old mode 100644 new mode 100755 similarity index 57% rename from 
server/src/com/cloud/vm/dao/RandomlyIncreasingVMInstanceDaoImpl.java rename to plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java index cc5c5368a81..9daee3f3fe7 --- a/server/src/com/cloud/vm/dao/RandomlyIncreasingVMInstanceDaoImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/manager/BaremetalPlannerSelector.java @@ -1,36 +1,39 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.vm.dao; - - -import javax.ejb.Local; - -import org.apache.log4j.Logger; -import org.springframework.context.annotation.Primary; -import org.springframework.stereotype.Component; - -@Local(value = { UserVmDao.class }) -public class RandomlyIncreasingVMInstanceDaoImpl extends UserVmDaoImpl { - - public static final Logger s_logger = Logger.getLogger(RandomlyIncreasingVMInstanceDaoImpl.class); - - @Override - public K getNextInSequence(final Class clazz, final String name) { - return getRandomlyIncreasingNextInSequence(clazz, name); - } - -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.baremetal.manager; + +import java.util.Map; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; + +import com.cloud.deploy.AbstractDeployPlannerSelector; +import com.cloud.deploy.DeployPlannerSelector; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.vm.UserVmVO; +@Local(value = {DeployPlannerSelector.class}) +public class BaremetalPlannerSelector extends AbstractDeployPlannerSelector{ + + @Override + public String selectPlanner(UserVmVO vm) { + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + return "BareMetalPlanner"; + } + return null; + } + +} diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalDhcpCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalDhcpCmd.java index 8a3d4d74191..c74983222f2 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalDhcpCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalDhcpCmd.java @@ -20,11 +20,14 @@ package com.cloud.baremetal.networkservice; import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.BaseCmd; import 
org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.response.PhysicalNetworkResponse; +import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.log4j.Logger; @@ -37,7 +40,7 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.UserContext; - +@APICommand(name="addBaremetalDhcp", description="adds a baremetal dhcp server", responseObject = BaremetalDhcpResponse.class) public class AddBaremetalDhcpCmd extends BaseAsyncCmd { private static final String s_name = "addexternaldhcpresponse"; public static final Logger s_logger = Logger.getLogger(AddBaremetalDhcpCmd.class); @@ -47,10 +50,10 @@ public class AddBaremetalDhcpCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.PHYSICAL_NETWORK_ID, type=CommandType.LONG, required=true, description="the Physical Network ID") + @Parameter(name=ApiConstants.PHYSICAL_NETWORK_ID, type=CommandType.UUID, entityType=PhysicalNetworkResponse.class, required=true, description="the Physical Network ID") private Long physicalNetworkId; - @Parameter(name=ApiConstants.POD_ID, type=CommandType.LONG, required = true, description="Pod Id") + @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType=PodResponse.class, required = true, description="Pod Id") private Long podId; @Parameter(name=ApiConstants.DHCP_SERVER_TYPE, type=CommandType.STRING, required = true, description="Type of dhcp device") diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalKickStartPxeCmd.java 
b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalKickStartPxeCmd.java index 4c3d0b22576..596a86dac8f 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalKickStartPxeCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalKickStartPxeCmd.java @@ -18,10 +18,11 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.networkservice; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.Parameter; - +@APICommand(name="addBaremetalPxeKickStartServer", description="add a baremetal pxe server", responseObject = BaremetalPxeKickStartResponse.class) public class AddBaremetalKickStartPxeCmd extends AddBaremetalPxeCmd { @Parameter(name=ApiConstants.TFTP_DIR, type=CommandType.STRING, required = true, description="Tftp root directory of PXE server") private String tftpDir; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxeCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxeCmd.java index cd8da4a58b9..63e11478e4c 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxeCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxeCmd.java @@ -20,11 +20,14 @@ package com.cloud.baremetal.networkservice; import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.response.PhysicalNetworkResponse; +import org.apache.cloudstack.api.response.PodResponse; import 
org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.log4j.Logger; @@ -37,7 +40,6 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.UserContext; - public class AddBaremetalPxeCmd extends BaseAsyncCmd { private static final String s_name = "addexternalpxeresponse"; public static final Logger s_logger = Logger.getLogger(AddBaremetalPxeCmd.class); @@ -46,10 +48,10 @@ public class AddBaremetalPxeCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.PHYSICAL_NETWORK_ID, type=CommandType.LONG, required=true, description="the Physical Network ID") + @Parameter(name=ApiConstants.PHYSICAL_NETWORK_ID, type=CommandType.UUID, entityType=PhysicalNetworkResponse.class, required=true, description="the Physical Network ID") private Long physicalNetworkId; - @Parameter(name=ApiConstants.POD_ID, type=CommandType.LONG, description="Pod Id") + @Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType=PodResponse.class, description="Pod Id") private Long podId; @Parameter(name=ApiConstants.URL, type=CommandType.STRING, required = true, description="URL of the external pxe device") diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxePingServerCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxePingServerCmd.java index 70796f3499f..01cafd435da 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxePingServerCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/AddBaremetalPxePingServerCmd.java @@ -18,9 +18,11 @@ // Automatically generated by addcopyright.py 
at 01/29/2013 package com.cloud.baremetal.networkservice; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.Parameter; +@APICommand(name="addBaremetalPxePingServer", description="add a baremetal ping pxe server", responseObject = BaremetalPxePingResponse.class) public class AddBaremetalPxePingServerCmd extends AddBaremetalPxeCmd { @Parameter(name=ApiConstants.PING_STORAGE_SERVER_IP, type=CommandType.STRING, required = true, description="PING storage server ip") diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java index 3e21750132e..d6b96a81807 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java @@ -297,4 +297,10 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements } return responses; } + + + @Override + public String getPxeServiceType() { + return BaremetalPxeManager.BaremetalPxeType.PING.toString(); + } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java index af9e103f77b..f87bf71ca13 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java @@ -319,6 +319,9 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh @Override public List> getCommands() { - return null; + List> cmds = new ArrayList>(); + cmds.add(AddBaremetalDhcpCmd.class); + cmds.add(ListBaremetalDhcpCmd.class); + return cmds; } } 
diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResponse.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResponse.java index 952ac41a701..1875d3947a0 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResponse.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpResponse.java @@ -18,12 +18,16 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.networkservice; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import com.cloud.baremetal.database.BaremetalDhcpVO; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; +@EntityReference(value=BaremetalDhcpVO.class) public class BaremetalDhcpResponse extends BaseResponse { @SerializedName(ApiConstants.ID) @Param(description="device id of ") private String id; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java index 938b3ac1d46..58c6e862d9f 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartPxeResource.java @@ -32,6 +32,7 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.routing.VmDataCommand; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SSHCmdHelper; import com.cloud.vm.VirtualMachine.State; @@ -70,27 +71,27 @@ public class 
BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { String prepareScript = "scripts/network/ping/prepare_kickstart_bootfile.py"; String prepareScriptPath = Script.findScript("", prepareScript); if (prepareScriptPath == null) { - throw new ConfigurationException("Can not find prepare_kickstart_bootfile.py at " + prepareScriptPath); + throw new ConfigurationException("Can not find prepare_kickstart_bootfile.py at " + prepareScript); } scp.put(prepareScriptPath, "/usr/bin/", "0755"); String cpScript = "scripts/network/ping/prepare_kickstart_kernel_initrd.py"; String cpScriptPath = Script.findScript("", cpScript); if (cpScriptPath == null) { - throw new ConfigurationException("Can not find prepare_kickstart_kernel_initrd.py at " + cpScriptPath); + throw new ConfigurationException("Can not find prepare_kickstart_kernel_initrd.py at " + cpScript); } scp.put(cpScriptPath, "/usr/bin/", "0755"); String userDataScript = "scripts/network/ping/baremetal_user_data.py"; String userDataScriptPath = Script.findScript("", userDataScript); if (userDataScriptPath == null) { - throw new ConfigurationException("Can not find baremetal_user_data.py at " + userDataScriptPath); + throw new ConfigurationException("Can not find baremetal_user_data.py at " + userDataScript); } scp.put(userDataScriptPath, "/usr/bin/", "0755"); return true; } catch (Exception e) { - throw new ConfigurationException(e.getMessage()); + throw new CloudRuntimeException(e); } finally { if (sshConnection != null) { sshConnection.close(); @@ -174,7 +175,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase { } String copyTo = String.format("%s/%s", _tftpDir, cmd.getTemplateUuid()); - String script = String.format("python /usr/bin/prepare_kickstart_kernel_initrd.py %s %s", cmd.getRepo(), copyTo); + String script = String.format("python /usr/bin/prepare_kickstart_kernel_initrd.py %s %s %s", cmd.getKernel(), cmd.getInitrd(), copyTo); if (!SSHCmdHelper.sshExecuteCmd(sshConnection, 
script)) { return new Answer(cmd, false, "prepare kickstart at pxe server " + _ip + " failed, command:" + script); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java index 617893fc16c..8a5ac78729e 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java @@ -95,11 +95,36 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple try { String tpl = profile.getTemplate().getUrl(); assert tpl != null : "How can a null template get here!!!"; - String[] tpls = tpl.split(";"); - assert tpls.length == 2 : "Template is not correctly encoded. " + tpl; + String[] tpls = tpl.split(";"); + CloudRuntimeException err = new CloudRuntimeException(String.format("template url[%s] is not correctly encoded. 
it must be in format of ks=http_link_to_kickstartfile;kernel=nfs_path_to_pxe_kernel;initrd=nfs_path_to_pxe_initrd", tpl)); + if (tpls.length != 3) { + throw err; + } + + String ks = null; + String kernel = null; + String initrd = null; + + for (String t : tpls) { + String[] kv = t.split("="); + if (kv.length != 2) { + throw err; + } + if (kv[0].equals("ks")) { + ks = kv[1]; + } else if (kv[0].equals("kernel")) { + kernel = kv[1]; + } else if (kv[0].equals("initrd")) { + initrd = kv[1]; + } else { + throw err; + } + } + PrepareKickstartPxeServerCommand cmd = new PrepareKickstartPxeServerCommand(); - cmd.setKsFile(tpls[0]); - cmd.setRepo(tpls[1]); + cmd.setKsFile(ks); + cmd.setInitrd(initrd); + cmd.setKernel(kernel); cmd.setMac(nic.getMacAddress()); cmd.setTemplateUuid(template.getUuid()); Answer aws = _agentMgr.send(pxeVo.getHostId(), cmd); @@ -233,6 +258,11 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple responses.add(getApiResponse(vo)); } return responses; + } + + @Override + public String getPxeServiceType() { + return BaremetalPxeManager.BaremetalPxeType.KICK_START.toString(); } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java index bc4bcd3d53a..7b8d528b4b4 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java @@ -166,13 +166,11 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement { @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - // TODO Auto-generated method stub - return false; + return true; } @Override public boolean verifyServicesCombination(Set services) { - // TODO Auto-generated method 
stub - return false; + return true; } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeKickStartResponse.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeKickStartResponse.java index 09c6cc6769d..64f22e0947e 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeKickStartResponse.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeKickStartResponse.java @@ -19,10 +19,13 @@ package com.cloud.baremetal.networkservice; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.EntityReference; +import com.cloud.baremetal.database.BaremetalPxeVO; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; - + +@EntityReference(value=BaremetalPxeVO.class) public class BaremetalPxeKickStartResponse extends BaremetalPxeResponse { @SerializedName(ApiConstants.TFTP_DIR) @Param(description="Tftp root directory of PXE server") private String tftpDir; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java index 6e3963def53..6288f918567 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java @@ -23,6 +23,7 @@ package com.cloud.baremetal.networkservice; +import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -53,7 +54,6 @@ import com.cloud.resource.UnableDeleteHostException; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.uservm.UserVm; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.SearchCriteria.Op; import 
com.cloud.utils.db.SearchCriteria2; @@ -106,12 +106,13 @@ public class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxe } protected BaremetalPxeService getServiceByType(String type) { - BaremetalPxeService _service; - _service = AdapterBase.getAdapterByName(_services, type); - if (_service == null) { - throw new CloudRuntimeException("Cannot find PXE service for " + type); - } - return _service; + for (BaremetalPxeService service : _services) { + if (service.getPxeServiceType().equals(type)) { + return service; + } + } + + throw new CloudRuntimeException("Cannot find PXE service for " + type); } @Override @@ -236,7 +237,10 @@ public class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxe @Override public List> getCommands() { - // TODO Auto-generated method stub - return null; + List> cmds = new ArrayList>(); + cmds.add(AddBaremetalKickStartPxeCmd.class); + cmds.add(AddBaremetalPxePingServerCmd.class); + cmds.add(ListBaremetalPxePingServersCmd.class); + return cmds; } } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxePingResponse.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxePingResponse.java index adbf0530e00..81bd2511355 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxePingResponse.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxePingResponse.java @@ -19,10 +19,12 @@ package com.cloud.baremetal.networkservice; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.EntityReference; +import com.cloud.baremetal.database.BaremetalPxeVO; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; - +@EntityReference(value=BaremetalPxeVO.class) public class BaremetalPxePingResponse extends BaremetalPxeResponse { @SerializedName(ApiConstants.PING_STORAGE_SERVER_IP) @Param(description="PING storage server ip") 
private String pingStorageServerIp; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeService.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeService.java old mode 100644 new mode 100755 index 8504f82a86f..9fd560f2bc5 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeService.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeService.java @@ -46,6 +46,8 @@ public interface BaremetalPxeService extends Adapter { List listPxeServers(ListBaremetalPxePingServersCmd cmd); + String getPxeServiceType(); + public static final String PXE_PARAM_TYPE = "type"; public static final String PXE_PARAM_ZONE = "zone"; public static final String PXE_PARAM_POD = "pod"; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalUserdataElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalUserdataElement.java index ae582544323..3d9f5819582 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalUserdataElement.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalUserdataElement.java @@ -149,7 +149,7 @@ public class BaremetalUserdataElement extends AdapterBase implements NetworkElem @Override public boolean canEnableIndividualServices() { // TODO Auto-generated method stub - return false; + return true; } @@ -162,14 +162,12 @@ public class BaremetalUserdataElement extends AdapterBase implements NetworkElem @Override public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - // TODO Auto-generated method stub - return false; + return true; } @Override public boolean verifyServicesCombination(Set services) { - // TODO Auto-generated method stub - return false; + return true; } } diff --git 
a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalDhcpCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalDhcpCmd.java index 14b74339fbf..1dc46a8f044 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalDhcpCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalDhcpCmd.java @@ -22,6 +22,7 @@ import java.util.List; import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; @@ -37,7 +38,7 @@ import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; - +@APICommand(name="listBaremetalDhcp", description="list baremetal dhcp servers", responseObject = BaremetalDhcpResponse.class) public class ListBaremetalDhcpCmd extends BaseListCmd { private static final Logger s_logger = Logger.getLogger(ListBaremetalDhcpCmd.class); private static final String s_name = "listexternaldhcpresponse"; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalPxePingServersCmd.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalPxePingServersCmd.java index b4c569f0969..0b418f01d7a 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalPxePingServersCmd.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/ListBaremetalPxePingServersCmd.java @@ -22,6 +22,7 @@ import java.util.List; import javax.inject.Inject; +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import 
org.apache.cloudstack.api.BaseCmd; @@ -37,7 +38,7 @@ import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; - +@APICommand(name="listBaremetalPxePingServer", description="list baremetal ping pxe server", responseObject = BaremetalPxePingResponse.class) public class ListBaremetalPxePingServersCmd extends BaseListCmd { private static final Logger s_logger = Logger.getLogger(ListBaremetalPxePingServersCmd.class); private static final String s_name = "listpingpxeserverresponse"; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/PrepareKickstartPxeServerCommand.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/PrepareKickstartPxeServerCommand.java index 89515475062..25dfeb70d30 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/PrepareKickstartPxeServerCommand.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/PrepareKickstartPxeServerCommand.java @@ -22,10 +22,11 @@ import com.cloud.agent.api.Command; public class PrepareKickstartPxeServerCommand extends Command { private String ksFile; - private String repo; private String templateUuid; private String mac; - private String ksDevice; + private String ksDevice; + private String kernel; + private String initrd; @Override public boolean executeInSequence() { @@ -39,15 +40,23 @@ public class PrepareKickstartPxeServerCommand extends Command { public void setKsFile(String ksFile) { this.ksFile = ksFile; } - - public String getRepo() { - return repo; - } - - public void setRepo(String repo) { - this.repo = repo; - } - + + public String getKernel() { + return kernel; + } + + public void setKernel(String kernel) { + this.kernel = kernel; + } + + public String getInitrd() { + return initrd; + } + + public void setInitrd(String initrd) { + 
this.initrd = initrd; + } + public String getTemplateUuid() { return templateUuid; } diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 8fc8f739460..013a58d8b7c 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml @@ -41,7 +41,7 @@ org.libvirt libvirt - 0.4.9 + ${cs.libvirt-java.version} @@ -50,68 +50,23 @@ test - maven-assembly-plugin - 2.3 - - kvm-agent - false - - agent-descriptor.xml - - + org.apache.maven.plugins + maven-dependency-plugin + 2.5.1 - make-agent + copy-dependencies package - single + copy-dependencies + + ${project.build.directory}/dependencies + runtime + - - maven-resources-plugin - 2.6 - - - copy-resources - - package - - copy-resources - - - dist - - - target - - kvm-agent.zip - - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 2.5.1 - - - copy-dependencies - package - - copy-dependencies - - - target/dependencies - runtime - - - - diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHABase.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHABase.java index af89d9b18a9..d067b35902f 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHABase.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHABase.java @@ -23,6 +23,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.apache.log4j.Logger; import org.libvirt.LibvirtException; import org.libvirt.StoragePool; import org.libvirt.StoragePoolInfo; @@ -33,6 +34,7 @@ import com.cloud.utils.script.OutputInterpreter.AllLinesParser; import com.cloud.utils.script.Script; public class KVMHABase { + private static final Logger s_logger = Logger.getLogger(KVMHABase.class); private long _timeout = 60000; /* 1 minutes */ protected static String 
_heartBeatPath; protected long _heartBeatUpdateTimeout = 60000; @@ -124,14 +126,14 @@ public class KVMHABase { } poolName = pool.getName(); } catch (LibvirtException e) { - + s_logger.debug("Ignoring libvirt error.", e); } finally { try { if (pool != null) { pool.free(); } } catch (LibvirtException e) { - + s_logger.debug("Ignoring libvirt error.", e); } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 552afb1e665..0a25bab2a10 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -41,6 +41,8 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.HashSet; import java.util.Properties; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -186,6 +188,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InputDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef.hostNicType; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SerialDef; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.VirtioSerialDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.TermPolicy; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk.PhysicalDiskFormat; @@ -252,7 +255,7 @@ ServerResource { private String _modifyVlanPath; private String _versionstringpath; - private String _patchdomrPath; + private String _patchViaSocketPath; private String _createvmPath; private String _manageSnapshotPath; private String _resizeVolumePath; @@ -276,7 +279,11 @@ ServerResource { private String _mountPoint = "/mnt"; StorageLayer _storage; 
private KVMStoragePoolManager _storagePoolMgr; - private VifDriver _vifDriver; + + private VifDriver _defaultVifDriver; + private Map _trafficTypeVifDrivers; + protected static final String DEFAULT_OVS_VIF_DRIVER_CLASS_NAME = "com.cloud.hypervisor.kvm.resource.OvsVifDriver"; + protected static final String DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME = "com.cloud.hypervisor.kvm.resource.BridgeVifDriver"; private static final class KeyValueInterpreter extends OutputInterpreter { private final Map map = new HashMap(); @@ -514,10 +521,10 @@ ServerResource { throw new ConfigurationException("Unable to find versions.sh"); } - _patchdomrPath = Script.findScript(kvmScriptsDir + "/patch/", - "rundomrpre.sh"); - if (_patchdomrPath == null) { - throw new ConfigurationException("Unable to find rundomrpre.sh"); + _patchViaSocketPath = Script.findScript(kvmScriptsDir + "/patch/", + "patchviasocket.pl"); + if (_patchViaSocketPath == null) { + throw new ConfigurationException("Unable to find patchviasocket.pl"); } _heartBeatPath = Script.findScript(kvmScriptsDir, "kvmheartbeat.sh"); @@ -685,7 +692,7 @@ ServerResource { _hvVersion = conn.getVersion(); _hvVersion = (_hvVersion % 1000000) / 1000; } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } String[] info = NetUtils.getNetworkParams(_privateNic); @@ -698,10 +705,7 @@ ServerResource { _sysvmISOPath = (String) params.get("systemvm.iso.path"); if (_sysvmISOPath == null) { - String[] isoPaths = { "/usr/lib64/cloud/agent/vms/systemvm.iso", - "/usr/lib/cloud/agent/vms/systemvm.iso", - "/usr/lib64/cloud/common/vms/systemvm.iso", - "/usr/lib/cloud/common/vms/systemvm.iso" }; + String[] isoPaths = {"/usr/share/cloudstack-common/vms/systemvm.iso"}; for (String isoPath : isoPaths) { if (_storage.exists(isoPath)) { _sysvmISOPath = isoPath; @@ -760,8 +764,8 @@ ServerResource { if (tokens.length == 2) { try { _migrateSpeed = Integer.parseInt(tokens[0]); - } catch (Exception e) { - + } catch (NumberFormatException e) { 
+ s_logger.trace("Ignoring migrateSpeed extraction error.", e); } s_logger.debug("device " + _pifs.get("public") + " has speed: " + String.valueOf(_migrateSpeed)); } @@ -778,33 +782,95 @@ ServerResource { params.put("libvirt.host.bridges", bridges); params.put("libvirt.host.pifs", _pifs); - // Load the vif driver - String vifDriverName = (String) params.get("libvirt.vif.driver"); - if (vifDriverName == null) { - if (_bridgeType == BridgeType.OPENVSWITCH) { - s_logger.info("No libvirt.vif.driver specififed. Defaults to OvsVifDriver."); - vifDriverName = "com.cloud.hypervisor.kvm.resource.OvsVifDriver"; - } else { - s_logger.info("No libvirt.vif.driver specififed. Defaults to BridgeVifDriver."); - vifDriverName = "com.cloud.hypervisor.kvm.resource.BridgeVifDriver"; - } - } - params.put("libvirt.computing.resource", this); + configureVifDrivers(params); + + return true; + } + + protected void configureVifDrivers(Map params) + throws ConfigurationException { + final String LIBVIRT_VIF_DRIVER = "libvirt.vif.driver"; + + _trafficTypeVifDrivers = new HashMap(); + + // Load the default vif driver + String defaultVifDriverName = (String) params.get(LIBVIRT_VIF_DRIVER); + if (defaultVifDriverName == null) { + if (_bridgeType == BridgeType.OPENVSWITCH) { + s_logger.info("No libvirt.vif.driver specified. Defaults to OvsVifDriver."); + defaultVifDriverName = DEFAULT_OVS_VIF_DRIVER_CLASS_NAME; + } else { + s_logger.info("No libvirt.vif.driver specified. Defaults to BridgeVifDriver."); + defaultVifDriverName = DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME; + } + } + _defaultVifDriver = getVifDriverClass(defaultVifDriverName, params); + + // Load any per-traffic-type vif drivers + for (Map.Entry entry : params.entrySet()) + { + String k = entry.getKey(); + String vifDriverPrefix = LIBVIRT_VIF_DRIVER + "."; + + if(k.startsWith(vifDriverPrefix)){ + // Get trafficType + String trafficTypeSuffix = k.substring(vifDriverPrefix.length()); + + // Does this suffix match a real traffic type? 
+ TrafficType trafficType = TrafficType.getTrafficType(trafficTypeSuffix); + if(!trafficType.equals(TrafficType.None)){ + // Get vif driver class name + String vifDriverClassName = (String) entry.getValue(); + // if value is null, ignore + if(vifDriverClassName != null){ + // add traffic type to vif driver mapping to Map + _trafficTypeVifDrivers.put(trafficType, + getVifDriverClass(vifDriverClassName, params)); + } + } + } + } + } + + protected VifDriver getVifDriverClass(String vifDriverClassName, Map params) + throws ConfigurationException { + VifDriver vifDriver; + try { - Class clazz = Class.forName(vifDriverName); - _vifDriver = (VifDriver) clazz.newInstance(); - _vifDriver.configure(params); + Class clazz = Class.forName(vifDriverClassName); + vifDriver = (VifDriver) clazz.newInstance(); + vifDriver.configure(params); } catch (ClassNotFoundException e) { throw new ConfigurationException("Unable to find class for libvirt.vif.driver " + e); } catch (InstantiationException e) { throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e); - } catch (Exception e) { - throw new ConfigurationException("Failed to initialize libvirt.vif.driver " + e); + } catch (IllegalAccessException e) { + throw new ConfigurationException("Unable to instantiate class for libvirt.vif.driver " + e); + } + return vifDriver; + } + + protected VifDriver getVifDriver(TrafficType trafficType){ + VifDriver vifDriver = _trafficTypeVifDrivers.get(trafficType); + + if(vifDriver == null){ + vifDriver = _defaultVifDriver; } - return true; + return vifDriver; + } + + protected List getAllVifDrivers(){ + Set vifDrivers = new HashSet(); + + vifDrivers.add(_defaultVifDriver); + vifDrivers.addAll(_trafficTypeVifDrivers.values()); + + ArrayList vifDriverList = new ArrayList(vifDrivers); + + return vifDriverList; } private void getPifs() { @@ -858,7 +924,7 @@ ServerResource { private String getPif(String bridge) { String pif = matchPifFileInDirectory(bridge); - File 
vlanfile = new File("/proc/net/vlan" + pif); + File vlanfile = new File("/proc/net/vlan/" + pif); if (vlanfile.isFile()) { pif = Script.runSimpleBashScript("grep ^Device\\: /proc/net/vlan/" @@ -948,13 +1014,11 @@ ServerResource { return vnetId; } - private void patchSystemVm(String cmdLine, String dataDiskPath, - String vmName) throws InternalErrorException { + private void passCmdLine(String vmName, String cmdLine) + throws InternalErrorException { + final Script command = new Script(_patchViaSocketPath, _timeout, s_logger); String result; - final Script command = new Script(_patchdomrPath, _timeout, s_logger); - command.add("-l", vmName); - command.add("-t", "all"); - command.add("-d", dataDiskPath); + command.add("-n",vmName); command.add("-p", cmdLine.replaceAll(" ", "%")); result = command.execute(); if (result != null) { @@ -977,7 +1041,6 @@ ServerResource { protected String startVM(Connect conn, String vmName, String domainXML) throws LibvirtException, InternalErrorException { - Domain dm = null; try { /* We create a transient domain here. When this method gets @@ -987,12 +1050,11 @@ ServerResource { This also makes sure we never have any old "garbage" defined in libvirt which might haunt us. 
*/ - dm = conn.domainCreateXML(domainXML, 0); + conn.domainCreateXML(domainXML, 0); } catch (final LibvirtException e) { s_logger.warn("Failed to start domain " + vmName + ": " - + e.getMessage()); + + e.getMessage(), e); } - return null; } @@ -1002,6 +1064,7 @@ ServerResource { Connect conn = LibvirtConnection.getConnection(); conn.close(); } catch (LibvirtException e) { + s_logger.trace("Ignoring libvirt error.", e); } return true; @@ -1395,24 +1458,6 @@ ServerResource { pool.deletePhysicalDisk(vol.getPath()); String vmName = cmd.getVmName(); String poolPath = pool.getLocalPath(); - - /* if vol is a root disk for a system vm, try to remove accompanying patch disk as well - this is a bit tricky since the patchdisk is only a LibvirtComputingResource construct - and not tracked anywhere in cloudstack */ - if (vol.getType() == Volume.Type.ROOT && vmName.matches("^[rsv]-\\d+-.+$")) { - File patchVbd = new File(poolPath + File.separator + vmName + "-patchdisk"); - if(patchVbd.exists()){ - try { - _storagePoolMgr.deleteVbdByPath(vol.getPoolType(),patchVbd.getAbsolutePath()); - } catch(CloudRuntimeException e) { - s_logger.warn("unable to destroy patch disk '" + patchVbd.getAbsolutePath() + - "' while removing root disk for " + vmName + " : " + e); - } - } else { - s_logger.debug("file '" +patchVbd.getAbsolutePath()+ "' not found"); - } - } - return new Answer(cmd, true, "Success"); } catch (CloudRuntimeException e) { s_logger.debug("Failed to delete volume: " + e.toString()); @@ -1446,15 +1491,14 @@ ServerResource { } Domain vm = getDomain(conn, vmName); - vm.attachDevice(_vifDriver.plug(nicTO, "Other PV (32-bit)").toString()); + vm.attachDevice(getVifDriver(nicTO.getType()).plug(nicTO, "Other PV (32-bit)").toString()); } private PlugNicAnswer execute(PlugNicCommand cmd) { - Connect conn; NicTO nic = cmd.getNic(); String vmName = cmd.getVmName(); try { - conn = LibvirtConnection.getConnection(); + Connect conn = LibvirtConnection.getConnection(); Domain vm = 
getDomain(conn, vmName); List pluggedNics = getInterfaces(conn, vmName); Integer nicnum = 0; @@ -1465,9 +1509,13 @@ ServerResource { } nicnum++; } - vm.attachDevice(_vifDriver.plug(nic, "Other PV (32-bit)").toString()); + vm.attachDevice(getVifDriver(nic.getType()).plug(nic, "Other PV (32-bit)").toString()); return new PlugNicAnswer(cmd, true, "success"); - } catch (Exception e) { + } catch (LibvirtException e) { + String msg = " Plug Nic failed due to " + e.toString(); + s_logger.warn(msg, e); + return new PlugNicAnswer(cmd, false, msg); + } catch (InternalErrorException e) { String msg = " Plug Nic failed due to " + e.toString(); s_logger.warn(msg, e); return new PlugNicAnswer(cmd, false, msg); @@ -1489,7 +1537,7 @@ ServerResource { } } return new UnPlugNicAnswer(cmd, true, "success"); - } catch (Exception e) { + } catch (LibvirtException e) { String msg = " Unplug Nic failed due to " + e.toString(); s_logger.warn(msg, e); return new UnPlugNicAnswer(cmd, false, msg); @@ -1543,7 +1591,7 @@ ServerResource { return new SetupGuestNetworkAnswer(cmd, false, "Creating guest network failed due to " + result); } return new SetupGuestNetworkAnswer(cmd, true, "success"); - } catch (Exception e) { + } catch (LibvirtException e) { String msg = "Creating guest network failed due to " + e.toString(); s_logger.warn(msg, e); return new SetupGuestNetworkAnswer(cmd, false, msg); @@ -1583,7 +1631,7 @@ ServerResource { } return new SetNetworkACLAnswer(cmd, true, results); - } catch (Exception e) { + } catch (LibvirtException e) { String msg = "SetNetworkACL failed due to " + e.toString(); s_logger.error(msg, e); return new SetNetworkACLAnswer(cmd, false, results); @@ -1628,7 +1676,7 @@ ServerResource { return new SetSourceNatAnswer(cmd, false, "KVM plugin \"vpc_snat\" failed:"+result); } return new SetSourceNatAnswer(cmd, true, "success"); - } catch (Exception e) { + } catch (LibvirtException e) { String msg = "Ip SNAT failure due to " + e.toString(); s_logger.error(msg, e); return 
new SetSourceNatAnswer(cmd, false, msg); @@ -1673,7 +1721,10 @@ ServerResource { results[i++] = ip.getPublicIp() + " - success"; } - } catch (Exception e) { + } catch (LibvirtException e) { + s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e); + results[i++] = IpAssocAnswer.errorResult; + } catch (InternalErrorException e) { s_logger.error("Ip Assoc failure on applying one ip due to exception: ", e); results[i++] = IpAssocAnswer.errorResult; } @@ -1753,7 +1804,7 @@ ServerResource { vm = getDomain(conn, cmd.getVmName()); state = vm.getInfo().state; } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } @@ -1872,7 +1923,7 @@ ServerResource { vm = getDomain(conn, cmd.getVmName()); state = vm.getInfo().state; } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } @@ -2314,7 +2365,7 @@ ServerResource { Connect conn = LibvirtConnection.getConnection(); Integer vncPort = getVncPort(conn, cmd.getName()); return new GetVncPortAnswer(cmd, _privateIp, 5900 + vncPort); - } catch (Exception e) { + } catch (LibvirtException e) { return new GetVncPortAnswer(cmd, e.toString()); } } @@ -2435,16 +2486,13 @@ ServerResource { } catch (final LibvirtException e) { s_logger.warn("Can't get vm state " + vmName + e.getMessage() + "retry:" + retry); - } catch (Exception e) { - s_logger.warn("Can't get vm state " + vmName + e.getMessage() - + "retry:" + retry); } finally { try { if (vms != null) { vms.free(); } - } catch (final LibvirtException e) { - + } catch (final LibvirtException l) { + s_logger.trace("Ignoring libvirt error.", l); } } } @@ -2537,9 +2585,6 @@ ServerResource { } catch (LibvirtException e) { s_logger.debug("Can't migrate domain: " + e.getMessage()); result = e.getMessage(); - } catch (Exception e) { - s_logger.debug("Can't migrate domain: " + e.getMessage()); - result = e.getMessage(); } finally { try { if (dm != null) { @@ -2552,7 +2597,7 @@ ServerResource { destDomain.free(); } } catch 
(final LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } @@ -2563,7 +2608,11 @@ ServerResource { } else { destroy_network_rules_for_vm(conn, vmName); for (InterfaceDef iface : ifaces) { - _vifDriver.unplug(iface); + // We don't know which "traffic type" is associated with + // each interface at this point, so inform all vif drivers + for(VifDriver vifDriver : getAllVifDrivers()){ + vifDriver.unplug(iface); + } } cleanupVM(conn, vmName, getVnetId(VirtualMachineName.getVnet(vmName))); @@ -2583,7 +2632,7 @@ ServerResource { try { Connect conn = LibvirtConnection.getConnection(); for (NicTO nic : nics) { - _vifDriver.plug(nic, null); + getVifDriver(nic.getType()).plug(nic, null); } /* setup disks, e.g for iso */ @@ -2725,8 +2774,8 @@ ServerResource { Integer vncPort = null; try { vncPort = getVncPort(conn, cmd.getVmName()); - } catch (Exception e) { - + } catch (LibvirtException e) { + s_logger.trace("Ignoring libvirt error.", e); } get_rule_logs_for_vms(); return new RebootAnswer(cmd, null, vncPort); @@ -2797,7 +2846,11 @@ ServerResource { } } for (InterfaceDef iface: ifaces) { - _vifDriver.unplug(iface); + // We don't know which "traffic type" is associated with + // each interface at this point, so inform all vif drivers + for(VifDriver vifDriver : getAllVifDrivers()){ + vifDriver.unplug(iface); + } } } @@ -2924,12 +2977,34 @@ ServerResource { vm.addComp(guest); GuestResourceDef grd = new GuestResourceDef(); - grd.setMemorySize(vmTO.getMinRam() / 1024); + + if (vmTO.getMinRam() != vmTO.getMaxRam()){ + grd.setMemBalloning(true); + grd.setCurrentMem((int)vmTO.getMinRam()/1024); + grd.setMemorySize((int)vmTO.getMaxRam()/1024); + } + else{ + grd.setMemorySize(vmTO.getMaxRam() / 1024); + } grd.setVcpuNum(vmTO.getCpus()); vm.addComp(grd); CpuTuneDef ctd = new CpuTuneDef(); - ctd.setShares(vmTO.getCpus() * vmTO.getSpeed()); + /** + A 4.0.X/4.1.X management server doesn't send the correct JSON + command for getMinSpeed, it only sends a 'speed' 
field. + + So if getMinSpeed() returns null we fall back to getSpeed(). + + This way a >4.1 agent can communicate with a <=4.1 management server + + This change is due to the overcommit feature in 4.2 + */ + if (vmTO.getMinSpeed() != null) { + ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed()); + } else { + ctd.setShares(vmTO.getCpus() * vmTO.getSpeed()); + } vm.addComp(ctd); FeaturesDef features = new FeaturesDef(); @@ -2958,6 +3033,11 @@ ServerResource { SerialDef serial = new SerialDef("pty", null, (short) 0); devices.addDevice(serial); + if (vmTO.getType() != VirtualMachine.Type.User) { + VirtioSerialDef vserial = new VirtioSerialDef(vmTO.getName(), null); + devices.addDevice(vserial); + } + ConsoleDef console = new ConsoleDef("pty", null, null, (short) 0); devices.addDevice(console); @@ -3021,10 +3101,27 @@ ServerResource { } } + // pass cmdline info to system vms + if (vmSpec.getType() != VirtualMachine.Type.User) { + passCmdLine(vmName, vmSpec.getBootArgs() ); + } + state = State.Running; return new StartAnswer(cmd); - } catch (Exception e) { - s_logger.warn("Exception ", e); + } catch (LibvirtException e) { + s_logger.warn("LibvirtException ", e); + if (conn != null) { + handleVmStartFailure(conn, vmName, vm); + } + return new StartAnswer(cmd, e.getMessage()); + } catch (InternalErrorException e) { + s_logger.warn("InternalErrorException ", e); + if (conn != null) { + handleVmStartFailure(conn, vmName, vm); + } + return new StartAnswer(cmd, e.getMessage()); + } catch (URISyntaxException e) { + s_logger.warn("URISyntaxException ", e); if (conn != null) { handleVmStartFailure(conn, vmName, vm); } @@ -3136,8 +3233,6 @@ ServerResource { iso.defISODisk(_sysvmISOPath); vm.getDevices().addDevice(iso); } - - createPatchVbd(conn, vmName, vm, vmSpec); } } @@ -3151,68 +3246,10 @@ ServerResource { return null; } - private void createPatchVbd(Connect conn, String vmName, LibvirtVMDef vm, - VirtualMachineTO vmSpec) throws LibvirtException, - InternalErrorException { - - 
List disks = vm.getDevices().getDisks(); - DiskDef rootDisk = disks.get(0); - VolumeTO rootVol = getVolume(vmSpec, Volume.Type.ROOT); - String patchName = vmName + "-patchdisk"; - KVMStoragePool pool = _storagePoolMgr.getStoragePool( - rootVol.getPoolType(), - rootVol.getPoolUuid()); - String patchDiskPath = pool.getLocalPath() + "/" + patchName; - - List phyDisks = pool.listPhysicalDisks(); - boolean foundDisk = false; - - for (KVMPhysicalDisk phyDisk : phyDisks) { - if (phyDisk.getPath().equals(patchDiskPath)) { - foundDisk = true; - break; - } - } - - if (!foundDisk) { - s_logger.debug("generating new patch disk for " + vmName + " since none was found"); - KVMPhysicalDisk disk = pool.createPhysicalDisk(patchName, KVMPhysicalDisk.PhysicalDiskFormat.RAW, - 10L * 1024 * 1024); - } else { - s_logger.debug("found existing patch disk at " + patchDiskPath + " using it for " + vmName); - } - - /* Format/create fs on this disk */ - final Script command = new Script(_createvmPath, _timeout, s_logger); - command.add("-f", patchDiskPath); - String result = command.execute(); - if (result != null) { - s_logger.debug("Failed to create data disk: " + result); - throw new InternalErrorException("Failed to create data disk: " - + result); - } - - /* add patch disk */ - DiskDef patchDisk = new DiskDef(); - - if (pool.getType() == StoragePoolType.CLVM) { - patchDisk.defBlockBasedDisk(patchDiskPath, 1, rootDisk.getBusType()); - } else { - patchDisk.defFileBasedDisk(patchDiskPath, 1, rootDisk.getBusType(), - DiskDef.diskFmtType.RAW); - } - - disks.add(patchDisk); - - String bootArgs = vmSpec.getBootArgs(); - - patchSystemVm(bootArgs, patchDiskPath, vmName); - } - private void createVif(LibvirtVMDef vm, NicTO nic) throws InternalErrorException, LibvirtException { vm.getDevices().addDevice( - _vifDriver.plug(nic, vm.getGuestOSType()).toString()); + getVifDriver(nic.getType()).plug(nic, vm.getGuestOSType()).toString()); } protected CheckSshAnswer execute(CheckSshCommand cmd) { @@ 
-3224,14 +3261,10 @@ ServerResource { s_logger.debug("Ping command port, " + privateIp + ":" + cmdPort); } - try { - String result = _virtRouterResource.connect(privateIp, cmdPort); - if (result != null) { - return new CheckSshAnswer(cmd, "Can not ping System vm " - + vmName + "due to:" + result); - } - } catch (Exception e) { - return new CheckSshAnswer(cmd, e); + String result = _virtRouterResource.connect(privateIp, cmdPort); + if (result != null) { + return new CheckSshAnswer(cmd, "Can not ping System vm " + + vmName + "due to:" + result); } if (s_logger.isDebugEnabled()) { @@ -3378,14 +3411,12 @@ ServerResource { + e.getMessage()); } throw e; - } catch (Exception e) { - throw new InternalErrorException(e.toString()); } finally { if (dm != null) { try { dm.free(); } catch (LibvirtException l) { - + s_logger.trace("Ignoring libvirt error.", l); } } } @@ -3603,22 +3634,21 @@ ServerResource { return convertToState(vps); } } catch (final LibvirtException e) { - s_logger.trace(e.getMessage()); - } catch (Exception e) { - s_logger.trace(e.getMessage()); + s_logger.trace("Ignoring libvirt error.", e); } finally { try { if (dm != null) { dm.free(); } - } catch (final LibvirtException e) { - + } catch (final LibvirtException l) { + s_logger.trace("Ignoring libvirt error.", l); } } try { Thread.sleep(1000); } catch (InterruptedException e) { + s_logger.trace("Ignoring InterruptedException.", e); } } return State.Stopped; @@ -3656,7 +3686,7 @@ ServerResource { dm.free(); } } catch (final LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } } @@ -3711,7 +3741,7 @@ ServerResource { dm.free(); } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } } @@ -3731,15 +3761,13 @@ ServerResource { vmStates.put(vmName, state); } catch (final LibvirtException e) { s_logger.warn("Unable to get vms", e); - } catch (Exception e) { - s_logger.warn("Unable to get vms", e); } finally { try { if (dm != null) { dm.free(); } } catch 
(LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } } @@ -3790,7 +3818,7 @@ ServerResource { } } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } if (isSnapshotSupported()) { @@ -3837,7 +3865,7 @@ ServerResource { } catch (LibvirtException e) { s_logger.warn("Failed to create vm", e); msg = e.getMessage(); - } catch (Exception e) { + } catch (InternalErrorException e) { s_logger.warn("Failed to create vm", e); msg = e.getMessage(); } finally { @@ -3846,7 +3874,7 @@ ServerResource { dm.free(); } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } @@ -3876,15 +3904,13 @@ ServerResource { break; } catch (LibvirtException e) { s_logger.debug("Failed to get vm status:" + e.getMessage()); - } catch (Exception e) { - s_logger.debug("Failed to get vm status:" + e.getMessage()); } finally { try { if (dm != null) { dm.free(); } } catch (LibvirtException l) { - + s_logger.trace("Ignoring libvirt error.", l); } } } @@ -3939,15 +3965,13 @@ ServerResource { } catch (InterruptedException ie) { s_logger.debug("Interrupted sleep"); return ie.getMessage(); - } catch (Exception e) { - s_logger.debug("Failed to stop VM :" + vmName + " :", e); - return e.getMessage(); } finally { try { if (dm != null) { dm.free(); } } catch (LibvirtException e) { + s_logger.trace("Ignoring libvirt error.", e); } } @@ -3993,7 +4017,7 @@ ServerResource { dm.free(); } } catch (LibvirtException l) { - + s_logger.trace("Ignoring libvirt error.", l); } } } @@ -4009,7 +4033,7 @@ ServerResource { } } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } return false; } @@ -4034,8 +4058,7 @@ ServerResource { parser.parseDomainXML(xmlDesc); return parser.getDescription(); } catch (LibvirtException e) { - return null; - } catch (Exception e) { + s_logger.trace("Ignoring libvirt error.", e); return null; } finally { try { @@ -4043,7 +4066,7 @@ ServerResource { dm.free(); } } catch 
(LibvirtException l) { - + s_logger.trace("Ignoring libvirt error.", l); } } } @@ -4141,16 +4164,13 @@ ServerResource { } catch (LibvirtException e) { s_logger.debug("Failed to get dom xml: " + e.toString()); return new ArrayList(); - } catch (Exception e) { - s_logger.debug("Failed to get dom xml: " + e.toString()); - return new ArrayList(); } finally { try { if (dm != null) { dm.free(); } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } } @@ -4167,16 +4187,13 @@ ServerResource { } catch (LibvirtException e) { s_logger.debug("Failed to get dom xml: " + e.toString()); return new ArrayList(); - } catch (Exception e) { - s_logger.debug("Failed to get dom xml: " + e.toString()); - return new ArrayList(); } finally { try { if (dm != null) { dm.free(); } } catch (LibvirtException e) { - + s_logger.trace("Ignoring libvirt error.", e); } } } @@ -4524,8 +4541,7 @@ ServerResource { conn = LibvirtConnection.getConnection(); success = default_network_rules_for_systemvm(conn, cmd.getVmName()); } catch (LibvirtException e) { - // TODO Auto-generated catch block - e.printStackTrace(); + s_logger.trace("Ignoring libvirt error.", e); } return new Answer(cmd, success, ""); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index acfd9cf1fe8..c93aeeb2dd6 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -116,6 +116,7 @@ public class LibvirtVMDef { private int _currentMem = -1; private String _memBacking; private int _vcpu = -1; + private boolean _memBalloning= false; public void setMemorySize(long mem) { _mem = mem; @@ -133,6 +134,10 @@ public class LibvirtVMDef { _vcpu = vcpu; } + public void setMemBalloning(boolean turnon){ + _memBalloning = turnon; + } + @Override public String toString() { 
StringBuilder resBuidler = new StringBuilder(); @@ -145,6 +150,9 @@ public class LibvirtVMDef { resBuidler.append("" + "<" + _memBacking + "/>" + "\n"); } + if (_memBalloning){ + resBuidler.append("\n" + "\n" + "\n"); + } if (_vcpu != -1) { resBuidler.append("" + _vcpu + "\n"); } @@ -838,6 +846,31 @@ public class LibvirtVMDef { } } + public static class VirtioSerialDef { + private final String _name; + private String _path; + + public VirtioSerialDef(String name, String path) { + _name = name; + _path = path; + } + + @Override + public String toString() { + StringBuilder virtioSerialBuilder = new StringBuilder(); + if(_path == null) { + _path = "/var/lib/libvirt/qemu"; + } + virtioSerialBuilder.append("\n"); + virtioSerialBuilder.append("\n"); + virtioSerialBuilder.append("\n"); + virtioSerialBuilder.append("
\n"); + virtioSerialBuilder.append("\n"); + return virtioSerialBuilder.toString(); + } + } + public static class GraphicDef { private final String _type; private short _port = -2; diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java index 3a614037d85..dd0d7724c4a 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtXMLParser.java @@ -19,6 +19,7 @@ package com.cloud.hypervisor.kvm.resource; import java.io.IOException; import java.io.StringReader; +import javax.xml.parsers.ParserConfigurationException; import javax.xml.parsers.SAXParser; import javax.xml.parsers.SAXParserFactory; @@ -40,13 +41,14 @@ public class LibvirtXMLParser extends DefaultHandler { protected boolean _initialized = false; public LibvirtXMLParser() { - try { _sp = s_spf.newSAXParser(); _initialized = true; - } catch (Exception ex) { + } catch (ParserConfigurationException e) { + s_logger.trace("Ignoring xml parser error.", e); + } catch (SAXException e) { + s_logger.trace("Ignoring xml parser error.", e); } - } public boolean parseDomainXML(String domXML) { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index d350ef994fb..070c1327ba3 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -293,58 +293,6 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { return parser.parseStorageVolumeXML(volDefXML); } - public StoragePool createFileBasedStoragePool(Connect conn, - String localStoragePath, String uuid) { - if (!(_storageLayer.exists(localStoragePath) && 
_storageLayer - .isDirectory(localStoragePath))) { - return null; - } - - File path = new File(localStoragePath); - if (!(path.canWrite() && path.canRead() && path.canExecute())) { - return null; - } - - StoragePool pool = null; - - try { - pool = conn.storagePoolLookupByUUIDString(uuid); - } catch (LibvirtException e) { - - } - - if (pool == null) { - LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(poolType.DIR, - uuid, uuid, null, null, localStoragePath); - try { - pool = conn.storagePoolDefineXML(spd.toString(), 0); - pool.create(0); - } catch (LibvirtException e) { - if (pool != null) { - try { - pool.destroy(); - pool.undefine(); - } catch (LibvirtException e1) { - } - pool = null; - } - throw new CloudRuntimeException(e.toString()); - } - } - - try { - StoragePoolInfo spi = pool.getInfo(); - if (spi.state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) { - pool.create(0); - } - - } catch (LibvirtException e) { - throw new CloudRuntimeException(e.toString()); - } - - return pool; - } - @Override public KVMStoragePool getStoragePool(String uuid) { StoragePool storage = null; diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java new file mode 100644 index 00000000000..018f2f5330e --- /dev/null +++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.cloud.hypervisor.kvm.resource; + +import org.junit.Test; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.hypervisor.kvm.resource.LibvirtVMDef; +import com.cloud.template.VirtualMachineTemplate.BootloaderType; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.Type; +import java.util.Random; +import static org.junit.Assert.assertEquals; + +public class LibvirtComputingResourceTest { + + String _hyperVisorType = "kvm"; + Random _random = new Random(); + + /** + This test tests if the Agent can handle a vmSpec coming + from a <=4.1 management server. + + The overcommit feature has not been merged in there and thus + only 'speed' is set. 
+ */ + @Test + public void testCreateVMFromSpecLegacy() { + int id = _random.nextInt(65534); + String name = "test-instance-1"; + + int cpus = _random.nextInt(7) + 1; + int speed = 1024; + int minRam = 256 * 1024; + int maxRam = 512 * 1024; + + String os = "Ubuntu"; + boolean haEnabled = false; + boolean limitCpuUse = false; + + String vncAddr = "1.2.3.4"; + String vncPassword = "mySuperSecretPassword"; + + LibvirtComputingResource lcr = new LibvirtComputingResource(); + VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, speed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); + to.setVncAddr(vncAddr); + + LibvirtVMDef vm = lcr.createVMFromSpec(to); + vm.setHvsType(_hyperVisorType); + + String vmStr = "\n"; + vmStr += "" + name + "\n"; + vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n"; + vmStr += "" + os + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + maxRam / 1024 + "\n"; + vmStr += "" + minRam / 1024 + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + cpus + "\n"; + vmStr += "\n"; + vmStr += "hvm\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + (cpus * speed) + "\n"; + vmStr += "\n"; + vmStr += "restart\n"; + vmStr += "destroy\n"; + vmStr += "destroy\n"; + vmStr += "\n"; + + assertEquals(vmStr, vm.toString()); + } + + /** + This test tests if the Agent can handle a vmSpec coming + from a >4.1 management server. 
+ + It tests if the Agent can handle a vmSpec with overcommit + data like minSpeed and maxSpeed in there + */ + @Test + public void testCreateVMFromSpec() { + int id = _random.nextInt(65534); + String name = "test-instance-1"; + + int cpus = _random.nextInt(7) + 1; + int minSpeed = 1024; + int maxSpeed = 2048; + int minRam = 256 * 1024; + int maxRam = 512 * 1024; + + String os = "Ubuntu"; + boolean haEnabled = false; + boolean limitCpuUse = false; + + String vncAddr = "1.2.3.4"; + String vncPassword = "mySuperSecretPassword"; + + LibvirtComputingResource lcr = new LibvirtComputingResource(); + VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword); + to.setVncAddr(vncAddr); + + LibvirtVMDef vm = lcr.createVMFromSpec(to); + vm.setHvsType(_hyperVisorType); + + String vmStr = "\n"; + vmStr += "" + name + "\n"; + vmStr += "b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9\n"; + vmStr += "" + os + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + maxRam / 1024 + "\n"; + vmStr += "" + minRam / 1024 + "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + cpus + "\n"; + vmStr += "\n"; + vmStr += "hvm\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "\n"; + vmStr += "" + (cpus * minSpeed) + "\n"; + vmStr += "\n"; + vmStr += "restart\n"; + vmStr += "destroy\n"; + vmStr += "destroy\n"; + vmStr += "\n"; + + assertEquals(vmStr, vm.toString()); + } +} \ No newline at end of file diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtVifDriverTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtVifDriverTest.java new file mode 
100644 index 00000000000..7d47e6e81de --- /dev/null +++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtVifDriverTest.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.cloud.hypervisor.kvm.resource; + +import com.cloud.network.Networks.TrafficType; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource.BridgeType; + +import org.junit.Before; +import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.util.HashMap; +import java.util.Map; +import javax.naming.ConfigurationException; + + +import static org.mockito.Mockito.*; + +public class LibvirtVifDriverTest { + private LibvirtComputingResource res; + + private Map assertions; + + final String LIBVIRT_VIF_DRIVER = "libvirt.vif.driver"; + final String FAKE_VIF_DRIVER_CLASS_NAME = "com.cloud.hypervisor.kvm.resource.FakeVifDriver"; + final String NONEXISTENT_VIF_DRIVER_CLASS_NAME = "com.cloud.hypervisor.kvm.resource.NonExistentVifDriver"; + + private VifDriver fakeVifDriver, bridgeVifDriver, ovsVifDriver; + + @Before + public void setUp() { + // Use a spy because we only want to override getVifDriverClass + 
LibvirtComputingResource resReal = new LibvirtComputingResource(); + res = spy(resReal); + + try{ + bridgeVifDriver = + (VifDriver) Class.forName(LibvirtComputingResource.DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME).newInstance(); + ovsVifDriver = + (VifDriver) Class.forName(LibvirtComputingResource.DEFAULT_OVS_VIF_DRIVER_CLASS_NAME).newInstance(); + + // Instantiating bridge vif driver again as the fake vif driver + // is good enough, as this is a separate instance + fakeVifDriver = + (VifDriver) Class.forName(LibvirtComputingResource.DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME).newInstance(); + + doReturn(bridgeVifDriver).when(res) + .getVifDriverClass(eq(LibvirtComputingResource.DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME), anyMap()); + doReturn(ovsVifDriver).when(res) + .getVifDriverClass(eq(LibvirtComputingResource.DEFAULT_OVS_VIF_DRIVER_CLASS_NAME), anyMap()); + doReturn(fakeVifDriver).when(res) + .getVifDriverClass(eq(FAKE_VIF_DRIVER_CLASS_NAME), anyMap()); + + } catch (final ConfigurationException ex){ + fail("Unexpected ConfigurationException while configuring VIF drivers: " + ex.getMessage()); + } catch (final Exception ex){ + fail("Unexpected Exception while configuring VIF drivers"); + } + + assertions = new HashMap(); + } + + + // Helper function + // Configure LibvirtComputingResource using params + private void configure (Map params) + throws ConfigurationException{ + res.configureVifDrivers(params); + } + + // Helper function + private void checkAssertions(){ + // Check the defined assertions + for (Map.Entry assertion : assertions.entrySet()){ + assertEquals(res.getVifDriver(assertion.getKey()), + assertion.getValue()); + } + } + + // Helper when all answers should be the same + private void checkAllSame(VifDriver vifDriver) + throws ConfigurationException { + + for(TrafficType trafficType : TrafficType.values()){ + assertions.put(trafficType, vifDriver); + } + + checkAssertions(); + } + + @Test + public void testDefaults() + throws ConfigurationException { + // If no 
special vif driver settings, all traffic types should + // map to the default vif driver for the bridge type + Map params = new HashMap(); + + res._bridgeType = BridgeType.NATIVE; + configure(params); + checkAllSame(bridgeVifDriver); + + res._bridgeType = BridgeType.OPENVSWITCH; + configure(params); + checkAllSame(ovsVifDriver); + } + + @Test + public void testDefaultsWhenExplicitlySet() + throws ConfigurationException { + + Map params = new HashMap(); + + // Switch res' bridge type for test purposes + params.put(LIBVIRT_VIF_DRIVER, LibvirtComputingResource.DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + checkAllSame(bridgeVifDriver); + + params.clear(); + params.put(LIBVIRT_VIF_DRIVER, LibvirtComputingResource.DEFAULT_OVS_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.OPENVSWITCH; + configure(params); + checkAllSame(ovsVifDriver); + } + + @Test + public void testWhenExplicitlySetDifferentDefault() + throws ConfigurationException { + + // Tests when explicitly set vif driver to OVS when using regular bridges and vice versa + Map params = new HashMap(); + + // Switch res' bridge type for test purposes + params.put(LIBVIRT_VIF_DRIVER, LibvirtComputingResource.DEFAULT_OVS_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + checkAllSame(ovsVifDriver); + + params.clear(); + params.put(LIBVIRT_VIF_DRIVER, LibvirtComputingResource.DEFAULT_BRIDGE_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.OPENVSWITCH; + configure(params); + checkAllSame(bridgeVifDriver); + } + + @Test + public void testOverrideSomeTrafficTypes() + throws ConfigurationException { + + Map params = new HashMap(); + params.put(LIBVIRT_VIF_DRIVER + "." + "Public", FAKE_VIF_DRIVER_CLASS_NAME); + params.put(LIBVIRT_VIF_DRIVER + "." 
+ "Guest", + LibvirtComputingResource.DEFAULT_OVS_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + + // Initially, set all traffic types to use default + for(TrafficType trafficType : TrafficType.values()){ + assertions.put(trafficType, bridgeVifDriver); + } + + assertions.put(TrafficType.Public, fakeVifDriver); + assertions.put(TrafficType.Guest, ovsVifDriver); + + checkAssertions(); + } + + @Test + public void testBadTrafficType() + throws ConfigurationException { + Map params = new HashMap(); + params.put(LIBVIRT_VIF_DRIVER + "." + "NonExistentTrafficType", FAKE_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + + // Set all traffic types to use default, because bad traffic type should be ignored + for(TrafficType trafficType : TrafficType.values()){ + assertions.put(trafficType, bridgeVifDriver); + } + + checkAssertions(); + } + + @Test + public void testEmptyTrafficType() + throws ConfigurationException { + Map params = new HashMap(); + params.put(LIBVIRT_VIF_DRIVER + ".", FAKE_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + + // Set all traffic types to use default, because bad traffic type should be ignored + for(TrafficType trafficType : TrafficType.values()){ + assertions.put(trafficType, bridgeVifDriver); + } + + checkAssertions(); + } + + @Test(expected=ConfigurationException.class) + public void testBadVifDriverClassName() + throws ConfigurationException { + Map params = new HashMap(); + params.put(LIBVIRT_VIF_DRIVER + "." 
+ "Public", NONEXISTENT_VIF_DRIVER_CLASS_NAME); + res._bridgeType = BridgeType.NATIVE; + configure(params); + } +} diff --git a/plugins/hypervisors/ovm/pom.xml b/plugins/hypervisors/ovm/pom.xml index 5700c14d4eb..84beff0d4eb 100644 --- a/plugins/hypervisors/ovm/pom.xml +++ b/plugins/hypervisors/ovm/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/simulator/pom.xml b/plugins/hypervisors/simulator/pom.xml index a1ab9c08639..ff1664ad85f 100644 --- a/plugins/hypervisors/simulator/pom.xml +++ b/plugins/hypervisors/simulator/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java index 2178651403e..8542de3bd9f 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -62,7 +62,9 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; +import org.springframework.stereotype.Component; +@Component @Local(value = { MockAgentManager.class }) public class MockAgentManagerImpl extends ManagerBase implements MockAgentManager { private static final Logger s_logger = Logger.getLogger(MockAgentManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java index f445bb32900..859acc85958 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java +++ 
b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -97,7 +97,9 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine.State; +import org.springframework.stereotype.Component; +@Component @Local(value = { MockStorageManager.class }) public class MockStorageManagerImpl extends ManagerBase implements MockStorageManager { private static final Logger s_logger = Logger.getLogger(MockStorageManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index 60e1a61a0bd..c0ccbe43978 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -17,58 +17,12 @@ package com.cloud.agent.manager; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.BumpUpPriorityCommand; -import com.cloud.agent.api.CheckRouterAnswer; -import com.cloud.agent.api.CheckRouterCommand; -import com.cloud.agent.api.CheckVirtualMachineAnswer; -import com.cloud.agent.api.CheckVirtualMachineCommand; -import com.cloud.agent.api.CleanupNetworkRulesCmd; -import com.cloud.agent.api.GetDomRVersionAnswer; -import com.cloud.agent.api.GetDomRVersionCmd; -import com.cloud.agent.api.GetVmStatsAnswer; -import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.GetVncPortAnswer; -import com.cloud.agent.api.GetVncPortCommand; -import com.cloud.agent.api.MigrateAnswer; -import com.cloud.agent.api.MigrateCommand; -import 
com.cloud.agent.api.NetworkUsageAnswer; -import com.cloud.agent.api.NetworkUsageCommand; -import com.cloud.agent.api.PrepareForMigrationAnswer; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.RebootAnswer; -import com.cloud.agent.api.RebootCommand; -import com.cloud.agent.api.SecurityGroupRuleAnswer; -import com.cloud.agent.api.SecurityGroupRulesCmd; -import com.cloud.agent.api.StartAnswer; -import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.StopCommand; -import com.cloud.agent.api.VmStatsEntry; +import com.cloud.agent.api.*; import com.cloud.agent.api.check.CheckSshAnswer; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.agent.api.routing.IpAssocCommand; -import com.cloud.agent.api.routing.LoadBalancerConfigCommand; -import com.cloud.agent.api.routing.NetworkElementCommand; -import com.cloud.agent.api.routing.SavePasswordCommand; -import com.cloud.agent.api.routing.SetFirewallRulesCommand; -import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; -import com.cloud.agent.api.routing.SetStaticNatRulesCommand; -import com.cloud.agent.api.routing.VmDataCommand; +import com.cloud.agent.api.routing.*; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.network.Networks.TrafficType; @@ -86,7 +40,18 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.HashMap; +import java.util.List; +import 
java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +@Component @Local(value = { MockVmManager.class }) public class MockVmManagerImpl extends ManagerBase implements MockVmManager { private static final Logger s_logger = Logger.getLogger(MockVmManagerImpl.class); @@ -383,7 +348,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public Answer startVM(StartCommand cmd, SimulatorInfo info) { VirtualMachineTO vm = cmd.getVirtualMachine(); - String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid()); + String result = startVM(vm.getName(), vm.getNics(), vm.getCpus()* vm.getMaxSpeed(), vm.getMaxRam(), vm.getBootArgs(), info.getHostUuid()); if (result != null) { return new StartAnswer(cmd, result); } else { diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index 41443572efd..c234cc5cb2e 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -16,73 +16,12 @@ // under the License. 
package com.cloud.agent.manager; -import java.util.HashMap; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.AttachIsoCommand; -import com.cloud.agent.api.AttachVolumeCommand; -import com.cloud.agent.api.BackupSnapshotCommand; -import com.cloud.agent.api.BumpUpPriorityCommand; -import com.cloud.agent.api.CheckHealthCommand; -import com.cloud.agent.api.CheckNetworkCommand; -import com.cloud.agent.api.CheckRouterCommand; -import com.cloud.agent.api.CheckVirtualMachineCommand; -import com.cloud.agent.api.CleanupNetworkRulesCmd; -import com.cloud.agent.api.ClusterSyncCommand; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.ComputeChecksumCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; -import com.cloud.agent.api.CreateStoragePoolCommand; -import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; -import com.cloud.agent.api.DeleteSnapshotBackupCommand; -import com.cloud.agent.api.DeleteStoragePoolCommand; -import com.cloud.agent.api.GetDomRVersionCmd; -import com.cloud.agent.api.GetHostStatsCommand; -import com.cloud.agent.api.GetStorageStatsCommand; -import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.GetVncPortCommand; -import com.cloud.agent.api.MaintainCommand; -import com.cloud.agent.api.ManageSnapshotCommand; -import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.NetworkUsageCommand; -import com.cloud.agent.api.PingTestCommand; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.RebootCommand; -import com.cloud.agent.api.SecStorageSetupCommand; -import com.cloud.agent.api.SecStorageVMSetupCommand; -import com.cloud.agent.api.SecurityGroupRulesCmd; 
-import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StopCommand; -import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.*; import com.cloud.agent.api.check.CheckSshCommand; import com.cloud.agent.api.proxy.CheckConsoleProxyLoadCommand; import com.cloud.agent.api.proxy.WatchConsoleProxyLoadCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.agent.api.routing.IpAssocCommand; -import com.cloud.agent.api.routing.LoadBalancerConfigCommand; -import com.cloud.agent.api.routing.SavePasswordCommand; -import com.cloud.agent.api.routing.SetFirewallRulesCommand; -import com.cloud.agent.api.routing.SetPortForwardingRulesCommand; -import com.cloud.agent.api.routing.SetStaticNatRulesCommand; -import com.cloud.agent.api.routing.VmDataCommand; -import com.cloud.agent.api.storage.CopyVolumeCommand; -import com.cloud.agent.api.storage.CreateCommand; -import com.cloud.agent.api.storage.DeleteTemplateCommand; -import com.cloud.agent.api.storage.DestroyCommand; -import com.cloud.agent.api.storage.DownloadCommand; -import com.cloud.agent.api.storage.DownloadProgressCommand; -import com.cloud.agent.api.storage.ListTemplateCommand; -import com.cloud.agent.api.storage.ListVolumeCommand; -import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; +import com.cloud.agent.api.routing.*; +import com.cloud.agent.api.storage.*; import com.cloud.simulator.MockConfigurationVO; import com.cloud.simulator.MockHost; import com.cloud.simulator.MockVMVO; @@ -95,7 +34,16 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.HashMap; +import java.util.Map; + +@Component @Local(value = { SimulatorManager.class }) public class 
SimulatorManagerImpl extends ManagerBase implements SimulatorManager { private static final Logger s_logger = Logger.getLogger(SimulatorManagerImpl.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java index 721e5f70222..46df50c2133 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java @@ -184,7 +184,7 @@ public class AgentRoutingResource extends AgentStorageResource { throws IllegalArgumentException { VirtualMachineTO vmSpec = cmd.getVirtualMachine(); String vmName = vmSpec.getName(); - if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getSpeed() + this.usedCpu) || + if (this.totalCpu < (vmSpec.getCpus() * vmSpec.getMaxSpeed() + this.usedCpu) || this.totalMem < (vmSpec.getMaxRam() + this.usedMem)) { return new StartAnswer(cmd, "Not enough resource to start the vm"); } @@ -199,9 +199,9 @@ public class AgentRoutingResource extends AgentStorageResource { return new StartAnswer(cmd, result.getDetails()); } - this.usedCpu += vmSpec.getCpus() * vmSpec.getSpeed(); + this.usedCpu += vmSpec.getCpus() * vmSpec.getMaxSpeed(); this.usedMem += vmSpec.getMaxRam(); - _runningVms.put(vmName, new Pair(Long.valueOf(vmSpec.getCpus() * vmSpec.getSpeed()), vmSpec.getMaxRam())); + _runningVms.put(vmName, new Pair(Long.valueOf(vmSpec.getCpus() * vmSpec.getMaxSpeed()), vmSpec.getMaxRam())); state = State.Running; } finally { diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java index 5cb094184ba..00fe356103b 100755 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorDiscoverer.java @@ -53,7 +53,7 @@ import 
com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VMTemplateZoneDao; - +import org.springframework.stereotype.Component; @Local(value = Discoverer.class) public class SimulatorDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java index cd0cd2725c9..1dd71c5c27f 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorSecondaryDiscoverer.java @@ -42,7 +42,9 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.secondary.SecondaryStorageDiscoverer; import com.cloud.utils.exception.CloudRuntimeException; +import org.springframework.stereotype.Component; +@Component @Local(value=Discoverer.class) public class SimulatorSecondaryDiscoverer extends SecondaryStorageDiscoverer implements ResourceStateAdapter, Listener { private static final Logger s_logger = Logger.getLogger(SimulatorSecondaryDiscoverer.class); diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java index c9d308023ed..57a38f1d3d8 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/SimulatorGuru.java @@ -16,9 +16,6 @@ // under the License. 
package com.cloud.simulator; -import javax.ejb.Local; -import javax.inject.Inject; - import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuru; @@ -28,6 +25,10 @@ import com.cloud.storage.dao.GuestOSDao; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import javax.ejb.Local; +import javax.inject.Inject; + + @Local(value=HypervisorGuru.class) public class SimulatorGuru extends HypervisorGuruBase implements HypervisorGuru { @Inject GuestOSDao _guestOsDao; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java index bd1b48dfde8..fd825b751ed 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java @@ -16,18 +16,19 @@ // under the License. 
package com.cloud.simulator.dao; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.util.Formatter; - -import javax.ejb.Local; - import com.cloud.simulator.MockConfigurationVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Formatter; + +@Component @Local(value={MockConfigurationDao.class}) public class MockConfigurationDaoImpl extends GenericDaoBase implements MockConfigurationDao { private SearchBuilder _searchByDcIdName; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java index 8a566d79ed2..4b60bc02d47 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockHostDaoImpl.java @@ -16,14 +16,16 @@ // under the License. 
package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockHost; import com.cloud.simulator.MockHostVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockHostDao.class}) public class MockHostDaoImpl extends GenericDaoBase implements MockHostDao { protected final SearchBuilder GuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java index 65a375f5843..d4903244179 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecStorageDaoImpl.java @@ -16,13 +16,15 @@ // under the License. package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockSecStorageVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockSecStorageDao.class}) public class MockSecStorageDaoImpl extends GenericDaoBase implements MockSecStorageDao { protected final SearchBuilder urlSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java index 8831efef2ec..d35607e0ebc 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockSecurityRulesDaoImpl.java @@ -16,16 +16,18 @@ // under the License. 
package com.cloud.simulator.dao; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - import com.cloud.simulator.MockSecurityRulesVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; +import java.util.List; +import java.util.Map; + +@Component @Local(value={MockSecurityRulesDao.class}) public class MockSecurityRulesDaoImpl extends GenericDaoBase implements MockSecurityRulesDao { protected SearchBuilder vmIdSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java index 3a64d27e30d..0fc41abdc4c 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockStoragePoolDaoImpl.java @@ -16,14 +16,16 @@ // under the License. 
package com.cloud.simulator.dao; -import javax.ejb.Local; - import com.cloud.simulator.MockStoragePoolVO; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; +import javax.ejb.Local; + +@Component @Local(value={MockStoragePoolDao.class}) public class MockStoragePoolDaoImpl extends GenericDaoBase implements MockStoragePoolDao { protected final SearchBuilder uuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java index be7a98859e2..5a8c66d69ef 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVMDaoImpl.java @@ -16,14 +16,6 @@ // under the License. package com.cloud.simulator.dao; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - import com.cloud.simulator.MockHostVO; import com.cloud.simulator.MockVMVO; import com.cloud.utils.db.GenericDaoBase; @@ -31,7 +23,16 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.vm.VirtualMachine; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Component @Local(value={MockVMDao.class}) public class MockVMDaoImpl extends GenericDaoBase implements MockVMDao { protected SearchBuilder GuidSearch; diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java 
b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java index a3a35179337..5d64a9fa246 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockVolumeDaoImpl.java @@ -16,10 +16,6 @@ // under the License. package com.cloud.simulator.dao; -import java.util.List; - -import javax.ejb.Local; - import com.cloud.simulator.MockVolumeVO; import com.cloud.simulator.MockVolumeVO.MockVolumeType; import com.cloud.utils.db.GenericDaoBase; @@ -27,7 +23,12 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; +import org.springframework.stereotype.Component; +import javax.ejb.Local; +import java.util.List; + +@Component @Local(value={MockVolumeDao.class}) public class MockVolumeDaoImpl extends GenericDaoBase implements MockVolumeDao { protected final SearchBuilder idTypeSearch; diff --git a/plugins/hypervisors/ucs/pom.xml b/plugins/hypervisors/ucs/pom.xml index 54cd68fd6b7..24bdc948e73 100755 --- a/plugins/hypervisors/ucs/pom.xml +++ b/plugins/hypervisors/ucs/pom.xml @@ -24,12 +24,12 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack cloud-plugin-hypervisor-ucs - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT Apache CloudStack Plugin - Hypervisor UCS http://maven.apache.org diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDao.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDao.java index ce42fe0eee2..581096d07e7 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDao.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDao.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.database; import com.cloud.utils.db.GenericDao; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java index 8f65f6a21ae..ae0980c85a9 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java @@ -5,22 +5,25 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.database; import javax.ejb.Local; + +import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -@Local(value = { UcsBladeDao.class }) +@Local(value = { UcsBladeDao.class }) @DB(txn = false) public class UcsBladeDaoImpl extends GenericDaoBase implements UcsBladeDao { diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeVO.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeVO.java old mode 100644 new mode 100755 index 527bc2706fd..a8c6fd7fb3f --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeVO.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeVO.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.database; import javax.persistence.Column; @@ -22,10 +23,13 @@ import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; @Entity @Table(name="ucs_blade") -public class UcsBladeVO { +public class UcsBladeVO implements InternalIdentity, Identity { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") @@ -41,8 +45,11 @@ public class UcsBladeVO { private Long hostId; @Column(name="dn") - private String dn; - + private String dn; + + @Column(name="profile_dn") + private String profileDn; + public long getId() { return id; } @@ -81,5 +88,13 @@ public class UcsBladeVO { public void setUuid(String uuid) { this.uuid = uuid; - } + } + + public String getProfileDn() { + return profileDn; + } + + public void setProfileDn(String profileDn) { + this.profileDn = profileDn; + } } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java index fd6d762f1b7..84fe29cac8b 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.database; import java.util.List; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java index bccc0faefa7..39f978221f8 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java @@ -5,23 +5,26 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.database; import javax.ejb.Local; + +import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -@Local(value = { UcsManagerDao.class }) +@Local(value = { UcsManagerDao.class }) @DB(txn = false) public class UcsManagerDaoImpl extends GenericDaoBase implements UcsManagerDao { } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerVO.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerVO.java index 281d6a54de6..cd8371ba7be 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerVO.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerVO.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.database; import javax.persistence.Column; @@ -22,10 +23,13 @@ import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; - + +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + @Entity @Table(name="ucs_manager") -public class UcsManagerVO { +public class UcsManagerVO implements InternalIdentity, Identity { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerCmd.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerCmd.java old mode 100644 new mode 100755 index 078add9bdd5..ce8e40e7d65 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerCmd.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerCmd.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import javax.inject.Inject; @@ -25,6 +26,7 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; import com.cloud.exception.ConcurrentOperationException; @@ -35,33 +37,33 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.server.ManagementService; import com.cloud.user.Account; -@APICommand(description="Adds a Ucs manager", responseObject=AddUcsManagerResponse.class) +@APICommand(name="addUcsManager", description="Adds a Ucs manager", responseObject=UcsManagerResponse.class) public class AddUcsManagerCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(AddUcsManagerCmd.class); @Inject private UcsManager mgr; - @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.LONG, description="the Zone id for the ucs manager", required=true) + @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, description="the Zone id for the ucs manager", entityType=ZoneResponse.class, required=true) private Long zoneId; @Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="the name of UCS manager") private String name; - @Parameter(name=ApiConstants.URL, type=CommandType.STRING, description="the name of UCS url") + @Parameter(name=ApiConstants.URL, type=CommandType.STRING, description="the name of UCS url", required=true) private String url; - @Parameter(name=ApiConstants.USERNAME, type=CommandType.STRING, description="the username of UCS") + @Parameter(name=ApiConstants.USERNAME, type=CommandType.STRING, description="the username of UCS", required=true) private String username; - @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, description="the password of UCS") + @Parameter(name=ApiConstants.PASSWORD, type=CommandType.STRING, 
description="the password of UCS", required=true) private String password; @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - AddUcsManagerResponse rsp = mgr.addUcsManager(this); + UcsManagerResponse rsp = mgr.addUcsManager(this); rsp.setObjectName("ucsmanager"); rsp.setResponseName(getCommandName()); this.setResponseObject(rsp); diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladeCmd.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladeCmd.java old mode 100644 new mode 100755 index cc59e4212a0..c4638281a46 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladeCmd.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladeCmd.java @@ -5,22 +5,25 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import javax.inject.Inject; import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.log4j.Logger; @@ -30,25 +33,26 @@ import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.user.Account; -@APICommand(description="associate a profile to a blade", responseObject=AssociateUcsProfileToBladesInClusterResponse.class) +@APICommand(name="associatesUscProfileToBlade", description="associate a profile to a blade", responseObject=UcsBladeResponse.class) public class AssociateUcsProfileToBladeCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(AssociateUcsProfileToBladeCmd.class); @Inject private UcsManager mgr; - + + @Parameter(name=ApiConstants.UCS_MANAGER_ID, type=CommandType.UUID, description="ucs manager id", entityType=UcsManagerResponse.class, required=true) private Long ucsManagerId; + @Parameter(name=ApiConstants.UCS_PROFILE_DN, type=CommandType.STRING, description="profile dn", required=true) private String profileDn; + @Parameter(name=ApiConstants.UCS_BLADE_ID, type=CommandType.UUID, entityType=UcsBladeResponse.class, description="blade id", required=true) private Long bladeId; @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - mgr.associateProfileToBlade(this); - AssociateUcsProfileToBladesInClusterResponse rsp = new AssociateUcsProfileToBladesInClusterResponse(); + UcsBladeResponse rsp = mgr.associateProfileToBlade(this); rsp.setResponseName(getCommandName()); - 
rsp.setObjectName("associateucsprofiletobalde"); this.setResponseObject(rsp); } catch (Exception e) { s_logger.warn("Exception: ", e); diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsBladeCmd.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsBladeCmd.java new file mode 100755 index 00000000000..5440e4faee0 --- /dev/null +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsBladeCmd.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.ucs.manager; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; + +@APICommand(name="listUcsBlade", description="List ucs blades", responseObject=UcsBladeResponse.class) +public class ListUcsBladeCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListUcsBladeCmd.class); + + @Inject + private UcsManager mgr; + + @Parameter(name=ApiConstants.UCS_MANAGER_ID, type=CommandType.UUID, description="ucs manager id", entityType=UcsManagerResponse.class, required=true) + private Long ucsManagerId; + + public UcsManager getMgr() { + return mgr; + } + + public void setMgr(UcsManager mgr) { + this.mgr = mgr; + } + + public Long getUcsManagerId() { + return ucsManagerId; + } + + public void setUcsManagerId(Long ucsManagerId) { + this.ucsManagerId = ucsManagerId; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, + ResourceAllocationException, NetworkRuleConflictException { + try { + ListResponse response = mgr.listUcsBlades(this); + response.setResponseName(getCommandName()); + response.setObjectName("ucsblade"); + this.setResponseObject(response); + } catch (Exception e) { + s_logger.warn(e.getMessage(), e); 
+ throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); + } + } + + @Override + public String getCommandName() { + return "listucsbladeresponse"; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } +} diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerCmd.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerCmd.java old mode 100644 new mode 100755 index 31662d951f8..862f6314338 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerCmd.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerCmd.java @@ -5,15 +5,24 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// +// Automatically generated by addcopyright.py at 02/28/2013 +// regarding copyright ownership. 
The ASF licenses this file +// "License"); you may not use this file except in compliance +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY package com.cloud.ucs.manager; import javax.inject.Inject; @@ -22,6 +31,7 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ListResponse; @@ -34,8 +44,8 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.server.ManagementService; import com.cloud.user.Account; -@APICommand(description="List ucs manager", responseObject=ListUcsManagerResponse.class) -public class ListUcsManagerCmd extends BaseCmd { +@APICommand(description="List ucs manager", responseObject=UcsManagerResponse.class) +public class ListUcsManagerCmd extends BaseListCmd { public static final Logger s_logger = Logger.getLogger(ListUcsManagerCmd.class); @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.LONG, description="the zone id", required=true) @@ -48,7 +58,7 @@ public class ListUcsManagerCmd extends BaseCmd { public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - ListResponse response = mgr.listUcsManager(this); + ListResponse response = mgr.listUcsManager(this); response.setResponseName(getCommandName()); response.setObjectName("ucsmanager"); this.setResponseObject(response); diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerResponse.java 
b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerResponse.java deleted file mode 100644 index 450d59c7c18..00000000000 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsManagerResponse.java +++ /dev/null @@ -1,58 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.ucs.manager; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; - -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; - -public class ListUcsManagerResponse extends BaseResponse { - @SerializedName(ApiConstants.ID) @Param(description="id of ucs manager") - private String id; - - @SerializedName(ApiConstants.NAME) @Param(description="name of ucs manager") - private String name; - - @SerializedName(ApiConstants.ZONE_ID) @Param(description="zone id the ucs manager belongs to") - private String zoneId; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getZoneId() { - return zoneId; - } - - public void setZoneId(String zoneId) { - this.zoneId = zoneId; - } -} diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileCmd.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileCmd.java old mode 100644 new mode 100755 index 7cbbe14612e..c8a2cc57378 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileCmd.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileCmd.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import javax.inject.Inject; @@ -23,6 +24,7 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseCmd.CommandType; +import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ListResponse; @@ -35,13 +37,13 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.server.ManagementService; import com.cloud.user.Account; -@APICommand(description="List profile in ucs manager", responseObject=ListUcsProfileResponse.class) -public class ListUcsProfileCmd extends BaseCmd { +@APICommand(name="listUcsProfile", description="List profile in ucs manager", responseObject=UcsProfileResponse.class) +public class ListUcsProfileCmd extends BaseListCmd { public static final Logger s_logger = Logger.getLogger(ListUcsProfileCmd.class); @Inject UcsManager mgr; - @Parameter(name=ApiConstants.ID, type=CommandType.LONG, description="the id for the ucs manager", required=true) + @Parameter(name=ApiConstants.UCS_MANAGER_ID, type=CommandType.UUID, entityType=UcsManagerResponse.class, description="the id for the ucs manager", required=true) private Long ucsManagerId; public Long getUcsManagerId() { @@ -56,9 +58,9 @@ public class ListUcsProfileCmd extends BaseCmd { public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - ListResponse response = mgr.listUcsProfiles(this); + ListResponse response = mgr.listUcsProfiles(this); response.setResponseName(getCommandName()); - response.setObjectName("ucsprofile"); + response.setObjectName("ucsprofiles"); this.setResponseObject(response); } catch 
(Exception e) { s_logger.warn("Exception: ", e); diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/StringTemplate.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/StringTemplate.java index 5e5ceb98d5a..72bbfcb6f21 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/StringTemplate.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/StringTemplate.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.manager; import java.util.Map; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsBladeResponse.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsBladeResponse.java new file mode 100755 index 00000000000..862b206a507 --- /dev/null +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsBladeResponse.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.ucs.manager; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; + +import com.cloud.serializer.Param; +import com.cloud.ucs.database.UcsBladeVO; +import com.google.gson.annotations.SerializedName; +@EntityReference(value=UcsBladeVO.class) +public class UcsBladeResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) + @Param(description = "ucs blade id") + private String id; + @SerializedName(ApiConstants.UCS_MANAGER_ID) + @Param(description = "ucs manager id") + private String ucsManagerId; + @SerializedName(ApiConstants.HOST_ID) + @Param(description = "cloudstack host id this blade associates to") + private String hostId; + @SerializedName(ApiConstants.UCS_BLADE_DN) + @Param(description = "ucs blade dn") + private String dn; + @SerializedName(ApiConstants.UCS_PROFILE_DN) + @Param(description = "associated ucs profile dn") + private String associatedProfileDn; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getUcsManagerId() { + return ucsManagerId; + } + + public void setUcsManagerId(String ucsManagerId) { + this.ucsManagerId = ucsManagerId; + } + + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getDn() { + return dn; + } + + public void setDn(String dn) { + this.dn = dn; + } + + public String getAssociatedProfileDn() { + return associatedProfileDn; + } + + public 
void setAssociatedProfileDn(String associatedProfileDn) { + this.associatedProfileDn = associatedProfileDn; + } + +} diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsCommands.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsCommands.java index 2dc4daacba0..eb999910992 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsCommands.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsCommands.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.manager; import com.cloud.utils.xmlobject.XmlObject; @@ -83,7 +84,7 @@ public class UcsCommands { .putElement("statsPolicyName", "default") .putElement("status", "") .putElement("usrLbl", "") - .putElement("", "") + .putElement("uuid", "") .putElement("vconProfileName", "") .putElement("lsBinding", new XmlObject("lsBinding") .putElement("pnDn", bladeDn) diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsHttpClient.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsHttpClient.java index d887650580a..5c60601275e 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsHttpClient.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsHttpClient.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. +// package com.cloud.ucs.manager; import org.apache.commons.httpclient.HttpClient; @@ -39,7 +40,12 @@ public class UcsHttpClient { if (result != 200) { throw new CloudRuntimeException("Call failed: " + post.getResponseBodyAsString()); } - return post.getResponseBodyAsString(); + String res = post.getResponseBodyAsString(); + if (res.contains("errorCode")) { + String err = String.format("ucs call failed:\nsubmitted doc:%s\nresponse:%s\n", xml, res); + throw new CloudRuntimeException(err); + } + return res; } catch (Exception e) { throw new CloudRuntimeException(e.getMessage(), e); } finally { diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManager.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManager.java old mode 100644 new mode 100755 index 2e8040a0473..aa45f8c09fd --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManager.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManager.java @@ -5,27 +5,31 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import org.apache.cloudstack.api.response.ListResponse; import com.cloud.utils.component.Manager; +import com.cloud.utils.component.PluggableService; -public interface UcsManager extends Manager { - AddUcsManagerResponse addUcsManager(AddUcsManagerCmd cmd); +public interface UcsManager extends Manager, PluggableService { + UcsManagerResponse addUcsManager(AddUcsManagerCmd cmd); - ListResponse listUcsProfiles(ListUcsProfileCmd cmd); + ListResponse listUcsProfiles(ListUcsProfileCmd cmd); - ListResponse listUcsManager(ListUcsManagerCmd cmd); + ListResponse listUcsManager(ListUcsManagerCmd cmd); - void associateProfileToBlade(AssociateUcsProfileToBladeCmd cmd); + UcsBladeResponse associateProfileToBlade(AssociateUcsProfileToBladeCmd cmd); + + ListResponse listUcsBlades(ListUcsBladeCmd cmd); } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java old mode 100644 new mode 100755 index 356113d498e..f428e03b7c1 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import java.io.File; @@ -42,8 +43,11 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Cluster; import com.cloud.resource.ResourceService; @@ -64,7 +68,6 @@ import com.cloud.utils.xmlobject.XmlObject; import com.cloud.utils.xmlobject.XmlObjectParser; @Local(value = { UcsManager.class }) -@Component public class UcsManagerImpl implements UcsManager { public static final Logger s_logger = Logger.getLogger(UcsManagerImpl.class); @@ -78,8 +81,15 @@ public class UcsManagerImpl implements UcsManager { private ClusterDetailsDao clusterDetailsDao; @Inject private UcsBladeDao bladeDao; + @Inject + private HostDao hostDao; + @Inject + private DataCenterDao dcDao; - private Map cookies = new HashMap(); + private Map cookies = new HashMap(); + private String name; + private int runLevel; + private Map params; @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -98,7 +108,7 @@ public class UcsManagerImpl implements UcsManager { @Override public String getName() { - return "UcsManager"; + return name; } private void discoverBlades(UcsManagerVO ucsMgrVo) { @@ -114,7 +124,7 @@ public class UcsManagerImpl implements UcsManager { @Override @DB - public AddUcsManagerResponse addUcsManager(AddUcsManagerCmd cmd) { + public UcsManagerResponse addUcsManager(AddUcsManagerCmd cmd) { UcsManagerVO vo = new UcsManagerVO(); vo.setUuid(UUID.randomUUID().toString()); vo.setPassword(cmd.getPassword()); @@ -127,7 +137,7 @@ public class UcsManagerImpl implements UcsManager { txn.start(); ucsDao.persist(vo); txn.commit(); - AddUcsManagerResponse rsp = new AddUcsManagerResponse(); + UcsManagerResponse 
rsp = new UcsManagerResponse(); rsp.setId(String.valueOf(vo.getId())); rsp.setName(vo.getName()); rsp.setUrl(vo.getUrl()); @@ -145,7 +155,9 @@ public class UcsManagerImpl implements UcsManager { UcsManagerVO mgrvo = ucsDao.findById(ucsMgrId); UcsHttpClient client = new UcsHttpClient(mgrvo.getUrl()); String login = UcsCommands.loginCmd(mgrvo.getUsername(), mgrvo.getPassword()); - cookie = client.call(login); + String ret = client.call(login); + XmlObject xo = XmlObjectParser.parseFromString(ret); + cookie = xo.get("outCookie"); cookies.put(ucsMgrId, cookie); } @@ -175,12 +187,12 @@ public class UcsManagerImpl implements UcsManager { } @Override - public ListResponse listUcsProfiles(ListUcsProfileCmd cmd) { + public ListResponse listUcsProfiles(ListUcsProfileCmd cmd) { List profiles = getUcsProfiles(cmd.getUcsManagerId()); - ListResponse response = new ListResponse(); - List rs = new ArrayList(); + ListResponse response = new ListResponse(); + List rs = new ArrayList(); for (UcsProfile p : profiles) { - ListUcsProfileResponse r = new ListUcsProfileResponse(); + UcsProfileResponse r = new UcsProfileResponse(); r.setObjectName("ucsprofile"); r.setDn(p.getDn()); rs.add(r); @@ -196,7 +208,7 @@ public class UcsManagerImpl implements UcsManager { String cmd = UcsCommands.cloneProfile(cookie, srcDn, newProfileName); String res = client.call(cmd); XmlObject xo = XmlObjectParser.parseFromString(res); - return xo.get("lsClone.outConfig.lsServer.dn"); + return xo.get("outConfig.lsServer.dn"); } private boolean isProfileAssociated(Long ucsMgrId, String dn) { @@ -206,11 +218,11 @@ public class UcsManagerImpl implements UcsManager { String cmd = UcsCommands.configResolveDn(cookie, dn); String res = client.call(cmd); XmlObject xo = XmlObjectParser.parseFromString(res); - return xo.get("outConfig.lsServer.assocState").equals("associated"); + return xo.get("outConfig.computeBlade.association").equals("associated"); } - + @Override - public void 
associateProfileToBlade(AssociateUcsProfileToBladeCmd cmd) { + public UcsBladeResponse associateProfileToBlade(AssociateUcsProfileToBladeCmd cmd) { SearchCriteriaService q = SearchCriteria2.create(UcsBladeVO.class); q.addAnd(q.getEntity().getUcsManagerId(), Op.EQ, cmd.getUcsManagerId()); q.addAnd(q.getEntity().getId(), Op.EQ, cmd.getBladeId()); @@ -248,57 +260,115 @@ public class UcsManagerImpl implements UcsManager { if (count >= timeout) { throw new CloudRuntimeException(String.format("associating profile[%s] to balde[%s] timeout after 600 seconds", pdn, bvo.getDn())); } + + bvo.setProfileDn(pdn); + bladeDao.update(bvo.getId(), bvo); + + UcsBladeResponse rsp = bladeVOToResponse(bvo); - s_logger.debug(String.format("successfully associated profile[%s] to blade[%s]", pdn, bvo.getDn())); - } + s_logger.debug(String.format("successfully associated profile[%s] to blade[%s]", pdn, bvo.getDn())); + return rsp; + } + + private String hostIdToUuid(Long hostId) { + if (hostId == null) { + return null; + } + HostVO vo = hostDao.findById(hostId); + return vo.getUuid(); + } + + private String zoneIdToUuid(Long zoneId) { + DataCenterVO vo = dcDao.findById(zoneId); + return vo.getUuid(); + } + + private String ucsManagerIdToUuid(Long ucsMgrId) { + UcsManagerVO vo = ucsDao.findById(ucsMgrId); + return vo.getUuid(); + } @Override - public ListResponse listUcsManager(ListUcsManagerCmd cmd) { + public ListResponse listUcsManager(ListUcsManagerCmd cmd) { SearchCriteriaService serv = SearchCriteria2.create(UcsManagerVO.class); serv.addAnd(serv.getEntity().getZoneId(), Op.EQ, cmd.getZoneId()); List vos = serv.list(); - List rsps = new ArrayList(vos.size()); + List rsps = new ArrayList(vos.size()); for (UcsManagerVO vo : vos) { - ListUcsManagerResponse rsp = new ListUcsManagerResponse(); + UcsManagerResponse rsp = new UcsManagerResponse(); rsp.setObjectName("ucsmanager"); - rsp.setId(String.valueOf(vo.getId())); - rsp.setName(vo.getName()); - 
rsp.setZoneId(String.valueOf(vo.getZoneId())); + rsp.setId(vo.getUuid()); + rsp.setName(vo.getName()); + rsp.setUrl(vo.getUrl()); + rsp.setZoneId(zoneIdToUuid(vo.getZoneId())); rsps.add(rsp); } - ListResponse response = new ListResponse(); + ListResponse response = new ListResponse(); response.setResponses(rsps); return response; + } + + private UcsBladeResponse bladeVOToResponse(UcsBladeVO vo) { + UcsBladeResponse rsp = new UcsBladeResponse(); + rsp.setObjectName("ucsblade"); + rsp.setId(vo.getUuid()); + rsp.setDn(vo.getDn()); + rsp.setHostId(hostIdToUuid(vo.getHostId())); + rsp.setUcsManagerId(ucsManagerIdToUuid(vo.getUcsManagerId())); + return rsp; + } + + public ListResponse listUcsBlades(ListUcsBladeCmd cmd) { + SearchCriteriaService serv = SearchCriteria2.create(UcsBladeVO.class); + serv.addAnd(serv.getEntity().getUcsManagerId(), Op.EQ, cmd.getUcsManagerId()); + List vos = serv.list(); + + List rsps = new ArrayList(vos.size()); + for (UcsBladeVO vo : vos) { + UcsBladeResponse rsp = bladeVOToResponse(vo); + rsps.add(rsp); + } + + ListResponse response = new ListResponse(); + response.setResponses(rsps); + + return response; } @Override - public void setName(String name) { - // TODO Auto-generated method stub - + public void setName(String name) { + this.name = name; } @Override - public void setConfigParams(Map params) { - // TODO Auto-generated method stub - + public void setConfigParams(Map params) { + this.params = params; } @Override public Map getConfigParams() { - // TODO Auto-generated method stub - return null; + return this.params; } @Override public int getRunLevel() { - // TODO Auto-generated method stub - return 0; + return runLevel; } @Override - public void setRunLevel(int level) { - // TODO Auto-generated method stub - + public void setRunLevel(int level) { + this.runLevel = level; + } + + @Override + public List> getCommands() { + List> cmds = new ArrayList>(); + cmds.add(ListUcsBladeCmd.class); + cmds.add(ListUcsManagerCmd.class); + 
cmds.add(ListUcsProfileCmd.class); + cmds.add(AddUcsManagerCmd.class); + cmds.add(AssociateUcsProfileToBladeCmd.class); + return cmds; } } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerResponse.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerResponse.java old mode 100644 new mode 100755 similarity index 88% rename from plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerResponse.java rename to plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerResponse.java index 98dfd0455a2..6a82daea816 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AddUcsManagerResponse.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerResponse.java @@ -5,24 +5,28 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; -import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.EntityReference; + import org.apache.cloudstack.api.BaseResponse; import com.cloud.serializer.Param; +import com.cloud.ucs.database.UcsManagerVO; import com.google.gson.annotations.SerializedName; - -public class AddUcsManagerResponse extends BaseResponse { +@EntityReference(value=UcsManagerVO.class) +public class UcsManagerResponse extends BaseResponse { @SerializedName(ApiConstants.ID) @Param(description="the ID of the ucs manager") private String id; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileResponse.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsProfileResponse.java old mode 100644 new mode 100755 similarity index 87% rename from plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileResponse.java rename to plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsProfileResponse.java index c29d1d078d0..0621f6fa983 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/ListUcsProfileResponse.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsProfileResponse.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.manager; import org.apache.cloudstack.api.ApiConstants; @@ -22,9 +23,9 @@ import org.apache.cloudstack.api.BaseResponse; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; -public class ListUcsProfileResponse extends BaseResponse { - @SerializedName(ApiConstants.UCS_DN) @Param(description="the dn of ucs profile") - private String dn; +public class UcsProfileResponse extends BaseResponse { + @SerializedName(ApiConstants.UCS_DN) @Param(description="ucs profile dn") + private String dn; public String getDn() { return dn; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/ComputeBlade.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/ComputeBlade.java index 468561187f0..e3b1cfda35c 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/ComputeBlade.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/ComputeBlade.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.structure; import java.util.ArrayList; @@ -45,7 +46,7 @@ public class ComputeBlade { public static List fromXmString(String xmlstr) { XmlObject root = XmlObjectParser.parseFromString(xmlstr); - List lst = root.getAsList("configResolveClass.outConfigs.computeBlade"); + List lst = root.getAsList("outConfigs.computeBlade"); List blades = new ArrayList(); if (lst == null) { return blades; diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/UcsProfile.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/UcsProfile.java index 5cb8c39eef5..16cf35d539f 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/UcsProfile.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/structure/UcsProfile.java @@ -5,15 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
+// package com.cloud.ucs.structure; import java.util.ArrayList; @@ -36,7 +37,9 @@ public class UcsProfile { List xos = xo.getAsList("outDns.dn"); if (xos != null) { for (XmlObject x : xos) { - UcsProfile p = UcsProfile.fromXmlObject(x); + //UcsProfile p = UcsProfile.fromXmlObject(x); + UcsProfile p = new UcsProfile(); + p.setDn(x.get("value").toString()); ps.add(p); } } diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml index d990e89b388..468e0a50599 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml @@ -38,18 +38,6 @@ ${cs.vmware.api.version} compile - - com.cloud.com.vmware - vmware-vim - ${cs.vmware.api.version} - compile - - - com.cloud.com.vmware - vmware-apputils - ${cs.vmware.api.version} - compile - org.apache.axis axis diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java index 819d3999f92..bb7c29745d9 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/guru/VMwareGuru.java @@ -1,3 +1,4 @@ + // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. 
See the NOTICE file // distributed with this work for additional information @@ -129,6 +130,17 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru { } } } + + String diskDeviceType = details.get(VmDetailConstants.ROOK_DISK_CONTROLLER); + if (!(vm.getVirtualMachine() instanceof DomainRouterVO || vm.getVirtualMachine() instanceof ConsoleProxyVO + || vm.getVirtualMachine() instanceof SecondaryStorageVmVO)){ + // user vm + if (diskDeviceType != null){ + details.remove(VmDetailConstants.ROOK_DISK_CONTROLLER); + } + details.put(VmDetailConstants.ROOK_DISK_CONTROLLER, _vmwareMgr.getRootDiskController()); + } + to.setDetails(details); if(vm.getVirtualMachine() instanceof DomainRouterVO) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index ddbce661239..94ba97d96a1 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.hypervisor.vmware; @@ -29,10 +29,13 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; +import org.apache.cloudstack.api.ApiConstants; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter.NetworkType; @@ -41,17 +44,23 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.DiscoveredWithErrorException; import com.cloud.exception.DiscoveryException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.vmware.manager.VmwareManager; import com.cloud.hypervisor.vmware.mo.ClusterMO; import com.cloud.hypervisor.vmware.mo.HostMO; +import com.cloud.hypervisor.vmware.mo.VirtualSwitchType; import com.cloud.hypervisor.vmware.resource.VmwareContextFactory; import com.cloud.hypervisor.vmware.resource.VmwareResource; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.network.NetworkModel; +import com.cloud.network.Networks.TrafficType; +import com.cloud.network.PhysicalNetwork; +import com.cloud.network.VmwareTrafficLabel; import com.cloud.network.dao.CiscoNexusVSMDeviceDao; import com.cloud.resource.Discoverer; import com.cloud.resource.DiscovererBase; @@ -69,6 +78,7 @@ import com.cloud.utils.UriUtils; import com.vmware.vim25.ClusterDasConfigInfo; import com.vmware.vim25.ManagedObjectReference; + @Local(value = Discoverer.class) public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer, 
ResourceStateAdapter { @@ -95,77 +105,158 @@ public class VmwareServerDiscoverer extends DiscovererBase implements CiscoNexusVSMDeviceDao _nexusDao; @Inject NetworkModel _netmgr; - + @Inject + HypervisorCapabilitiesDao _hvCapabilitiesDao; + protected Map _urlParams; + protected boolean useDVS = false; + protected boolean nexusDVS = false; + public VmwareServerDiscoverer() { s_logger.info("VmwareServerDiscoverer is constructed"); } @Override - public Map> find(long dcId, - Long podId, Long clusterId, URI url, String username, - String password, List hostTags) throws DiscoveryException { + public Map> find(long dcId, Long podId, Long clusterId, URI url, + String username, String password, List hostTags) throws DiscoveryException { - if (s_logger.isInfoEnabled()) - s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId - + ", cluster: " + clusterId + ", uri host: " - + url.getHost()); + if(s_logger.isInfoEnabled()) + s_logger.info("Discover host. dc: " + dcId + ", pod: " + podId + ", cluster: " + clusterId + ", uri host: " + url.getHost()); - if (podId == null) { - if (s_logger.isInfoEnabled()) + if(podId == null) { + if(s_logger.isInfoEnabled()) s_logger.info("No pod is assigned, assuming that it is not for vmware and skip it to next discoverer"); return null; } ClusterVO cluster = _clusterDao.findById(clusterId); - if (cluster == null - || cluster.getHypervisorType() != HypervisorType.VMware) { - if (s_logger.isInfoEnabled()) + if(cluster == null || cluster.getHypervisorType() != HypervisorType.VMware) { + if(s_logger.isInfoEnabled()) s_logger.info("invalid cluster id or cluster is not for VMware hypervisors"); return null; } List hosts = _resourceMgr.listAllHostsInCluster(clusterId); - if (hosts.size() >= _vmwareMgr.getMaxHostsPerCluster()) { - String msg = "VMware cluster " - + cluster.getName() - + " is too big to add new host now. 
(current configured cluster size: " - + _vmwareMgr.getMaxHostsPerCluster() + ")"; + if (hosts != null && hosts.size() > 0) { + int maxHostsPerCluster = _hvCapabilitiesDao.getMaxHostsPerCluster(hosts.get(0).getHypervisorType(), hosts.get(0).getHypervisorVersion()); + if (hosts.size() > maxHostsPerCluster) { + String msg = "VMware cluster " + cluster.getName() + " is too big to add new host now. (current configured cluster size: " + maxHostsPerCluster + ")"; s_logger.error(msg); throw new DiscoveredWithErrorException(msg); } + } String privateTrafficLabel = null; String publicTrafficLabel = null; String guestTrafficLabel = null; Map vsmCredentials = null; - privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, - HypervisorType.VMware); + VirtualSwitchType defaultVirtualSwitchType = VirtualSwitchType.StandardVirtualSwitch; + + String paramGuestVswitchType = null; + String paramGuestVswitchName = null; + String paramPublicVswitchType = null; + String paramPublicVswitchName = null; + + VmwareTrafficLabel guestTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Guest); + VmwareTrafficLabel publicTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Public); + Map clusterDetails = _clusterDetailsDao.findDetails(clusterId); + DataCenterVO zone = _dcDao.findById(dcId); + NetworkType zoneType = zone.getNetworkType(); + _readGlobalConfigParameters(); + + // Set default physical network end points for public and guest traffic + // Private traffic will be only on standard vSwitch for now. 
+ if (useDVS) { + // Parse url parameters for type of vswitch and name of vswitch specified at cluster level + paramGuestVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC); + paramGuestVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC); + paramPublicVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC); + paramPublicVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC); + defaultVirtualSwitchType = getDefaultVirtualSwitchType(); + } + + // Zone level vSwitch Type depends on zone level traffic labels + // + // User can override Zone wide vswitch type (for public and guest) by providing following optional parameters in addClusterCmd + // param "guestvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs + // param "publicvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs + // + // Format of label is ,, + // If a field OR is not present leave it empty. + // Ex: 1) vswitch0 + // 2) dvswitch0,200,vmwaredvs + // 3) nexusepp0,300,nexusdvs + // 4) vswitch1,400,vmwaresvs + // 5) vswitch0 + // default vswitchtype is 'vmwaresvs'. 
+ // 'vmwaresvs' is for vmware standard vswitch + // 'vmwaredvs' is for vmware distributed virtual switch + // 'nexusdvs' is for cisco nexus distributed virtual switch + // Get zone wide traffic labels for Guest traffic and Public traffic + guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware); + + // Process traffic label information provided at zone level and cluster level + guestTrafficLabelObj = getTrafficInfo(TrafficType.Guest, guestTrafficLabel, defaultVirtualSwitchType, paramGuestVswitchType, paramGuestVswitchName, clusterId); + + if (zoneType == NetworkType.Advanced) { + // Get zone wide traffic label for Public traffic + publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware); + + // Process traffic label information provided at zone level and cluster level + publicTrafficLabelObj = getTrafficInfo(TrafficType.Public, publicTrafficLabel, defaultVirtualSwitchType, paramPublicVswitchType, paramPublicVswitchName, clusterId); + + // Configuration Check: A physical network cannot be shared by different types of virtual switches. + // + // Check if different vswitch types are chosen for same physical network + // 1. Get physical network for guest traffic - multiple networks + // 2. Get physical network for public traffic - single network + // See if 2 is in 1 + // if no - pass + // if yes - compare publicTrafficLabelObj.getVirtualSwitchType() == guestTrafficLabelObj.getVirtualSwitchType() + // true - pass + // false - throw exception - fail cluster add operation + + List pNetworkListGuestTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Guest); + List pNetworkListPublicTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Public); + // Public network would be on single physical network hence getting first object of the list would suffice. 
+ PhysicalNetwork pNetworkPublic = pNetworkListPublicTraffic.get(0); + if (pNetworkListGuestTraffic.contains(pNetworkPublic)) { + if (publicTrafficLabelObj.getVirtualSwitchType() != guestTrafficLabelObj.getVirtualSwitchType()) { + String msg = "Both public traffic and guest traffic is over same physical network " + pNetworkPublic + + ". And virtual switch type chosen for each traffic is different" + + ". A physical network cannot be shared by different types of virtual switches."; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); + } + } + } else { + // Distributed virtual switch is not supported in Basic zone for now. + // Private / Management network traffic is not yet supported over distributed virtual switch. + if (guestTrafficLabelObj.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) { + String msg = "Detected that Guest traffic is over Distributed virtual switch in Basic zone. Only Standard vSwitch is supported in Basic zone."; + s_logger.error(msg); + throw new DiscoveredWithErrorException(msg); + } + } + + privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware); if (privateTrafficLabel != null) { - s_logger.info("Detected private network label : " - + privateTrafficLabel); + s_logger.info("Detected private network label : " + privateTrafficLabel); } - if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { - DataCenterVO zone = _dcDao.findById(dcId); - NetworkType zoneType = zone.getNetworkType(); + if (nexusDVS) { if (zoneType != NetworkType.Basic) { - publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, - HypervisorType.VMware); + publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware); if (publicTrafficLabel != null) { - s_logger.info("Detected public network label : " - + publicTrafficLabel); + s_logger.info("Detected public network label : " + publicTrafficLabel); } } // Get physical network label - guestTrafficLabel = 
_netmgr.getDefaultGuestTrafficLabel(dcId, - HypervisorType.VMware); + guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware); if (guestTrafficLabel != null) { - s_logger.info("Detected guest network label : " - + guestTrafficLabel); + s_logger.info("Detected guest network label : " + guestTrafficLabel); } - vsmCredentials = _vmwareMgr - .getNexusVSMCredentialsByClusterId(clusterId); + vsmCredentials = _vmwareMgr.getNexusVSMCredentialsByClusterId(clusterId); } VmwareContext context = null; @@ -176,7 +267,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements context.registerStockObject("privateTrafficLabel", privateTrafficLabel); - if (_vmwareMgr.getNexusVSwitchGlobalParameter()) { + if (nexusDVS) { if (vsmCredentials != null) { s_logger.info("Stocking credentials of Nexus VSM"); context.registerStockObject("vsmcredentials", @@ -198,8 +289,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } ManagedObjectReference morCluster = null; - Map clusterDetails = _clusterDetailsDao - .findDetails(clusterId); + clusterDetails = _clusterDetailsDao.findDetails(clusterId); if (clusterDetails.get("url") != null) { URI uriFromCluster = new URI( UriUtils.encodeURIComponent(clusterDetails.get("url"))); @@ -215,8 +305,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } else { ClusterMO clusterMo = new ClusterMO(context, morCluster); ClusterDasConfigInfo dasConfig = clusterMo.getDasConfig(); - if (dasConfig != null && dasConfig.getEnabled() != null - && dasConfig.getEnabled().booleanValue()) { + if (dasConfig != null && dasConfig.isEnabled() != null + && dasConfig.isEnabled().booleanValue()) { clusterDetails.put("NativeHA", "true"); _clusterDetailsDao.persist(clusterId, clusterDetails); } @@ -240,7 +330,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements details.put("url", hostMo.getHostName()); details.put("username", username); details.put("password", password); 
- String guid = morHost.getType() + ":" + morHost.get_value() + String guid = morHost.getType() + ":" + morHost.getValue() + "@" + url.getHost(); details.put("guid", guid); @@ -255,13 +345,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements params.put("private.network.vswitch.name", privateTrafficLabel); } - if (publicTrafficLabel != null) { - params.put("public.network.vswitch.name", - publicTrafficLabel); - } - if (guestTrafficLabel != null) { - params.put("guest.network.vswitch.name", guestTrafficLabel); - } + params.put("guestTrafficInfo", guestTrafficLabelObj); + params.put("publicTrafficInfo", publicTrafficLabelObj); VmwareResource resource = new VmwareResource(); try { @@ -301,7 +386,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements if (morCluster == null) { for (ManagedObjectReference morHost : morHosts) { ManagedObjectReference morParent = (ManagedObjectReference) context - .getServiceUtil().getDynamicProperty(morHost, "parent"); + .getVimClient().getDynamicProperty(morHost, "parent"); if (morParent.getType().equalsIgnoreCase( "ClusterComputeResource")) return false; @@ -309,12 +394,12 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } else { for (ManagedObjectReference morHost : morHosts) { ManagedObjectReference morParent = (ManagedObjectReference) context - .getServiceUtil().getDynamicProperty(morHost, "parent"); + .getVimClient().getDynamicProperty(morHost, "parent"); if (!morParent.getType().equalsIgnoreCase( "ClusterComputeResource")) return false; - if (!morParent.get_value().equals(morCluster.get_value())) + if (!morParent.getValue().equals(morCluster.getValue())) return false; } } @@ -380,6 +465,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } } + @Override public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { @@ -415,6 +501,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements 
_resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage); return new DeleteHostAnswer(true); + } @Override @@ -423,4 +510,158 @@ public class VmwareServerDiscoverer extends DiscovererBase implements .getSimpleName()); return super.stop(); } + + private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, VirtualSwitchType defaultVirtualSwitchType, String vSwitchType, String vSwitchName, Long clusterId) { + VmwareTrafficLabel trafficLabelObj = null; + Map clusterDetails = null; + try { + trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defaultVirtualSwitchType); + } catch (InvalidParameterValueException e) { + s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + + " traffic due to " + e.getMessage()); + throw e; + } + + if (defaultVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch)|| (vSwitchType == null && vSwitchName == null)) { + // Case of no cluster level override configuration defined. 
+ // Depend only on zone wide traffic label + // If global param for dvSwitch is false return default traffic info object with vmware standard vswitch + return trafficLabelObj; + } else { + // Need to persist cluster level override configuration to db + clusterDetails = _clusterDetailsDao.findDetails(clusterId); + } + + if (vSwitchName != null) { + trafficLabelObj.setVirtualSwitchName(vSwitchName); + if (trafficType == TrafficType.Guest) { + clusterDetails.put(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC, vSwitchName); + } else { + clusterDetails.put(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC, vSwitchName); + } + } + + if (vSwitchType != null) { + validateVswitchType(vSwitchType); + trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(vSwitchType)); + if (trafficType == TrafficType.Guest) { + clusterDetails.put(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, vSwitchType); + } else { + clusterDetails.put(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, vSwitchType); + } + } + + // Save cluster level override configuration to cluster details + _clusterDetailsDao.persist(clusterId, clusterDetails); + + return trafficLabelObj; + } + + private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, Map clusterDetails, VirtualSwitchType defVirtualSwitchType) { + VmwareTrafficLabel trafficLabelObj = null; + try { + trafficLabelObj = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defVirtualSwitchType); + } catch (InvalidParameterValueException e) { + s_logger.error("Failed to recognize virtual switch type specified for " + trafficType + + " traffic due to " + e.getMessage()); + throw e; + } + + if(defVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch)) { + return trafficLabelObj; + } + + if (trafficType == TrafficType.Guest) { + if(clusterDetails.containsKey(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC)) { + trafficLabelObj.setVirtualSwitchName(clusterDetails.get(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC)); + } + 
if(clusterDetails.containsKey(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC)) { + trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(clusterDetails.get(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC))); + } + } else if (trafficType == TrafficType.Public) { + if(clusterDetails.containsKey(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC)) { + trafficLabelObj.setVirtualSwitchName(clusterDetails.get(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC)); + } + if(clusterDetails.containsKey(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC)) { + trafficLabelObj.setVirtualSwitchType(VirtualSwitchType.getType(clusterDetails.get(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC))); + } + } + + return trafficLabelObj; + } + + private void _readGlobalConfigParameters() { + String value; + if (_configDao != null) { + value = _configDao.getValue(Config.VmwareUseDVSwitch.key()); + useDVS = Boolean.parseBoolean(value); + value = _configDao.getValue(Config.VmwareUseNexusVSwitch.key()); + nexusDVS = Boolean.parseBoolean(value); + } + } + + @Override + protected HashMap buildConfigParams(HostVO host) { + HashMap params = super.buildConfigParams(host); + + Map clusterDetails = _clusterDetailsDao.findDetails(host.getClusterId()); + // Get zone wide traffic labels from guest traffic and public traffic + String guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(host.getDataCenterId(), HypervisorType.VMware); + String publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(host.getDataCenterId(), HypervisorType.VMware); + _readGlobalConfigParameters(); + VirtualSwitchType defaultVirtualSwitchType = getDefaultVirtualSwitchType(); + + params.put("guestTrafficInfo", getTrafficInfo(TrafficType.Guest, guestTrafficLabel, clusterDetails, defaultVirtualSwitchType)); + params.put("publicTrafficInfo", getTrafficInfo(TrafficType.Public, publicTrafficLabel, clusterDetails, defaultVirtualSwitchType)); + + return params; + } + + private VirtualSwitchType getDefaultVirtualSwitchType() { + if (nexusDVS) + return 
VirtualSwitchType.NexusDistributedVirtualSwitch; + else if(useDVS) + return VirtualSwitchType.VMwareDistributedVirtualSwitch; + else + return VirtualSwitchType.StandardVirtualSwitch; + } + + @Override + public ServerResource reloadResource(HostVO host) { + String resourceName = host.getResource(); + ServerResource resource = getResource(resourceName); + + if (resource != null) { + _hostDao.loadDetails(host); + + HashMap params = buildConfigParams(host); + try { + resource.configure(host.getName(), params); + } catch (ConfigurationException e) { + s_logger.warn("Unable to configure resource due to " + e.getMessage()); + return null; + } + if (!resource.start()) { + s_logger.warn("Unable to start the resource"); + return null; + } + } + return resource; + } + + private void validateVswitchType(String inputVswitchType) { + VirtualSwitchType vSwitchType = VirtualSwitchType.getType(inputVswitchType); + if (vSwitchType == VirtualSwitchType.None) { + s_logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment."); + throw new InvalidParameterValueException("Invalid virtual switch type : " + inputVswitchType); + } + } + + @Override + public void putParam(Map params) { + if (_urlParams == null) { + _urlParams = new HashMap(); + } + _urlParams.putAll(params); + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java index e1ca6ccac03..fb6d3d6667f 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManager.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.vmware.manager.VmwareStorageManager; import com.cloud.hypervisor.vmware.mo.HostMO; import 
com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.Pair; @@ -29,10 +30,6 @@ import com.vmware.vim25.ManagedObjectReference; public interface VmwareManager { public final String CONTEXT_STOCK_NAME = "vmwareMgr"; - // this limitation comes from the fact that we are using linked clone on shared VMFS storage, - // we need to limit the size of vCenter cluster, http://en.wikipedia.org/wiki/VMware_VMFS - public final int MAX_HOSTS_PER_CLUSTER = 8; - String composeWorkerName(); String getSystemVMIsoFileNameOnDatastore(); @@ -57,19 +54,16 @@ public interface VmwareManager { Pair getAddiionalVncPortRange(); - int getMaxHostsPerCluster(); int getRouterExtraPublicNics(); boolean beginExclusiveOperation(int timeOutSeconds); void endExclusiveOperation(); - boolean getNexusVSwitchGlobalParameter(); + boolean getFullCloneFlag(); Map getNexusVSMCredentialsByClusterId(Long clusterId); String getPrivateVSwitchName(long dcId, HypervisorType hypervisorType); - - String getPublicVSwitchName(long dcId, HypervisorType hypervisorType); - - String getGuestVSwitchName(long dcId, HypervisorType hypervisorType); + + public String getRootDiskController(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 88e03f5a220..9d29abf8d35 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.hypervisor.vmware.manager; @@ -58,6 +58,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.vmware.VmwareCleanupMaid; import com.cloud.hypervisor.vmware.mo.DiskControllerType; import com.cloud.hypervisor.vmware.mo.HostFirewallSystemMO; @@ -66,6 +67,8 @@ import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VmwareHostType; +import com.cloud.utils.ssh.SshHelper; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.network.CiscoNexusVSMDeviceVO; import com.cloud.network.NetworkModel; @@ -90,10 +93,11 @@ import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.DomainRouterVO; import com.google.gson.Gson; -import com.vmware.apputils.vim25.ServiceUtil; +import com.vmware.vim25.AboutInfo; import com.vmware.vim25.HostConnectSpec; import com.vmware.vim25.ManagedObjectReference; + @Local(value = {VmwareManager.class}) public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener { private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class); @@ -119,27 +123,25 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Inject ClusterVSMMapDao _vsmMapDao; @Inject ConfigurationDao _configDao; @Inject ConfigurationServer _configServer; + @Inject HypervisorCapabilitiesDao _hvCapabilitiesDao; String _mountParent; StorageLayer _storage; + String _privateNetworkVSwitchName = "vSwitch0"; - String _privateNetworkVSwitchName; - String _publicNetworkVSwitchName; - String _guestNetworkVSwitchName; + int _portsPerDvPortGroup = 256; 
boolean _nexusVSwitchActive; + boolean _fullCloneFlag; String _serviceConsoleName; String _managemetPortGroupName; String _defaultSystemVmNicAdapterType = VirtualEthernetCardType.E1000.toString(); String _recycleHungWorker = "false"; int _additionalPortRangeStart; int _additionalPortRangeSize; - int _maxHostsPerCluster; int _routerExtraPublicNics = 2; - String _cpuOverprovisioningFactor = "1"; String _reserveCpu = "false"; - String _memOverprovisioningFactor = "1"; String _reserveMem = "false"; String _rootDiskController = DiskControllerType.ide.toString(); @@ -193,43 +195,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _storage = new JavaStorageLayer(); _storage.configure("StorageLayer", params); } - value = _configDao.getValue(Config.VmwareUseNexusVSwitch.key()); - if(value == null) { - _nexusVSwitchActive = false; - } - else - { - _nexusVSwitchActive = Boolean.parseBoolean(value); - } - _privateNetworkVSwitchName = _configDao.getValue(Config.VmwarePrivateNetworkVSwitch.key()); - - if (_privateNetworkVSwitchName == null) { - if (_nexusVSwitchActive) { - _privateNetworkVSwitchName = "privateEthernetPortProfile"; - } else { - _privateNetworkVSwitchName = "vSwitch0"; - } - } - - _publicNetworkVSwitchName = _configDao.getValue(Config.VmwarePublicNetworkVSwitch.key()); - - if (_publicNetworkVSwitchName == null) { - if (_nexusVSwitchActive) { - _publicNetworkVSwitchName = "publicEthernetPortProfile"; - } else { - _publicNetworkVSwitchName = "vSwitch0"; - } - } - - _guestNetworkVSwitchName = _configDao.getValue(Config.VmwareGuestNetworkVSwitch.key()); - - if (_guestNetworkVSwitchName == null) { - if (_nexusVSwitchActive) { - _guestNetworkVSwitchName = "guestEthernetPortProfile"; - } else { - _guestNetworkVSwitchName = "vSwitch0"; - } + value = _configDao.getValue(Config.VmwareCreateFullClone.key()); + if (value == null) { + _fullCloneFlag = false; + } else { + _fullCloneFlag = Boolean.parseBoolean(value); } _serviceConsoleName = 
_configDao.getValue(Config.VmwareServiceConsole.key()); @@ -260,15 +231,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw _routerExtraPublicNics = NumbersUtil.parseInt(_configDao.getValue(Config.RouterExtraPublicNics.key()), 2); - _maxHostsPerCluster = NumbersUtil.parseInt(_configDao.getValue(Config.VmwarePerClusterHostMax.key()), VmwareManager.MAX_HOSTS_PER_CLUSTER); - _cpuOverprovisioningFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - if(_cpuOverprovisioningFactor == null || _cpuOverprovisioningFactor.isEmpty()) - _cpuOverprovisioningFactor = "1"; - - _memOverprovisioningFactor = _configDao.getValue(Config.MemOverprovisioningFactor.key()); - if(_memOverprovisioningFactor == null || _memOverprovisioningFactor.isEmpty()) - _memOverprovisioningFactor = "1"; - _reserveCpu = _configDao.getValue(Config.VmwareReserveCpu.key()); if(_reserveCpu == null || _reserveCpu.isEmpty()) _reserveCpu = "false"; @@ -320,8 +282,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } @Override - public boolean getNexusVSwitchGlobalParameter() { - return _nexusVSwitchActive; + public boolean getFullCloneFlag() { + return _fullCloneFlag; } @Override @@ -334,15 +296,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw return _netMgr.getDefaultManagementTrafficLabel(dcId, hypervisorType); } - @Override - public String getPublicVSwitchName(long dcId, HypervisorType hypervisorType) { - return _netMgr.getDefaultPublicTrafficLabel(dcId, hypervisorType); - } - - @Override - public String getGuestVSwitchName(long dcId, HypervisorType hypervisorType) { - return _netMgr.getDefaultGuestTrafficLabel(dcId, hypervisorType); - } private void prepareHost(HostMO hostMo, String privateTrafficLabel) throws Exception { // For ESX host, we need to enable host firewall to allow VNC access @@ -364,14 +317,10 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, 
Vmw } s_logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel); - if(!_nexusVSwitchActive) { HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000, false); - } - else { - HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000); - } + } - + @Override public List addHostToPodCluster(VmwareContext serviceContext, long dcId, Long podId, Long clusterId, String hostInventoryPath) throws Exception { @@ -388,23 +337,29 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw List returnedHostList = new ArrayList(); if(mor.getType().equals("ComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getServiceUtil().getDynamicProperty(mor, "host"); - assert(hosts != null); + List hosts = (List)serviceContext.getVimClient().getDynamicProperty(mor, "host"); + assert(hosts != null && hosts.size() > 0); // For ESX host, we need to enable host firewall to allow VNC access - HostMO hostMo = new HostMO(serviceContext, hosts[0]); + HostMO hostMo = new HostMO(serviceContext, hosts.get(0)); + prepareHost(hostMo, privateTrafficLabel); - returnedHostList.add(hosts[0]); + returnedHostList.add(hosts.get(0)); return returnedHostList; } else if(mor.getType().equals("ClusterComputeResource")) { - ManagedObjectReference[] hosts = (ManagedObjectReference[])serviceContext.getServiceUtil().getDynamicProperty(mor, "host"); + List hosts = (List)serviceContext.getVimClient().getDynamicProperty(mor, "host"); assert(hosts != null); - if(hosts.length > _maxHostsPerCluster) { - String msg = "vCenter cluster size is too big (current configured cluster size: " + _maxHostsPerCluster + ")"; + if (hosts.size() > 0) { + AboutInfo about = (AboutInfo)(serviceContext.getVimClient().getDynamicProperty(hosts.get(0), "config.product")); + String version = about.getApiVersion(); + int maxHostsPerCluster = 
_hvCapabilitiesDao.getMaxHostsPerCluster(HypervisorType.VMware, version); + if (hosts.size() > maxHostsPerCluster) { + String msg = "vCenter cluster size is too big (current configured cluster size: " + maxHostsPerCluster + ")"; s_logger.error(msg); throw new DiscoveredWithErrorException(msg); } + } for(ManagedObjectReference morHost: hosts) { // For ESX host, we need to enable host firewall to allow VNC access @@ -420,7 +375,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw returnedHostList.add(mor); return returnedHostList; } else { - s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.get_value() + " from inventory path: " + hostInventoryPath); + s_logger.error("Unsupport host type " + mor.getType() + ":" + mor.getValue() + " from inventory path: " + hostInventoryPath); return null; } } @@ -433,8 +388,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw private ManagedObjectReference addHostToVCenterCluster(VmwareContext serviceContext, ManagedObjectReference morCluster, String host, String userName, String password) throws Exception { - ServiceUtil serviceUtil = serviceContext.getServiceUtil(); - ManagedObjectReference morHost = serviceUtil.getDecendentMoRef(morCluster, "HostSystem", host); + VmwareClient vclient = serviceContext.getVimClient(); + ManagedObjectReference morHost = vclient.getDecendentMoRef(morCluster, "HostSystem", host); if(morHost == null) { HostConnectSpec hostSpec = new HostConnectSpec(); hostSpec.setUserName(userName); @@ -442,16 +397,16 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw hostSpec.setHostName(host); hostSpec.setForce(true); // forcely take over the host - ManagedObjectReference morTask = serviceContext.getService().addHost_Task(morCluster, hostSpec, true, null, null); - String taskResult = serviceUtil.waitForTask(morTask); - if(!taskResult.equals("sucess")) { + ManagedObjectReference morTask = 
serviceContext.getService().addHostTask(morCluster, hostSpec, true, null, null); + boolean taskResult = vclient.waitForTask(morTask); + if(!taskResult) { s_logger.error("Unable to add host " + host + " to vSphere cluster due to " + TaskMO.getTaskFailureInfo(serviceContext, morTask)); throw new CloudRuntimeException("Unable to add host " + host + " to vSphere cluster due to " + taskResult); } serviceContext.waitForTaskProgressDone(morTask); // init morHost after it has been created - morHost = serviceUtil.getDecendentMoRef(morCluster, "HostSystem", host); + morHost = vclient.getDecendentMoRef(morCluster, "HostSystem", host); if(morHost == null) { throw new CloudRuntimeException("Successfully added host into vSphere but unable to find it later on?!. Please make sure you are either using IP address or full qualified domain name for host"); } @@ -469,6 +424,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Override public String getSecondaryStorageStoreUrl(long dcId) { + List secStorageHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(dcId); if(secStorageHosts.size() > 0) return secStorageHosts.get(0).getStorageUrl(); @@ -495,18 +451,14 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw @Override public void setupResourceStartupParams(Map params) { - params.put("private.network.vswitch.name", _privateNetworkVSwitchName); - params.put("public.network.vswitch.name", _publicNetworkVSwitchName); - params.put("guest.network.vswitch.name", _guestNetworkVSwitchName); - params.put("vmware.use.nexus.vswitch", _nexusVSwitchActive); + params.put("vmware.create.full.clone", _fullCloneFlag); params.put("service.console.name", _serviceConsoleName); params.put("management.portgroup.name", _managemetPortGroupName); - params.put("cpu.overprovisioning.factor", _cpuOverprovisioningFactor); params.put("vmware.reserve.cpu", _reserveCpu); - params.put("mem.overprovisioning.factor", _memOverprovisioningFactor); 
params.put("vmware.reserve.mem", _reserveMem); params.put("vmware.root.disk.controller", _rootDiskController); params.put("vmware.recycle.hung.wokervm", _recycleHungWorker); + params.put("ports.per.dvportgroup", _portsPerDvPortGroup); } @Override @@ -543,6 +495,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage"); _configServer.updateKeyPairs(); + try { FileUtil.copyfile(srcIso, destIso); } catch(IOException e) { @@ -579,29 +532,36 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw private File getSystemVMPatchIsoFile() { // locate systemvm.iso - URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); - File file = new File(url.getFile()); - File isoFile = new File(file.getParent() + "/vms/systemvm.iso"); - if (!isoFile.exists()) { - isoFile = new File("/usr/lib64/cloud/common/" + "/vms/systemvm.iso"); - if (!isoFile.exists()) { - isoFile = new File("/usr/lib/cloud/common/" + "/vms/systemvm.iso"); - } + URL url = this.getClass().getClassLoader().getResource("vms/systemvm.iso"); + File isoFile = null; + if (url != null) { + isoFile = new File(url.getPath()); + } + + if(isoFile == null || !isoFile.exists()) { + isoFile = new File("/usr/share/cloudstack-common/vms/systemvm.iso"); + } + + assert(isoFile != null); + if(!isoFile.exists()) { + s_logger.error("Unable to locate systemvm.iso in your setup at " + isoFile.toString()); } return isoFile; } @Override public File getSystemVMKeyFile() { - URL url = this.getClass().getProtectionDomain().getCodeSource().getLocation(); - File file = new File(url.getFile()); - - File keyFile = new File(file.getParent(), "/scripts/vm/systemvm/id_rsa.cloud"); - if (!keyFile.exists()) { - keyFile = new File("/usr/lib64/cloud/common" + "/scripts/vm/systemvm/id_rsa.cloud"); - if (!keyFile.exists()) { - keyFile = new File("/usr/lib/cloud/common" + 
"/scripts/vm/systemvm/id_rsa.cloud"); - } + URL url = this.getClass().getClassLoader().getResource("scripts/vm/systemvm/id_rsa.cloud"); + File keyFile = null; + if ( url != null ){ + keyFile = new File(url.getPath()); + } + if (keyFile == null || !keyFile.exists()) { + keyFile = new File("/usr/share/cloudstack-common/scripts/vm/systemvm/id_rsa.cloud"); + } + assert(keyFile != null); + if(!keyFile.exists()) { + s_logger.error("Unable to locate id_rsa.cloud in your setup at " + keyFile.toString()); } return keyFile; } @@ -868,11 +828,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw return new Pair(_additionalPortRangeStart, _additionalPortRangeSize); } - @Override - public int getMaxHostsPerCluster() { - return this._maxHostsPerCluster; - } - @Override public int getRouterExtraPublicNics() { return this._routerExtraPublicNics; @@ -886,7 +841,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw vsmMapVO = _vsmMapDao.findByClusterId(clusterId); long vsmId = 0; if (vsmMapVO != null) { - vsmId = vsmMapVO.getVsmId(); + vsmId = vsmMapVO.getVsmId(); s_logger.info("vsmId is " + vsmId); nexusVSM = _nexusDao.findById(vsmId); s_logger.info("Fetching nexus vsm credentials from database."); @@ -894,7 +849,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw else { s_logger.info("Found empty vsmMapVO."); return null; - } + } Map nexusVSMCredentials = new HashMap(); if (nexusVSM != null) { @@ -905,4 +860,9 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw } return nexusVSMCredentials; } + + @Override + public String getRootDiskController() { + return _rootDiskController; + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index c7b7cd3bcba..e11dd53f3c9 100644 --- 
a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package com.cloud.hypervisor.vmware.manager; @@ -21,6 +21,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.OutputStreamWriter; import java.rmi.RemoteException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -83,24 +84,24 @@ import com.vmware.vim25.VirtualSCSISharing; public class VmwareStorageManagerImpl implements VmwareStorageManager { private static final Logger s_logger = Logger.getLogger(VmwareStorageManagerImpl.class); - + private final VmwareStorageMount _mountService; private final StorageLayer _storage = new JavaStorageLayer(); - + private int _timeout; - + public VmwareStorageManagerImpl(VmwareStorageMount mountService) { assert(mountService != null); _mountService = mountService; } - + public void configure(Map params) { s_logger.info("Configure VmwareStorageManagerImpl"); - + String value = (String)params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 1440) * 1000; } - + @Override public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadCommand cmd) { String secondaryStorageUrl = cmd.getSecondaryStorageUrl(); @@ -130,18 +131,18 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } templateName = cmd.getName(); } - + VmwareContext context = hostService.getServiceContext(cmd); try { VmwareHypervisorHost hyperHost = 
hostService.getHyperHost(context, cmd); - - String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().get_value()).getBytes()).toString(); + + String templateUuidName = UUID.nameUUIDFromBytes((templateName + "@" + cmd.getPoolUuid() + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); // truncate template name to 32 chars to ensure they work well with vSphere API's. - templateUuidName = templateUuidName.replace("-", ""); - + templateUuidName = templateUuidName.replace("-", ""); + DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); - + if (templateMo == null) { if(s_logger.isInfoEnabled()) s_logger.info("Template " + templateName + " is not setup yet, setup template from secondary storage with uuid name: " + templateUuidName); @@ -167,7 +168,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new PrimaryStorageDownloadAnswer(msg); } } - + @Override public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd) { Long accountId = cmd.getAccountId(); @@ -198,15 +199,15 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if (vmMo == null) { if(s_logger.isDebugEnabled()) s_logger.debug("Unable to find owner VM for BackupSnapshotCommand on host " + hyperHost.getHyperHostName() + ", will try within datacenter"); - + vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName()); if(vmMo == null) { dsMo = new DatastoreMO(hyperHost.getContext(), morDs); - + workerVMName = hostService.getWorkerName(context, cmd, 0); - + // attach a volume to dummay wrapper VM for taking snapshot and exporting the VM for backup - if (!hyperHost.createBlankVm(workerVMName, 1, 512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier._otherGuest.toString(), morDs, false)) { + if (!hyperHost.createBlankVm(workerVMName, 1, 
512, 0, false, 4, 0, VirtualMachineGuestOsIdentifier.OTHER_GUEST.value(), morDs, false)) { String msg = "Unable to create worker VM to execute BackupSnapshotCommand"; s_logger.error(msg); throw new Exception(msg); @@ -216,17 +217,17 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception("Failed to find the newly create or relocated VM. vmName: " + workerVMName); } workerVm = vmMo; - + // attach volume to worker VM String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); - } - } - + } + } + if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + cmd.getSnapshotName(), false, false)) { throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName()); } - + snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid, hostService.getWorkerName(context, cmd, 1)); @@ -234,7 +235,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if (success) { details = "Successfully backedUp the snapshotUuid: " + snapshotUuid + " to secondary storage."; } - + } finally { if(vmMo != null){ ManagedObjectReference snapshotMor = vmMo.getSnapshotMor(snapshotUuid); @@ -242,7 +243,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmMo.removeSnapshot(snapshotUuid, false); } } - + try { if (workerVm != null) { // detach volume and destroy worker vm @@ -251,7 +252,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } catch (Throwable e) { s_logger.warn("Failed to destroy worker VM: " + workerVMName); - } + } } } catch (Throwable e) { if (e instanceof RemoteException) { @@ -278,7 +279,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VmwareContext context = hostService.getServiceContext(cmd); try { 
VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - + VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName()); if (vmMo == null) { if(s_logger.isDebugEnabled()) @@ -294,7 +295,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { Ternary result = createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), - secondaryStoragePoolURL, volumePath, + secondaryStoragePoolURL, volumePath, hostService.getWorkerName(context, cmd, 0)); return new CreatePrivateTemplateAnswer(cmd, true, null, @@ -344,7 +345,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CreatePrivateTemplateAnswer(cmd, false, details); } } - + @Override public Answer execute(VmwareHostService hostService, CopyVolumeCommand cmd) { Long volumeId = cmd.getVolumeId(); @@ -393,64 +394,64 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { return new CopyVolumeAnswer(cmd, false, "CopyVolumeCommand failed due to exception: " + StringUtils.getExceptionStackInfo(e), null, null); } } - + @Override - public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCommand cmd) { + public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCommand cmd) { - String primaryStorageNameLabel = cmd.getPrimaryStoragePoolNameLabel(); - Long accountId = cmd.getAccountId(); - Long volumeId = cmd.getVolumeId(); - String secondaryStorageUrl = cmd.getSecondaryStorageUrl(); - String backedUpSnapshotUuid = cmd.getSnapshotUuid(); + String primaryStorageNameLabel = cmd.getPrimaryStoragePoolNameLabel(); + Long accountId = cmd.getAccountId(); + Long volumeId = cmd.getVolumeId(); + String secondaryStorageUrl = cmd.getSecondaryStorageUrl(); + String backedUpSnapshotUuid = cmd.getSnapshotUuid(); - String details = null; - boolean success = false; - String newVolumeName = UUID.randomUUID().toString().replaceAll("-", ""); + String details = null; + boolean success = false; + 
String newVolumeName = UUID.randomUUID().toString().replaceAll("-", ""); - VmwareContext context = hostService.getServiceContext(cmd); - try { - VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); + VmwareContext context = hostService.getServiceContext(cmd); + try { + VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); ManagedObjectReference morPrimaryDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStorageNameLabel); - if (morPrimaryDs == null) { - String msg = "Unable to find datastore: " + primaryStorageNameLabel; - s_logger.error(msg); - throw new Exception(msg); - } + if (morPrimaryDs == null) { + String msg = "Unable to find datastore: " + primaryStorageNameLabel; + s_logger.error(msg); + throw new Exception(msg); + } - DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs); - details = createVolumeFromSnapshot(hyperHost, primaryDsMo, - newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid); - if (details == null) { - success = true; - } - } catch (Throwable e) { - if (e instanceof RemoteException) { - hostService.invalidateServiceContext(context); - } - - s_logger.error("Unexpecpted exception ", e); - details = "CreateVolumeFromSnapshotCommand exception: " + StringUtils.getExceptionStackInfo(e); - } + DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs); + details = createVolumeFromSnapshot(hyperHost, primaryDsMo, + newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid); + if (details == null) { + success = true; + } + } catch (Throwable e) { + if (e instanceof RemoteException) { + hostService.invalidateServiceContext(context); + } - return new CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName); - } + s_logger.error("Unexpecpted exception ", e); + details = "CreateVolumeFromSnapshotCommand exception: " + StringUtils.getExceptionStackInfo(e); + } + + return new 
CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName); + } // templateName: name in secondary storage // templateUuid: will be used at hypervisor layer private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { - - s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + + s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName); - + String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl); s_logger.info("Secondary storage mount point: " + secondaryMountPoint); - - String srcOVAFileName = secondaryMountPoint + "/" + templatePathAtSecondaryStorage + + + String srcOVAFileName = secondaryMountPoint + "/" + templatePathAtSecondaryStorage + templateName + "." + ImageFormat.OVA.getFileExtension(); - + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -465,40 +466,40 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception(msg); } } - + srcFileName = getOVFFilePath(srcOVAFileName); - if(srcFileName == null) { - String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; + if(srcFileName == null) { + String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; s_logger.error(msg); throw new Exception(msg); } - + String vmName = templateUuid; hyperHost.importVmFromOVF(srcFileName, vmName, datastoreMo, "thin"); - + VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); if(vmMo == null) { - String msg = "Failed to import OVA template. secondaryStorage: " + String msg = "Failed to import OVA template. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage + ", templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } - + if(vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) { vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, templateUuid); vmMo.markAsTemplate(); } else { vmMo.destroy(); - String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; + String msg = "Unable to create base snapshot for template, templateName: " + templateName + ", templateUuid: " + templateUuid; s_logger.error(msg); throw new Exception(msg); } } - - private Ternary createTemplateFromVolume(VirtualMachineMO vmMo, long accountId, long templateId, String templateUniqueName, + + private Ternary createTemplateFromVolume(VirtualMachineMO vmMo, long accountId, long templateId, String templateUniqueName, String secStorageUrl, String volumePath, String workerVmName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String installPath = getTemplateRelativeDirInSecStorage(accountId, templateId); String installFullPath = secondaryMountPoint + "/" + installPath; @@ -506,16 +507,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { Script command = new Script(false, "mkdir", _timeout, s_logger); command.add("-p"); command.add(installFullPath); - + String result = command.execute(); if(result != null) { - String msg = "unable to prepare template directory: " + String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; s_logger.error(msg); throw new Exception(msg); } } - + VirtualMachineMO clonedVm = null; try { Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath, false); @@ -524,15 +525,15 @@ public class VmwareStorageManagerImpl implements 
VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + if(!vmMo.createSnapshot(templateUniqueName, "Temporary snapshot for template creation", false, false)) { String msg = "Unable to take snapshot for creating template from volume. volume path: " + volumePath; s_logger.error(msg); throw new Exception(msg); } - + // 4 MB is the minimum requirement for VM memory in VMware - vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), + vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if(clonedVm == null) { @@ -540,9 +541,9 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); - + long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); VmdkProcessor processor = new VmdkProcessor(); Map params = new HashMap(); @@ -552,54 +553,54 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); return new Ternary(installPath + "/" + templateUniqueName + ".ova", physicalSize, virtualSize); - + } finally { if(clonedVm != null) { clonedVm.detachAllDisks(); clonedVm.destroy(); } - + vmMo.removeSnapshot(templateUniqueName, false); } } - - private Ternary createTemplateFromSnapshot(long accountId, long templateId, String templateUniqueName, + + private Ternary createTemplateFromSnapshot(long accountId, long templateId, String templateUniqueName, String secStorageUrl, long volumeId, String backedUpSnapshotUuid) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String installPath = getTemplateRelativeDirInSecStorage(accountId, 
templateId); String installFullPath = secondaryMountPoint + "/" + installPath; String installFullName = installFullPath + "/" + templateUniqueName + ".ova"; - String snapshotFullName = secondaryMountPoint + "/" + getSnapshotRelativeDirInSecStorage(accountId, volumeId) + String snapshotFullName = secondaryMountPoint + "/" + getSnapshotRelativeDirInSecStorage(accountId, volumeId) + "/" + backedUpSnapshotUuid + ".ova"; String result; Script command; - + synchronized(installPath.intern()) { command = new Script(false, "mkdir", _timeout, s_logger); command.add("-p"); command.add(installFullPath); - + result = command.execute(); if(result != null) { - String msg = "unable to prepare template directory: " + String msg = "unable to prepare template directory: " + installPath + ", storage: " + secStorageUrl + ", error msg: " + result; s_logger.error(msg); throw new Exception(msg); } } - + try { command = new Script(false, "cp", _timeout, s_logger); command.add(snapshotFullName); command.add(installFullName); result = command.execute(); if(result != null) { - String msg = "unable to copy snapshot " + snapshotFullName + " to " + installFullPath; + String msg = "unable to copy snapshot " + snapshotFullName + " to " + installFullPath; s_logger.error(msg); throw new Exception(msg); } - + // untar OVA file at template directory command = new Script("tar", 0, s_logger); command.add("--no-same-owner"); @@ -608,12 +609,12 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.info("Executing command: " + command.toString()); result = command.execute(); if(result != null) { - String msg = "unable to untar snapshot " + snapshotFullName + " to " - + installFullPath; + String msg = "unable to untar snapshot " + snapshotFullName + " to " + + installFullPath; s_logger.error(msg); throw new Exception(msg); } - + long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); VmdkProcessor processor = new VmdkProcessor(); Map params = 
new HashMap(); @@ -623,45 +624,45 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { postCreatePrivateTemplate(installFullPath, templateId, templateUniqueName, physicalSize, virtualSize); return new Ternary(installPath + "/" + templateUniqueName + ".ova", physicalSize, virtualSize); - + } catch(Exception e) { // TODO, clean up left over files throw e; } } - - private void postCreatePrivateTemplate(String installFullPath, long templateId, + + private void postCreatePrivateTemplate(String installFullPath, long templateId, String templateName, long size, long virtualSize) throws Exception { // TODO a bit ugly here BufferedWriter out = null; try { out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"))); - out.write("filename=" + templateName + ".ova"); + out.write("filename=" + templateName + ".ova"); out.newLine(); - out.write("description="); + out.write("description="); out.newLine(); - out.write("checksum="); + out.write("checksum="); out.newLine(); - out.write("hvm=false"); + out.write("hvm=false"); out.newLine(); - out.write("size=" + size); + out.write("size=" + size); out.newLine(); - out.write("ova=true"); + out.write("ova=true"); out.newLine(); - out.write("id=" + templateId); + out.write("id=" + templateId); out.newLine(); - out.write("public=false"); + out.write("public=false"); out.newLine(); - out.write("ova.filename=" + templateName + ".ova"); + out.write("ova.filename=" + templateName + ".ova"); out.newLine(); out.write("uniquename=" + templateName); out.newLine(); - out.write("ova.virtualsize=" + virtualSize); + out.write("ova.virtualsize=" + virtualSize); out.newLine(); - out.write("virtualsize=" + virtualSize); + out.write("virtualsize=" + virtualSize); out.newLine(); - out.write("ova.size=" + size); + out.write("ova.size=" + size); out.newLine(); } finally { if(out != null) @@ -669,21 +670,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } 
- private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, + private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, String secStorageUrl, String snapshotBackupUuid) throws Exception { - - restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, + + restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), snapshotBackupUuid); return null; } - - private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, + + private void restoreVolumeFromSecStorage(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, String secStorageUrl, String secStorageDir, String backupName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); - String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + String srcOVAFileName = secondaryMountPoint + "/" + secStorageDir + "/" + backupName + "." 
+ ImageFormat.OVA.getFileExtension(); - + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -698,21 +699,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { throw new Exception(msg); } } - + srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { - String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; + String msg = "Unable to locate OVF file in template package directory: " + srcOVAFileName; s_logger.error(msg); throw new Exception(msg); } - + VirtualMachineMO clonedVm = null; try { hyperHost.importVmFromOVF(srcFileName, newVolumeName, primaryDsMo, "thin"); clonedVm = hyperHost.findVmOnHyperHost(newVolumeName); if(clonedVm == null) throw new Exception("Unable to create container VM for volume creation"); - + clonedVm.moveAllVmDiskFiles(primaryDsMo, "", false); clonedVm.detachAllDisks(); } finally { @@ -722,24 +723,24 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } } - - private String backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, long accountId, long volumeId, - String volumePath, String snapshotUuid, String secStorageUrl, + + private String backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, long accountId, long volumeId, + String volumePath, String snapshotUuid, String secStorageUrl, String prevSnapshotUuid, String prevBackupUuid, String workerVmName) throws Exception { - + String backupUuid = UUID.randomUUID().toString(); - exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, + exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), backupUuid, workerVmName); return backupUuid; } - - private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, + + private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String 
secStorageDir, String exportName, String workerVmName) throws Exception { - + String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); String exportPath = secondaryMountPoint + "/" + secStorageDir; - + synchronized(exportPath.intern()) { if(!new File(exportPath).exists()) { Script command = new Script(false, "mkdir", _timeout, s_logger); @@ -752,16 +753,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { VirtualMachineMO clonedVm = null; try { - + Pair volumeDeviceInfo = vmMo.getDiskDevice(volumePath, false); if(volumeDeviceInfo == null) { String msg = "Unable to find related disk device for volume. volume path: " + volumePath; s_logger.error(msg); throw new Exception(msg); } - + // 4 MB is the minimum requirement for VM memory in VMware - vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), + vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = vmMo.getRunningHost().findVmOnHyperHost(workerVmName); if(clonedVm == null) { @@ -769,7 +770,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + clonedVm.exportVm(exportPath, exportName, true, true); } finally { if(clonedVm != null) { @@ -778,7 +779,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } } - + private String deleteSnapshotOnSecondaryStorge(long accountId, long volumeId, String secStorageUrl, String backupUuid) throws Exception { String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl); @@ -787,18 +788,18 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { if(file.exists()) { if(file.delete()) return null; - + } else { return "Backup file does not exist. 
backupUuid: " + backupUuid; } - + return "Failed to delete snapshot backup file, backupUuid: " + backupUuid; } - - private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, - String vmName, long volumeId, String poolId, String volumePath, + + private Pair copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, + String vmName, long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName) throws Exception { - + String volumeFolder = String.valueOf(volumeId) + "/"; VirtualMachineMO workerVm=null; VirtualMachineMO vmMo=null; @@ -822,21 +823,21 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmConfig.setName(workerVmName); vmConfig.setMemoryMB((long) 4); vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier._otherGuest.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.value()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); - + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); + vmConfig.getDeviceChange().add(scsiControllerSpec); + hyperHost.createVm(vmConfig); workerVm = hyperHost.findVmOnHyperHost(workerVmName); if (workerVm == null) { @@ -844,7 +845,7 @@ public class 
VmwareStorageManagerImpl implements VmwareStorageManager { s_logger.error(msg); throw new Exception(msg); } - + //attach volume to worker VM String datastoreVolumePath = String.format("[%s] %s.vmdk", dsMo.getName(), volumePath); workerVm.attachDisk(new String[] { datastoreVolumePath }, morDs); @@ -853,7 +854,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false); - exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, + exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1)); return new Pair(volumeFolder, exportName); @@ -867,16 +868,16 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, + private Pair copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName) throws Exception { String volumeFolder = String.valueOf(volumeId) + "/"; String newVolume = UUID.randomUUID().toString().replaceAll("-", ""); restoreVolumeFromSecStorage(hyperHost, dsMo, newVolume, secStorageUrl, "volumes/" + volumeFolder, exportName); - + return new Pair(volumeFolder, newVolume); } - + private String getOVFFilePath(String srcOVAFileName) { File file = new File(srcOVAFileName); assert(_storage != null); @@ -891,11 +892,11 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } return null; } - + private static String getTemplateRelativeDirInSecStorage(long accountId, long templateId) { return "template/tmpl/" + accountId + "/" + templateId; } - + private static String getSnapshotRelativeDirInSecStorage(long accountId, long volumeId) { return "snapshots/" + accountId + "/" + volumeId; } @@ -912,18 +913,18 @@ public class 
VmwareStorageManagerImpl implements VmwareStorageManager { Map mapNewDisk = new HashMap(); try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - + // wait if there are already VM snapshot task running ManagedObjectReference taskmgr = context.getServiceContent().getTaskManager(); - ManagedObjectReference[] tasks = (ManagedObjectReference[]) context.getServiceUtil().getDynamicProperty(taskmgr, "recentTask"); + List tasks = (ArrayList)context.getVimClient().getDynamicProperty(taskmgr, "recentTask"); for (ManagedObjectReference taskMor : tasks) { - TaskInfo info = (TaskInfo) (context.getServiceUtil().getDynamicProperty(taskMor, "info")); + TaskInfo info = (TaskInfo) (context.getVimClient().getDynamicProperty(taskMor, "info")); if(info.getEntityName().equals(cmd.getVmName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")){ s_logger.debug("There is already a VM snapshot task running, wait for it"); - context.getServiceUtil().waitForTask(taskMor); + context.getVimClient().waitForTask(taskMor); } } - + vmMo = hyperHost.findVmOnHyperHost(vmName); if(vmMo == null) vmMo = hyperHost.findVmOnPeerHyperHost(vmName); @@ -952,7 +953,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { mapNewDisk.put(s[0], vmdkName); } } - + // update volume path using maps for (VolumeTO volumeTO : volumeTOs) { String parentUUID = volumeTO.getPath(); @@ -1003,7 +1004,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } s_logger.debug("snapshot: " + vmSnapshotName + " is removed"); - // after removed snapshot, the volumes' paths have been changed for the VM, needs to report new paths to manager + // after removed snapshot, the volumes' paths have been changed for the VM, needs to report new paths to manager VirtualDisk[] vdisks = vmMo.getAllDiskDevice(); for (int i = 0; i < vdisks.length; i++) { @SuppressWarnings("deprecation") @@ -1045,18 +1046,18 @@ public class VmwareStorageManagerImpl implements 
VmwareStorageManager { Map mapNewDisk = new HashMap(); try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(context, cmd); - + // wait if there are already VM revert task running ManagedObjectReference taskmgr = context.getServiceContent().getTaskManager(); - ManagedObjectReference[] tasks = (ManagedObjectReference[]) context.getServiceUtil().getDynamicProperty(taskmgr, "recentTask"); + List tasks = (ArrayList)context.getVimClient().getDynamicProperty(taskmgr, "recentTask"); for (ManagedObjectReference taskMor : tasks) { - TaskInfo info = (TaskInfo) (context.getServiceUtil().getDynamicProperty(taskMor, "info")); + TaskInfo info = (TaskInfo) (context.getVimClient().getDynamicProperty(taskMor, "info")); if(info.getEntityName().equals(cmd.getVmName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")){ s_logger.debug("There is already a VM snapshot task running, wait for it"); - context.getServiceUtil().waitForTask(taskMor); + context.getVimClient().waitForTask(taskMor); } } - + HostMO hostMo = (HostMO) hyperHost; vmMo = hyperHost.findVmOnHyperHost(vmName); if(vmMo == null) @@ -1113,7 +1114,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } - + private VirtualMachineMO createWorkingVM(DatastoreMO dsMo, VmwareHypervisorHost hyperHost) throws Exception { String uniqueName = UUID.randomUUID().toString(); VirtualMachineMO workingVM = null; @@ -1121,20 +1122,20 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { vmConfig.setName(uniqueName); vmConfig.setMemoryMB((long) 4); vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier._otherGuest.toString()); + vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.toString()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - 
scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); + vmConfig.getDeviceChange().add(scsiControllerSpec); hyperHost.createVm(vmConfig); workingVM = hyperHost.findVmOnHyperHost(uniqueName); return workingVM; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index 8324bcf0d30..5db9da3c02d 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -23,12 +23,10 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.hypervisor.vmware.manager.VmwareManager; -import com.cloud.hypervisor.vmware.manager.VmwareManagerImpl; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.StringUtils; -import com.cloud.utils.component.ComponentContext; -import com.vmware.apputils.version.ExtendedAppUtil; @Component public class VmwareContextFactory { @@ -56,15 +54,15 @@ public class VmwareContextFactory { assert(vCenterPassword != null); String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; + //String[] params = new String[] {"--url", serviceUrl, "--username", 
vCenterUserName, "--password", vCenterPassword }; if(s_logger.isDebugEnabled()) s_logger.debug("initialize VmwareContext. url: " + serviceUrl + ", username: " + vCenterUserName + ", password: " + StringUtils.getMaskedPasswordForDisplay(vCenterPassword)); - ExtendedAppUtil appUtil = ExtendedAppUtil.initialize(vCenterAddress + "-" + s_seq++, params); + VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); + vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); - appUtil.connect(); - VmwareContext context = new VmwareContext(appUtil, vCenterAddress); + VmwareContext context = new VmwareContext(vimClient, vCenterAddress); context.registerStockObject(VmwareManager.CONTEXT_STOCK_NAME, s_vmwareMgr); context.registerStockObject("serviceconsole", s_vmwareMgr.getServiceConsolePortGroupName()); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 5cac253fc0c..634827b6468 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.hypervisor.vmware.resource; @@ -24,6 +24,7 @@ import java.net.URI; import java.nio.channels.SocketChannel; import java.rmi.RemoteException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -159,9 +160,9 @@ import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; import com.cloud.agent.api.storage.DestroyCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; -import com.cloud.agent.api.to.FirewallRuleTO; import com.cloud.agent.api.storage.ResizeVolumeAnswer; import com.cloud.agent.api.storage.ResizeVolumeCommand; +import com.cloud.agent.api.to.FirewallRuleTO; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.PortForwardingRuleTO; @@ -192,6 +193,7 @@ import com.cloud.hypervisor.vmware.mo.VirtualSwitchType; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHostNetworkSummary; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHostResourceSummary; +import com.cloud.hypervisor.vmware.resource.VmwareContextFactory; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.hypervisor.vmware.util.VmwareGuestOsMapper; import com.cloud.hypervisor.vmware.util.VmwareHelper; @@ -199,6 +201,8 @@ import com.cloud.network.HAProxyConfigurator; import com.cloud.network.LoadBalancerConfigurator; import com.cloud.network.Networks; import com.cloud.network.Networks.BroadcastDomainType; +import com.cloud.network.Networks.TrafficType; +import com.cloud.network.VmwareTrafficLabel; import com.cloud.network.rules.FirewallRule; import com.cloud.resource.ServerResource; import com.cloud.serializer.GsonHelper; @@ -244,7 +248,9 @@ import com.vmware.vim25.PerfMetricSeries; import com.vmware.vim25.PerfQuerySpec; import com.vmware.vim25.PerfSampleInfo; 
import com.vmware.vim25.RuntimeFault; +import com.vmware.vim25.RuntimeFaultFaultMsg; import com.vmware.vim25.ToolsUnavailable; +import com.vmware.vim25.ToolsUnavailableFaultMsg; import com.vmware.vim25.VimPortType; import com.vmware.vim25.VirtualDevice; import com.vmware.vim25.VirtualDeviceConfigSpec; @@ -260,13 +266,15 @@ import com.vmware.vim25.VirtualMachinePowerState; import com.vmware.vim25.VirtualMachineRuntimeInfo; import com.vmware.vim25.VirtualSCSISharing; + public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); protected String _name; protected final long _ops_timeout = 900000; // 15 minutes time out to time - protected final int _shutdown_waitMs = 300000; // wait up to 5 minutes for shutdown + + protected final int _shutdown_waitMs = 300000; // wait up to 5 minutes for shutdown // out an operation protected final int _retry = 24; @@ -284,18 +292,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected String _vCenterAddress; protected String _privateNetworkVSwitchName; - protected String _publicNetworkVSwitchName; - protected String _guestNetworkVSwitchName; - protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch; - protected boolean _nexusVSwitch = false; + protected VmwareTrafficLabel _guestTrafficInfo = new VmwareTrafficLabel(TrafficType.Guest); + protected VmwareTrafficLabel _publicTrafficInfo = new VmwareTrafficLabel(TrafficType.Public); + protected int _portsPerDvPortGroup; + protected boolean _fullCloneFlag = false; - protected float _cpuOverprovisioningFactor = 1; protected boolean _reserveCpu = false; - protected float _memOverprovisioningFactor = 1; protected boolean _reserveMem = false; protected boolean _recycleHungWorker = false; - protected DiskControllerType _rootDiskController = DiskControllerType.ide; + protected DiskControllerType _rootDiskController 
= DiskControllerType.ide; protected ManagedObjectReference _morHyperHost; protected VmwareContext _serviceContext; @@ -311,9 +317,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected static HashMap s_statesTable; static { s_statesTable = new HashMap(); - s_statesTable.put(VirtualMachinePowerState.poweredOn, State.Running); - s_statesTable.put(VirtualMachinePowerState.poweredOff, State.Stopped); - s_statesTable.put(VirtualMachinePowerState.suspended, State.Stopped); + s_statesTable.put(VirtualMachinePowerState.POWERED_ON, State.Running); + s_statesTable.put(VirtualMachinePowerState.POWERED_OFF, State.Stopped); + s_statesTable.put(VirtualMachinePowerState.SUSPENDED, State.Stopped); } public VmwareResource() { @@ -322,6 +328,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa @Override public Answer executeRequest(Command cmd) { + if(s_logger.isTraceEnabled()) s_logger.trace("Begin executeRequest(), cmd: " + cmd.getClass().getSimpleName()); @@ -479,14 +486,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa synchronized (this) { try { - JmxUtil.registerMBean("VMware " + _morHyperHost.get_value(), "Command " + cmdSequence + "-" + cmd.getClass().getSimpleName(), mbean); + JmxUtil.registerMBean("VMware " + _morHyperHost.getValue(), "Command " + cmdSequence + "-" + cmd.getClass().getSimpleName(), mbean); _cmdMBeans.add(mbean); if (_cmdMBeans.size() >= MAX_CMD_MBEAN) { PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0); _cmdMBeans.remove(0); - JmxUtil.unregisterMBean("VMware " + _morHyperHost.get_value(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); + JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name")); } } catch (Exception e) { if(s_logger.isTraceEnabled()) @@ -498,12 +505,13 @@ public class VmwareResource implements 
StoragePoolResource, ServerResource, Vmwa NDC.pop(); } + if(s_logger.isTraceEnabled()) s_logger.trace("End executeRequest(), cmd: " + cmd.getClass().getSimpleName()); return answer; } - + private Answer execute(ResizeVolumeCommand cmd) { String path = cmd.getPath(); String vmName = cmd.getInstanceName(); @@ -531,15 +539,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (newSize < oldSize){ throw new Exception("VMware doesn't support shrinking volume from larger size: " + oldSize+ " MB to a smaller size: " + newSize + " MB"); } else if(newSize == oldSize){ - return new ResizeVolumeAnswer(cmd, true, "success", newSize*1024); + return new ResizeVolumeAnswer(cmd, true, "success", newSize*1024); } disk.setCapacityInKB(newSize); VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); deviceConfigSpec.setDevice(disk); - deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.edit); - vmConfigSpec.setDeviceChange(new VirtualDeviceConfigSpec[] { deviceConfigSpec }); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if (!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure VM to resize disk. vmName: " + vmName); } @@ -611,6 +619,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (!resultPair.first()) { throw new Exception(" vpc network usage plugin call failed "); + } if (option.equals("get") || option.equals("vpn")) { @@ -631,7 +640,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } return new NetworkUsageAnswer(cmd, "success", 0L, 0L); } catch (Throwable e) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. 
failure due to " + + s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new NetworkUsageAnswer(cmd, "success", 0L, 0L); @@ -728,6 +738,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + if (!result.first()) { s_logger.error("SetFirewallRulesCommand failure on setting one rule. args: " + args); @@ -737,7 +748,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } return new SetFirewallRulesAnswer(cmd, false, results); - } + } } catch (Throwable e) { s_logger.error("SetFirewallRulesCommand(args: " + args + ") failed on setting one rule due to " @@ -747,10 +758,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa results[i] = "Failed"; } return new SetFirewallRulesAnswer(cmd, false, results); - } + } return new SetFirewallRulesAnswer(cmd, true, results); - } + } protected Answer execute(SetStaticNatRulesCommand cmd) { if (s_logger.isInfoEnabled()) { @@ -1318,7 +1329,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa NicTO nicTo = cmd.getNic(); VirtualDevice nic; Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo); - if (mgr.getNexusVSwitchGlobalParameter()) { + if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); @@ -1334,12 +1345,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(nic); - 
deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.add); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(nic); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure devices when running PlugNicCommand"); } @@ -1382,12 +1393,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new UnPlugNicAnswer(cmd, true, "success"); } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(nic); - deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.remove); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(nic); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.REMOVE); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure devices when running unplugNicCommand"); } @@ -1525,6 +1536,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (removeVif) { + String nicMasksStr = vmMo.getCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK); int nicMasks = Integer.parseInt(nicMasksStr); nicMasks &= ~(1 << publicNicInfo.first().intValue()); @@ -1549,13 +1561,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private 
void plugPublicNic(VirtualMachineMO vmMo, final String vlanId, final String vifMacAddress) throws Exception { // TODO : probably need to set traffic shaping Pair networkInfo = null; - - if (!_nexusVSwitch) { - networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public", + VirtualSwitchType vSwitchType = VirtualSwitchType.StandardVirtualSwitch; + if (_publicTrafficInfo != null) { + vSwitchType = _publicTrafficInfo.getVirtualSwitchType(); + } + if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) { + networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public", vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public", - vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout); + networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public", + vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup); } int nicIndex = allocPublicNicIndex(vmMo); @@ -1565,7 +1580,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualEthernetCard device = (VirtualEthernetCard) nicDevices[nicIndex]; - if (!_nexusVSwitch) { + if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) { VirtualEthernetCardNetworkBackingInfo nicBacking = new VirtualEthernetCardNetworkBackingInfo(); nicBacking.setDeviceName(networkInfo.second()); nicBacking.setNetwork(networkInfo.first()); @@ -1577,12 +1592,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); - VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; - deviceConfigSpecArray[0] = new VirtualDeviceConfigSpec(); - deviceConfigSpecArray[0].setDevice(device); - 
deviceConfigSpecArray[0].setOperation(VirtualDeviceConfigSpecOperation.edit); - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + //VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[1]; + VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec(); + deviceConfigSpec.setDevice(device); + deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.EDIT); + + vmConfigSpec.getDeviceChange().add(deviceConfigSpec); if(!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure devices when plugPublicNic"); } @@ -1669,7 +1685,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa protected Answer execute(SavePasswordCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info("Executing resource SavePasswordCommand. vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + + s_logger.info("Executing resource SavePasswordCommand. vmName: " + cmd.getVmName() + ", vmIp: " + cmd.getVmIpAddress() + ", password: " + StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); } @@ -1715,6 +1732,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } // ssh -p 3922 -o StrictHostKeyChecking=no -i $cert root@$domr "/root/edithosts.sh $mac $ip $vm $dfltrt $ns $staticrt" >/dev/null + String args = " -m " + cmd.getVmMac(); if (cmd.getVmIpAddress() != null) { args += " -4 " + cmd.getVmIpAddress(); @@ -1732,12 +1750,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (cmd.getStaticRoutes() != null) { args += " -s " + cmd.getStaticRoutes(); } - + if (cmd.getVmIp6Address() != null) { args += " -6 " + cmd.getVmIp6Address(); args += " -u " + cmd.getDuid(); } - + + if (!cmd.isDefault()) { + args += " -N"; + } + if (s_logger.isDebugEnabled()) { s_logger.debug("Run command on domR " + cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP) + ", /root/edithosts.sh " + args); } @@ -2089,10 +2111,10 @@ 
public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - assert (vmSpec.getSpeed() != null) && (rootDiskDataStoreDetails != null); - if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), - getReserveCpuMHz(vmSpec.getSpeed().intValue()), vmSpec.getLimitCpuUse(), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), rootDiskDataStoreDetails.first(), false)) { + assert (vmSpec.getMinSpeed() != null) && (rootDiskDataStoreDetails != null); + if (!hyperHost.createBlankVm(vmName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), + vmSpec.getMinSpeed(), vmSpec.getLimitCpuUse(),(int)(vmSpec.getMaxRam()/(1024*1024)), ramMb, + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), rootDiskDataStoreDetails.first(), false)) { throw new Exception("Failed to create VM. vmName: " + vmName); } } @@ -2122,9 +2144,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec(); int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); - VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getSpeed().intValue(), - getReserveCpuMHz(vmSpec.getSpeed().intValue()), ramMb, getReserveMemMB(ramMb), - translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).toString(), vmSpec.getLimitCpuUse()); + VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), + vmSpec.getMinSpeed(),(int) (vmSpec.getMaxRam()/(1024*1024)), ramMb, + translateGuestOsIdentifier(vmSpec.getArch(), vmSpec.getOs()).value(), vmSpec.getLimitCpuUse()); VirtualDeviceConfigSpec[] deviceConfigSpecArray = new VirtualDeviceConfigSpec[totalChangeDevices]; int i = 0; @@ -2151,17 +2173,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs); deviceConfigSpecArray[i] = new 
VirtualDeviceConfigSpec(); - Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), + Pair isoInfo = VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, i, i + 1); deviceConfigSpecArray[i].setDevice(isoInfo.first()); if (isoInfo.second()) { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } i++; } else { @@ -2177,11 +2199,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (isoInfo.second()) { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } else { deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); @@ -2191,12 +2213,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + 
_gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); } else { if(s_logger.isDebugEnabled()) s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first())); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.edit); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT); } } i++; @@ -2248,7 +2270,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1); } deviceConfigSpecArray[i].setDevice(device); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + if(s_logger.isDebugEnabled()) s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); @@ -2264,7 +2287,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo); - if (mgr.getNexusVSwitchGlobalParameter()) { + if (VmwareHelper.isDvPortGroup(networkInfo.first())) { String dvSwitchUuid; ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor); @@ -2279,7 +2302,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(nic); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); if(s_logger.isDebugEnabled()) s_logger.debug("Prepare NIC at new device " + 
_gson.toJson(deviceConfigSpecArray[i])); @@ -2292,7 +2315,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa nicCount++; } - vmConfigSpec.setDeviceChange(deviceConfigSpecArray); + vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); // pass boot arguments through machine.id & perform customized options to VMX @@ -2317,7 +2340,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String keyboardLayout = null; if(vmSpec.getDetails() != null) keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); - vmConfigSpec.setExtraConfig(configureVnc(extraOptions, hyperHost, vmName, vmSpec.getVncPassword(), keyboardLayout)); + vmConfigSpec.getExtraConfig().addAll(Arrays.asList(configureVnc(extraOptions, hyperHost, vmName, vmSpec.getVncPassword(), keyboardLayout))); if (!vmMo.configureVm(vmConfigSpec)) { throw new Exception("Failed to configure VM before start. vmName: " + vmName); @@ -2352,6 +2375,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } private Map validateVmDetails(Map vmDetails) { + Map validatedDetails = new HashMap(); if(vmDetails != null && vmDetails.size() > 0) { @@ -2375,21 +2399,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return validatedDetails; } - private int getReserveCpuMHz(int cpuMHz) { - if(this._reserveCpu) { - return (int)(cpuMHz / this._cpuOverprovisioningFactor); - } - return 0; - } - - private int getReserveMemMB(int memMB) { - if(this._reserveMem) { - return (int)(memMB / this._memOverprovisioningFactor); - } - - return 0; - } private NicTO[] sortNicsByDeviceId(NicTO[] nics) { @@ -2477,21 +2487,36 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } private Pair prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo) throws Exception { + Pair switchName; + TrafficType trafficType; + VirtualSwitchType switchType; + + switchName = 
getTargetSwitch(nicTo); + trafficType = nicTo.getType(); + // Get switch type from resource property which is dictated by cluster property + // If a virtual switch type is specified while adding cluster that will be used. + // Else If virtual switch type is specified in physical traffic label that will be used + // Else use standard vSwitch + switchType = VirtualSwitchType.StandardVirtualSwitch; + if (trafficType == TrafficType.Guest && _guestTrafficInfo != null) { + switchType = _guestTrafficInfo.getVirtualSwitchType(); + } else if (trafficType == TrafficType.Public && _publicTrafficInfo != null) { + switchType = _publicTrafficInfo.getVirtualSwitchType(); + } - Pair switchName = getTargetSwitch(nicTo); String namePrefix = getNetworkNamePrefix(nicTo); Pair networkInfo = null; - s_logger.info("Prepare network on vSwitch: " + switchName + " with name prefix: " + namePrefix); + s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix); - if(!_nexusVSwitch) { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), - nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, + if (VirtualSwitchType.StandardVirtualSwitch == switchType) { + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, !namePrefix.startsWith("cloud.private")); } else { - networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), - nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout); + networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()), + nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, 
switchType, _portsPerDvPortGroup); } return networkInfo; @@ -2501,8 +2526,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private Pair getTargetSwitch(NicTO nicTo) throws Exception { if(nicTo.getName() != null && !nicTo.getName().isEmpty()) { String[] tokens = nicTo.getName().split(","); - - if(tokens.length == 2) { + // Format of network traffic label is ,, + // If all 3 fields are mentioned then number of tokens would be 3. + // If only , are mentioned then number of tokens would be 2. + if(tokens.length == 2 || tokens.length == 3) { return new Pair(tokens[0], tokens[1]); } else { return new Pair(nicTo.getName(), Vlan.UNTAGGED); @@ -2510,11 +2537,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } if (nicTo.getType() == Networks.TrafficType.Guest) { - return new Pair(this._guestNetworkVSwitchName, Vlan.UNTAGGED); + return new Pair(this._guestTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED); } else if (nicTo.getType() == Networks.TrafficType.Control || nicTo.getType() == Networks.TrafficType.Management) { return new Pair(this._privateNetworkVSwitchName, Vlan.UNTAGGED); } else if (nicTo.getType() == Networks.TrafficType.Public) { - return new Pair(this._publicNetworkVSwitchName, Vlan.UNTAGGED); + return new Pair(this._publicTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED); } else if (nicTo.getType() == Networks.TrafficType.Storage) { return new Pair(this._privateNetworkVSwitchName, Vlan.UNTAGGED); } else if (nicTo.getType() == Networks.TrafficType.Vpn) { @@ -2884,7 +2911,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { vmMo.rebootGuest(); return new RebootAnswer(cmd, "reboot succeeded", true); - } catch(ToolsUnavailable e) { + } catch(ToolsUnavailableFaultMsg e) { s_logger.warn("VMware tools is not installed at guest OS, we will perform hard reset for reboot"); } catch(Exception e) { s_logger.warn("We are not able to perform gracefull guest 
reboot due to " + VmwareHelper.getExceptionMessage(e)); @@ -3069,8 +3096,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareManager mgr = dcMo.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - ObjectContent[] ocs = dcMo.getHostPropertiesOnDatacenterHostFolder(new String[] { "name", "parent" }); - if (ocs != null && ocs.length > 0) { + List ocs = dcMo.getHostPropertiesOnDatacenterHostFolder(new String[] { "name", "parent" }); + if (ocs != null && ocs.size() > 0) { for (ObjectContent oc : ocs) { HostMO hostMo = new HostMO(dcMo.getContext(), oc.getObj()); VmwareHypervisorHostNetworkSummary netSummary = hostMo.getHyperHostNetworkSummary(mgr.getManagementPortGroupByHost(hostMo)); @@ -3103,7 +3130,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morDatastore = null; morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, pool.getUuid()); if(morDatastore == null) - morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), + morDatastore = hyperHost.mountDatastore(pool.getType() == StoragePoolType.VMFS, pool.getHost(), pool.getPort(), pool.getPath(), pool.getUuid().replace("-", "")); assert (morDatastore != null); @@ -3374,7 +3401,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new CreateVMSnapshotAnswer(cmd, false, ""); } } - + protected Answer execute(DeleteVMSnapshotCommand cmd) { try { VmwareContext context = getServiceContext(); @@ -3387,7 +3414,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return new DeleteVMSnapshotAnswer(cmd, false, ""); } } - + protected Answer execute(RevertToVMSnapshotCommand cmd){ try{ VmwareContext context = getServiceContext(); @@ -3590,7 +3617,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(result.first()) return new Answer(cmd); } 
catch (Exception e) { - s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } return new Answer(cmd,false,"PingTestCommand failed"); @@ -3651,11 +3678,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa /* * DestroyCommand content example - * + * * {"volume": {"id":5,"name":"Volume1", "mountPoint":"/export/home/kelven/vmware-test/primary", * "path":"6bb8762f-c34c-453c-8e03-26cc246ceec4", "size":0,"type":"DATADISK","resourceType": * "STORAGE_POOL","storagePoolType":"NetworkFilesystem", "poolId":0,"deviceId":0 } } - * + * * {"volume": {"id":1, "name":"i-2-1-KY-ROOT", "mountPoint":"/export/home/kelven/vmware-test/primary", * "path":"i-2-1-KY-ROOT","size":0,"type":"ROOT", "resourceType":"STORAGE_POOL", "storagePoolType":"NetworkFilesystem", * "poolId":0,"deviceId":0 } } @@ -3704,16 +3731,24 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - } + } if (s_logger.isInfoEnabled()) s_logger.info("Destroy volume by original name: " + cmd.getVolume().getPath() + ".vmdk"); dsMo.deleteFile(cmd.getVolume().getPath() + ".vmdk", morDc, true); // root volume may be created via linked-clone, delete the delta disk as well - if (s_logger.isInfoEnabled()) + if (_fullCloneFlag) { + if (s_logger.isInfoEnabled()) { + s_logger.info("Destroy volume by derived name: " + cmd.getVolume().getPath() + "-flat.vmdk"); + } + dsMo.deleteFile(cmd.getVolume().getPath() + "-flat.vmdk", morDc, true); + } else { + if (s_logger.isInfoEnabled()) { s_logger.info("Destroy volume by derived name: " + cmd.getVolume().getPath() + "-delta.vmdk"); + } dsMo.deleteFile(cmd.getVolume().getPath() + "-delta.vmdk", morDc, true); + } return new Answer(cmd, true, "Success"); } @@ -3787,7 +3822,7 @@ public class 
VmwareResource implements StoragePoolResource, ServerResource, Vmwa * NetworkMO(hostMo.getContext(), netDetails.getNetworkMor()); ManagedObjectReference[] vms = * networkMo.getVMsOnNetwork(); if(vms == null || vms.length == 0) { if(s_logger.isInfoEnabled()) { * s_logger.info("Cleanup network as it is currently not in use: " + netDetails.getName()); } - * + * * hostMo.deletePortGroup(netDetails.getName()); } } } catch(Throwable e) { * s_logger.warn("Unable to cleanup network due to exception, skip for next time"); } */ @@ -3815,6 +3850,70 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } + + + private boolean createVMFullClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, + String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool) throws Exception { + + if(dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) + dsMo.deleteFile(String.format("[%s] %s/", dsMo.getName(), vmdkName), dcMo.getMor(), false); + + s_logger.info("creating full clone from template"); + if (!vmTemplate.createFullClone(vmdkName, dcMo.getVmFolder(), morPool, morDatastore)) { + String msg = "Unable to create full clone from the template"; + s_logger.error(msg); + throw new Exception(msg); + } + + // we can't rely on un-offical API (VirtualMachineMO.moveAllVmDiskFiles() any more, use hard-coded disk names that we know + // to move files + s_logger.info("Move volume out of volume-wrapper VM "); + dsMo.moveDatastoreFile(String.format("[%s] %s/%s.vmdk", dsMo.getName(), vmdkName, vmdkName), + dcMo.getMor(), dsMo.getMor(), + String.format("[%s] %s.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); + + dsMo.moveDatastoreFile(String.format("[%s] %s/%s-flat.vmdk", dsMo.getName(), vmdkName, vmdkName), + dcMo.getMor(), dsMo.getMor(), + String.format("[%s] %s-flat.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); + + return true; + } + + private boolean createVMLinkedClone(VirtualMachineMO 
vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, + String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool) throws Exception { + + ManagedObjectReference morBaseSnapshot = vmTemplate.getSnapshotMor("cloud.template.base"); + if (morBaseSnapshot == null) { + String msg = "Unable to find template base snapshot, invalid template"; + s_logger.error(msg); + throw new Exception(msg); + } + + if(dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) + dsMo.deleteFile(String.format("[%s] %s/", dsMo.getName(), vmdkName), dcMo.getMor(), false); + + s_logger.info("creating linked clone from template"); + if (!vmTemplate.createLinkedClone(vmdkName, morBaseSnapshot, dcMo.getVmFolder(), morPool, morDatastore)) { + String msg = "Unable to clone from the template"; + s_logger.error(msg); + throw new Exception(msg); + } + + // we can't rely on un-offical API (VirtualMachineMO.moveAllVmDiskFiles() any more, use hard-coded disk names that we know + // to move files + s_logger.info("Move volume out of volume-wrapper VM "); + dsMo.moveDatastoreFile(String.format("[%s] %s/%s.vmdk", dsMo.getName(), vmdkName, vmdkName), + dcMo.getMor(), dsMo.getMor(), + String.format("[%s] %s.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); + + dsMo.moveDatastoreFile(String.format("[%s] %s/%s-delta.vmdk", dsMo.getName(), vmdkName, vmdkName), + dcMo.getMor(), dsMo.getMor(), + String.format("[%s] %s-delta.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); + + return true; + } + + @Override public synchronized CreateAnswer execute(CreateCommand cmd) { if (s_logger.isInfoEnabled()) { @@ -3874,37 +3973,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); - ManagedObjectReference morBaseSnapshot = vmTemplate.getSnapshotMor("cloud.template.base"); - if (morBaseSnapshot == null) { - 
String msg = "Unable to find template base snapshot, invalid template"; - s_logger.error(msg); - throw new Exception(msg); - } - - if(dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) - dsMo.deleteFile(String.format("[%s] %s/", dsMo.getName(), vmdkName), dcMo.getMor(), false); - - s_logger.info("create linked clone from template"); - if (!vmTemplate.createLinkedClone(vmdkName, morBaseSnapshot, dcMo.getVmFolder(), morPool, morDatastore)) { - String msg = "Unable to clone from the template"; - s_logger.error(msg); - throw new Exception(msg); + //createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); + if (!_fullCloneFlag) { + createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); + } else { + createVMFullClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool); } VirtualMachineMO vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName); assert (vmMo != null); - // we can't rely on un-offical API (VirtualMachineMO.moveAllVmDiskFiles() any more, use hard-coded disk names that we know - // to move files - s_logger.info("Move volume out of volume-wrapper VM "); - dsMo.moveDatastoreFile(String.format("[%s] %s/%s.vmdk", dsMo.getName(), vmdkName, vmdkName), - dcMo.getMor(), dsMo.getMor(), - String.format("[%s] %s.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); - - dsMo.moveDatastoreFile(String.format("[%s] %s/%s-delta.vmdk", dsMo.getName(), vmdkName, vmdkName), - dcMo.getMor(), dsMo.getMor(), - String.format("[%s] %s-delta.vmdk", dsMo.getName(), vmdkName), dcMo.getMor(), true); - s_logger.info("detach disks from volume-wrapper VM " + vmdkName); vmMo.detachAllDisks(); @@ -3965,21 +4043,21 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmConfig.setName(vmName); vmConfig.setMemoryMB((long) 4); // vmware request minimum of 4 MB vmConfig.setNumCPUs(1); - vmConfig.setGuestId(VirtualMachineGuestOsIdentifier._otherGuest.toString()); + 
vmConfig.setGuestId(VirtualMachineGuestOsIdentifier.OTHER_GUEST.value()); VirtualMachineFileInfo fileInfo = new VirtualMachineFileInfo(); fileInfo.setVmPathName(String.format("[%s]", dsMo.getName())); vmConfig.setFiles(fileInfo); // Scsi controller VirtualLsiLogicController scsiController = new VirtualLsiLogicController(); - scsiController.setSharedBus(VirtualSCSISharing.noSharing); + scsiController.setSharedBus(VirtualSCSISharing.NO_SHARING); scsiController.setBusNumber(0); scsiController.setKey(1); VirtualDeviceConfigSpec scsiControllerSpec = new VirtualDeviceConfigSpec(); scsiControllerSpec.setDevice(scsiController); - scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.add); + scsiControllerSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD); - vmConfig.setDeviceChange(new VirtualDeviceConfigSpec[] { scsiControllerSpec }); + vmConfig.getDeviceChange().add(scsiControllerSpec ); hyperHost.createVm(vmConfig); vmMo = hyperHost.findVmOnHyperHost(vmName); return vmMo; @@ -4018,11 +4096,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa new String[] {"name", "config.template", "runtime.powerState", "runtime.bootTime"}); if(ocs != null) { for(ObjectContent oc : ocs) { - DynamicProperty[] props = oc.getPropSet(); + List props = oc.getPropSet(); if(props != null) { String name = null; boolean template = false; - VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; + VirtualMachinePowerState powerState = VirtualMachinePowerState.POWERED_OFF; GregorianCalendar bootTime = null; for(DynamicProperty prop : props) { @@ -4032,7 +4110,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa template = (Boolean)prop.getVal(); else if(prop.getName().equals("runtime.powerState")) powerState = (VirtualMachinePowerState)prop.getVal(); - else if(prop.getName().equals("runtime.bootTime")) + else if(prop.getName().equals("runtime.bootTime")) bootTime = 
(GregorianCalendar)prop.getVal(); } @@ -4040,7 +4118,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa boolean recycle = false; // recycle stopped worker VM and VM that has been running for too long (hard-coded 10 hours for now) - if(powerState == VirtualMachinePowerState.poweredOff) + if(powerState == VirtualMachinePowerState.POWERED_OFF) recycle = true; else if(bootTime != null && (new Date().getTime() - bootTime.getTimeInMillis() > 10*3600*1000)) recycle = true; @@ -4144,7 +4222,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatastoreSummary dsSummary = dsMo.getSummary(); String address = hostMo.getHostName(); - StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().get_value(), "", StoragePoolType.LVM, dsSummary.getCapacity(), dsSummary.getFreeSpace()); + StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.LVM, dsSummary.getCapacity(), dsSummary.getFreeSpace()); StartupStorageCommand cmd = new StartupStorageCommand(); cmd.setName(poolUuid); cmd.setPoolInfo(pInfo); @@ -4182,7 +4260,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa fillHostHardwareInfo(serviceContext, cmd); fillHostNetworkInfo(serviceContext, cmd); fillHostDetailsInfo(serviceContext, details); - } catch (RuntimeFault e) { + } catch (RuntimeFaultFaultMsg e) { s_logger.error("RuntimeFault while retrieving host info: " + e.toString(), e); throw new CloudRuntimeException("RuntimeFault while retrieving host info"); } catch (RemoteException e) { @@ -4204,7 +4282,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa cmd.setVersion(VmwareResource.class.getPackage().getImplementationVersion()); } - private void fillHostHardwareInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFault, RemoteException, Exception { + private void fillHostHardwareInfo(VmwareContext 
serviceContext, StartupRoutingCommand cmd) throws RuntimeFaultFaultMsg, RemoteException, Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); VmwareHypervisorHostResourceSummary summary = hyperHost.getHyperHostResourceSummary(); @@ -4220,7 +4298,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa cmd.setMemory(summary.getMemoryBytes()); } - private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFault, RemoteException { + private void fillHostNetworkInfo(VmwareContext serviceContext, StartupRoutingCommand cmd) throws RuntimeFaultFaultMsg, RemoteException { try { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); @@ -4256,7 +4334,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); ClusterDasConfigInfo dasConfig = hyperHost.getDasConfig(); - if (dasConfig != null && dasConfig.getEnabled() != null && dasConfig.getEnabled().booleanValue()) { + if (dasConfig != null && dasConfig.isEnabled() != null && dasConfig.isEnabled().booleanValue()) { details.put("NativeHA", "true"); } } @@ -4374,7 +4452,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return hyperHost.findVmOnPeerHyperHost(vmName) != null; } - protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, + protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception { VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName); @@ -4460,9 +4538,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } if (cpuArchitecture.equalsIgnoreCase("x86_64")) { - return VirtualMachineGuestOsIdentifier.otherGuest64; + return VirtualMachineGuestOsIdentifier.OTHER_GUEST_64; } 
- return VirtualMachineGuestOsIdentifier.otherGuest; + return VirtualMachineGuestOsIdentifier.OTHER_GUEST; } private void prepareNetworkForVmTargetHost(HostMO hostMo, VirtualMachineMO vmMo) throws Exception { @@ -4481,7 +4559,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length == 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) { + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) (shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } String vlanId = null; @@ -4497,14 +4575,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length == 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) { + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) (shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } String vlanId = null; if(!"untagged".equalsIgnoreCase(tokens[2])) vlanId = tokens[2]; - HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public", + HypervisorHostHelper.prepareNetwork(_publicTrafficInfo.getVirtualSwitchName(), "cloud.public", hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false); } else { s_logger.info("Skip suspecious cloud network " + networkName); @@ -4513,7 +4591,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String[] tokens = networkName.split("\\."); if (tokens.length >= 3) { Integer networkRateMbps = null; - if (shapingPolicy != null && shapingPolicy.getEnabled() != null && shapingPolicy.getEnabled().booleanValue()) 
{ + if (shapingPolicy != null && shapingPolicy.isEnabled() != null && shapingPolicy.isEnabled().booleanValue()) { networkRateMbps = (int) (shapingPolicy.getPeakBandwidth().longValue() / (1024 * 1024)); } @@ -4521,7 +4599,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(!"untagged".equalsIgnoreCase(tokens[2])) vlanId = tokens[2]; - HypervisorHostHelper.prepareNetwork(this._guestNetworkVSwitchName, "cloud.guest", + HypervisorHostHelper.prepareNetwork(_guestTrafficInfo.getVirtualSwitchName(), "cloud.guest", hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false); } else { s_logger.info("Skip suspecious cloud network " + networkName); @@ -4539,12 +4617,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa HashMap newStates = new HashMap(); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if (objProps != null) { boolean isTemplate = false; String name = null; - VirtualMachinePowerState powerState = VirtualMachinePowerState.poweredOff; + VirtualMachinePowerState powerState = VirtualMachinePowerState.POWERED_OFF; for (DynamicProperty objProp : objProps) { if (objProp.getName().equals("config.template")) { if (objProp.getVal().toString().equalsIgnoreCase("true")) { @@ -4571,19 +4649,19 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa private HashMap getVmStats(List vmNames) throws Exception { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); HashMap vmResponseMap = new HashMap(); - ManagedObjectReference perfMgr = getServiceContext().getServiceConnection().getServiceContent().getPerfManager(); - VimPortType service = getServiceContext().getServiceConnection().getService(); + ManagedObjectReference perfMgr = getServiceContext().getServiceContent().getPerfManager(); + VimPortType service = getServiceContext().getService(); PerfCounterInfo 
rxPerfCounterInfo = null; PerfCounterInfo txPerfCounterInfo = null; - PerfCounterInfo[] cInfo = (PerfCounterInfo[]) getServiceContext().getServiceUtil().getDynamicProperty(perfMgr, "perfCounter"); - for(int i=0; i cInfo = (List) getServiceContext().getVimClient().getDynamicProperty(perfMgr, "perfCounter"); + for(PerfCounterInfo info : cInfo) { + if ("net".equalsIgnoreCase(info.getGroupInfo().getKey())) { + if ("transmitted".equalsIgnoreCase(info.getNameInfo().getKey())) { + txPerfCounterInfo = info; } - if ("received".equalsIgnoreCase(cInfo[i].getNameInfo().getKey())) { - rxPerfCounterInfo = cInfo[i]; + if ("received".equalsIgnoreCase(info.getNameInfo().getKey())) { + rxPerfCounterInfo = info; } } } @@ -4591,7 +4669,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ObjectContent[] ocs = hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", "summary.config.numCpu", "summary.quickStats.overallCpuUsage"}); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { - DynamicProperty[] objProps = oc.getPropSet(); + List objProps = oc.getPropSet(); if (objProps != null) { String name = null; String numberCPUs = null; @@ -4615,13 +4693,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa assert(vmMor!=null); ArrayList vmNetworkMetrics = new ArrayList(); - // get all the metrics from the available sample period - PerfMetricId[] perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null); + // get all the metrics from the available sample period + List perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null); if(perfMetrics != null) { - for(int index=0; index < perfMetrics.length; ++index) { - if ( ((rxPerfCounterInfo != null) && (perfMetrics[index].getCounterId() == rxPerfCounterInfo.getKey())) || - ((txPerfCounterInfo != null) && (perfMetrics[index].getCounterId() == txPerfCounterInfo.getKey())) ) { - vmNetworkMetrics.add(perfMetrics[index]); + 
for(int index=0; index < perfMetrics.size(); ++index) { + if ( ((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) || + ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey())) ) { + vmNetworkMetrics.add(perfMetrics.get(index)); } } } @@ -4634,23 +4712,26 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa PerfQuerySpec qSpec = new PerfQuerySpec(); qSpec.setEntity(vmMor); PerfMetricId[] availableMetricIds = (PerfMetricId[]) vmNetworkMetrics.toArray(new PerfMetricId[0]); - qSpec.setMetricId(availableMetricIds); - PerfQuerySpec[] qSpecs = new PerfQuerySpec[] {qSpec}; - PerfEntityMetricBase[] values = service.queryPerf(perfMgr, qSpecs); + qSpec.getMetricId().addAll(Arrays.asList(availableMetricIds)); + List qSpecs = new ArrayList(); + qSpecs.add(qSpec); + List values = service.queryPerf(perfMgr, qSpecs); - for(int i=0; i infos = ((PerfEntityMetric)values.get(i)).getSampleInfo(); + int endMs = infos.get(infos.size()-1).getTimestamp().getSecond() * 1000 + infos.get(infos.size()-1).getTimestamp().getMillisecond(); + int beginMs = infos.get(0).getTimestamp().getSecond() * 1000 + infos.get(0).getTimestamp().getMillisecond(); + sampleDuration = (endMs - beginMs) /1000; + List vals = ((PerfEntityMetric)values.get(i)).getValue(); + for(int vi = 0; ((vals!= null) && (vi < vals.size())); ++vi){ + if(vals.get(vi) instanceof PerfMetricIntSeries) { + PerfMetricIntSeries val = (PerfMetricIntSeries)vals.get(vi); + List perfValues = val.getValue(); + if (vals.get(vi).getId().getCounterId() == rxPerfCounterInfo.getKey()) { + networkReadKBs = sampleDuration * perfValues.get(3); //get the average RX rate multiplied by sampled duration } - if (vals[vi].getId().getCounterId() == txPerfCounterInfo.getKey()) { - networkWriteKBs = sampleDuration * perfValues[3];//get the average TX rate multiplied by sampled duration + if (vals.get(vi).getId().getCounterId() == 
txPerfCounterInfo.getKey()) { + networkWriteKBs = sampleDuration * perfValues.get(3);//get the average TX rate multiplied by sampled duration } } } @@ -4660,8 +4741,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - return vmResponseMap; - } + return vmResponseMap; + } protected String networkUsage(final String privateIpAddress, final String option, final String ethName) { String args = null; @@ -4694,7 +4775,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return result.second(); } catch (Throwable e) { - s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. failure due to " + s_logger.error("Unable to execute NetworkUsage command on DomR (" + privateIpAddress + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e); } @@ -4835,8 +4916,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _morHyperHost = new ManagedObjectReference(); String[] hostTokens = tokens[0].split(":"); _morHyperHost.setType(hostTokens[0]); - _morHyperHost.set_value(hostTokens[1]); + _morHyperHost.setValue(hostTokens[1]); + _guestTrafficInfo = (VmwareTrafficLabel) params.get("guestTrafficInfo"); + _publicTrafficInfo = (VmwareTrafficLabel) params.get("publicTrafficInfo"); VmwareContext context = getServiceContext(); try { VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME); @@ -4844,12 +4927,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager()); cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID); - if (mgr.getNexusVSwitchGlobalParameter()) { + if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch || + _guestTrafficInfo != 
null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) { cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP); - } else { - cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC); } - + cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC); cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_UUID); cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_NIC_MASK); @@ -4857,15 +4939,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa _hostName = hostMo.getHyperHostName(); Map vsmCredentials; - if (mgr.getNexusVSwitchGlobalParameter()) { + if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch || + _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) { vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster)); if (vsmCredentials != null) { s_logger.info("Stocking credentials while configuring resource."); context.registerStockObject("vsmcredentials", vsmCredentials); } _privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware); - _publicNetworkVSwitchName = mgr.getPublicVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware); - _guestNetworkVSwitchName = mgr.getGuestVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware); } } catch (Exception e) { @@ -4874,19 +4955,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(_privateNetworkVSwitchName == null) { _privateNetworkVSwitchName = (String) params.get("private.network.vswitch.name"); - } - if(_publicNetworkVSwitchName == null) { - _publicNetworkVSwitchName = (String) params.get("public.network.vswitch.name"); - } - if(_guestNetworkVSwitchName == null) { - _guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name"); } - String value = 
(String) params.get("cpu.overprovisioning.factor"); - if(value != null) - _cpuOverprovisioningFactor = Float.parseFloat(value); - - value = (String) params.get("vmware.reserve.cpu"); + String value = (String) params.get("vmware.reserve.cpu"); if(value != null && value.equalsIgnoreCase("true")) _reserveCpu = true; @@ -4894,10 +4965,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if(value != null && value.equalsIgnoreCase("true")) _recycleHungWorker = true; - value = (String) params.get("mem.overprovisioning.factor"); - if(value != null) - _memOverprovisioningFactor = Float.parseFloat(value); - value = (String) params.get("vmware.reserve.mem"); if(value != null && value.equalsIgnoreCase("true")) _reserveMem = true; @@ -4908,12 +4975,22 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa else _rootDiskController = DiskControllerType.ide; - value = params.get("vmware.use.nexus.vswitch").toString(); - if(value != null && value.equalsIgnoreCase("true")) - _nexusVSwitch = true; + Integer intObj = (Integer) params.get("ports.per.dvportgroup"); + if (intObj != null) + _portsPerDvPortGroup = intObj.intValue(); - s_logger.info("VmwareResource network configuration info. private vSwitch: " + _privateNetworkVSwitchName + ", public vSwitch: " + _publicNetworkVSwitchName + ", guest network: " - + _guestNetworkVSwitchName); + s_logger.info("VmwareResource network configuration info." 
+ + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " + + this._publicTrafficInfo.getVirtualSwitchType() + " : " + this._publicTrafficInfo.getVirtualSwitchName() + + ", guest traffic over " + this._guestTrafficInfo.getVirtualSwitchType() + " : " + + this._guestTrafficInfo.getVirtualSwitchName()); + + value = params.get("vmware.create.full.clone").toString(); + if (value != null && value.equalsIgnoreCase("true")) { + _fullCloneFlag = true; + } else { + _fullCloneFlag = false; + } return true; } @@ -4957,7 +5034,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa boolean bRefresh = false; if(firewallMo != null) { HostFirewallInfo firewallInfo = firewallMo.getFirewallInfo(); - if(firewallInfo != null) { + if(firewallInfo != null && firewallInfo.getRuleset() != null) { for(HostFirewallRuleset rule : firewallInfo.getRuleset()) { if("vncServer".equalsIgnoreCase(rule.getKey())) { bRefresh = true; @@ -5012,13 +5089,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa @Override public void setName(String name) { // TODO Auto-generated method stub - + } @Override public void setConfigParams(Map params) { // TODO Auto-generated method stub - + } @Override @@ -5036,6 +5113,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa @Override public void setRunLevel(int level) { // TODO Auto-generated method stub - + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java b/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java new file mode 100644 index 00000000000..90a42781e29 --- /dev/null +++ b/plugins/hypervisors/vmware/src/com/cloud/network/VmwareTrafficLabel.java @@ -0,0 +1,118 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.hypervisor.vmware.mo.VirtualSwitchType; +import com.cloud.network.Networks.TrafficType; + +public class VmwareTrafficLabel implements TrafficLabel { + public static final String DEFAULT_VSWITCH_NAME = "vSwitch0"; + public static final String DEFAULT_DVSWITCH_NAME = "dvSwitch0"; + public static final String DEFAULT_NDVSWITCH_NAME = "epp0"; + public static final int MAX_FIELDS_VMWARE_LABEL = 3; + public static final int VMWARE_LABEL_FIELD_INDEX_NAME = 0; + public static final int VMWARE_LABEL_FIELD_INDEX_VLANID = 1; + public static final int VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE = 2; + + TrafficType _trafficType = TrafficType.None; + VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch; + String _vSwitchName = DEFAULT_VSWITCH_NAME; + String _vlanId = null; + + public VmwareTrafficLabel(String networkLabel, TrafficType trafficType, VirtualSwitchType defVswitchType) { + _trafficType = trafficType; + _parseLabel(networkLabel, defVswitchType); + } + + public VmwareTrafficLabel(String networkLabel, TrafficType trafficType) { + _trafficType = trafficType; + _parseLabel(networkLabel, VirtualSwitchType.StandardVirtualSwitch); + } + + public 
VmwareTrafficLabel(TrafficType trafficType, VirtualSwitchType defVswitchType) { + _trafficType = trafficType; // Define traffic label with specific traffic type + _parseLabel(null, defVswitchType); + } + + public VmwareTrafficLabel(TrafficType trafficType) { + _trafficType = trafficType; // Define traffic label with specific traffic type + _parseLabel(null, VirtualSwitchType.StandardVirtualSwitch); + } + + public VmwareTrafficLabel() { + } + + private void _parseLabel(String networkLabel, VirtualSwitchType defVswitchType) { + if (networkLabel == null || networkLabel.isEmpty()) { + // Set defaults for label in case of distributed vSwitch + if (defVswitchType.equals(VirtualSwitchType.VMwareDistributedVirtualSwitch)) { + _vSwitchName = DEFAULT_DVSWITCH_NAME; + _vSwitchType = VirtualSwitchType.VMwareDistributedVirtualSwitch; + } else if (defVswitchType.equals(VirtualSwitchType.NexusDistributedVirtualSwitch)) { + _vSwitchName = DEFAULT_NDVSWITCH_NAME; + _vSwitchType = VirtualSwitchType.NexusDistributedVirtualSwitch; + } + return; + } + String[] tokens = networkLabel.split(","); + if (tokens.length > VMWARE_LABEL_FIELD_INDEX_NAME) { + _vSwitchName = tokens[VMWARE_LABEL_FIELD_INDEX_NAME].trim(); + } + if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VLANID) { + _vlanId = tokens[VMWARE_LABEL_FIELD_INDEX_VLANID].trim(); + } + if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE) { + _vSwitchType = VirtualSwitchType.getType(tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim()); + if(VirtualSwitchType.None == _vSwitchType) { + throw new InvalidParameterValueException("Invalid virtual switch type : " + tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim()); + } + } + if (tokens.length > MAX_FIELDS_VMWARE_LABEL ) { + throw new InvalidParameterValueException("Found extraneous fields in vmware traffic label : " + networkLabel); + } + } + + @Override + public TrafficType getTrafficType() { + return _trafficType; + } + + @Override + public String getNetworkLabel() { + return null; 
+ } + + public VirtualSwitchType getVirtualSwitchType() { + return _vSwitchType; + } + + public String getVirtualSwitchName() { + return _vSwitchName; + } + + public String getVlanId() { + return _vlanId; + } + public void setVirtualSwitchName(String vSwitchName) { + _vSwitchName = vSwitchName; + } + + public void setVirtualSwitchType(VirtualSwitchType vSwitchType) { + _vSwitchType = vSwitchType; + } +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java index fc298c895c8..646ef633fc7 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.storage.resource; @@ -20,45 +20,44 @@ import java.util.HashMap; import java.util.Iterator; import java.util.Map; +import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.vmware.apputils.version.ExtendedAppUtil; public class VmwareSecondaryStorageContextFactory { private static volatile int s_seq = 1; - + private static Map s_contextMap = new HashMap(); - + public static void initFactoryEnvironment() { System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); } - + public static VmwareContext create(String vCenterAddress, String vCenterUserName, String vCenterPassword) throws Exception { assert(vCenterAddress != null); assert(vCenterUserName != null); assert(vCenterPassword != null); - + VmwareContext context = null; - + synchronized(s_contextMap) { context = s_contextMap.get(vCenterAddress); if(context == null) { String serviceUrl = "https://" + vCenterAddress + "/sdk/vimService"; - String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; - ExtendedAppUtil appUtil = ExtendedAppUtil.initialize(vCenterAddress + "-" + s_seq++, params); - - appUtil.connect(); - context = new VmwareContext(appUtil, vCenterAddress); + //String[] params = new String[] {"--url", serviceUrl, "--username", vCenterUserName, "--password", vCenterPassword }; + VmwareClient vimClient = new VmwareClient(vCenterAddress + "-" + s_seq++); + vimClient.connect(serviceUrl, vCenterUserName, vCenterPassword); + context = new VmwareContext(vimClient, vCenterAddress); context.registerStockObject("username", vCenterUserName); context.registerStockObject("password", vCenterPassword); - + s_contextMap.put(vCenterAddress, context); } } - + assert(context != null); return context; } - + public static void invalidate(VmwareContext context) { synchronized(s_contextMap) { for(Iterator> entryIter = 
s_contextMap.entrySet().iterator(); entryIter.hasNext();) { @@ -68,7 +67,7 @@ public class VmwareSecondaryStorageContextFactory { } } } - + context.close(); } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java index 2abed160dfc..566e750c3fe 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package com.cloud.storage.resource; @@ -85,11 +85,11 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe if(cmd.getContextParam("execid") != null) { answer.setContextParam("execid", cmd.getContextParam("execid")); } - + if(cmd.getContextParam("checkpoint") != null) { answer.setContextParam("checkpoint", cmd.getContextParam("checkpoint")); } - + if(cmd.getContextParam("checkpoint2") != null) { answer.setContextParam("checkpoint2", cmd.getContextParam("checkpoint2")); } @@ -219,23 +219,23 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe } morHyperHost.setType(hostTokens[0]); - morHyperHost.set_value(hostTokens[1]); + morHyperHost.setValue(hostTokens[1]); if(morHyperHost.getType().equalsIgnoreCase("HostSystem")) { HostMO hostMo = new HostMO(context, morHyperHost); try { - + ManagedObjectReference mor = hostMo.getHyperHostCluster(); ClusterMO clusterMo = new ClusterMO(hostMo.getContext(), mor); List> hostsInCluster = clusterMo.getClusterHosts(); for(Pair hostPair : hostsInCluster) { HostMO hostIteratorMo = new HostMO(hostMo.getContext(), hostPair.first()); - + VmwareHypervisorHostNetworkSummary netSummary = hostIteratorMo.getHyperHostNetworkSummary( hostIteratorMo.getHostType() == VmwareHostType.ESXi ? 
cmd.getContextParam("manageportgroup") : cmd.getContextParam("serviceconsole")); _resource.ensureOutgoingRuleForAddress(netSummary.getHostIp()); - + s_logger.info("Setup firewall rule for host: " + netSummary.getHostIp()); } } catch(Throwable e) { @@ -253,7 +253,7 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe public String getWorkerName(VmwareContext context, Command cmd, int workerSequence) { assert(cmd.getContextParam("worker") != null); assert(workerSequence < 2); - + if(workerSequence == 0) return cmd.getContextParam("worker"); return cmd.getContextParam("worker2"); @@ -276,7 +276,7 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe assert(hostTokens.length == 2); morHyperHost.setType(hostTokens[0]); - morHyperHost.set_value(hostTokens[1]); + morHyperHost.setValue(hostTokens[1]); if(morHyperHost.getType().equalsIgnoreCase("HostSystem")) { HostMO hostMo = new HostMO(context, morHyperHost); diff --git a/plugins/hypervisors/xen/pom.xml b/plugins/hypervisors/xen/pom.xml index 0a57afca284..72d32f3029c 100644 --- a/plugins/hypervisors/xen/pom.xml +++ b/plugins/hypervisors/xen/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index 65a97a8de31..89bc1cf5708 100755 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -79,9 +79,9 @@ import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import 
com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.user.Account; @@ -315,6 +315,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L params.put("wait", Integer.toString(_wait)); details.put("wait", Integer.toString(_wait)); params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); + params.put(Config.XenMaxNics.toString().toLowerCase(), _configDao.getValue(Config.XenMaxNics.toString())); params.put(Config.InstanceName.toString().toLowerCase(), _instance); details.put(Config.InstanceName.toString().toLowerCase(), _instance); try { @@ -426,8 +427,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L prodVersion = prodVersion.trim(); } - if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") )) - return new XcpServerResource(); + if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") || prodVersion.startsWith("1.6"))) + return new XcpServerResource(); if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) return new XenServer56Resource(); @@ -454,7 +455,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L return new XcpOssResource(); } - String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2, 6.1.0 but this one is " + prodBrand + " " + prodVersion; + String msg = "Only support XCP 1.0.0, 1.1.0, 1.4.x, 1.5 beta, 1.6.x; XenServer 5.6, XenServer 5.6 FP1, XenServer 5.6 SP2, Xenserver 6.0, 6.0.2, 6.1.0 but this one is " + prodBrand + " " + prodVersion; _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, dcId, podId, msg, msg); 
s_logger.debug(msg); throw new RuntimeException(msg); @@ -582,7 +583,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L String prodBrand = details.get("product_brand").trim(); String prodVersion = details.get("product_version").trim(); - if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4"))) { + if(prodBrand.equals("XCP") && (prodVersion.equals("1.0.0") || prodVersion.equals("1.1.0") || prodVersion.equals("5.6.100") || prodVersion.startsWith("1.4") || prodVersion.startsWith("1.6"))) { resource = XcpServerResource.class.getName(); } else if(prodBrand.equals("XenServer") && prodVersion.equals("5.6.0")) { resource = XenServer56Resource.class.getName(); @@ -604,7 +605,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L } if( resource == null ){ - String msg = "Only support XCP 1.0.0, 1.1.0, 1.5 beta; XenServer 5.6, 5.6 FP1, 5.6 SP2 and Xenserver 6.0 , 6.0.2, 6.1.0 but this one is " + prodBrand + " " + prodVersion; + String msg = "Only support XCP 1.0.0, 1.1.0, 1.4.x, 1.5 beta, 1.6.x; XenServer 5.6, 5.6 FP1, 5.6 SP2 and Xenserver 6.0 , 6.0.2, 6.1.0 but this one is " + prodBrand + " " + prodVersion; s_logger.debug(msg); throw new RuntimeException(msg); } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java index 1b8496ff287..828a8279f9a 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java @@ -576,13 +576,12 @@ public class CitrixHelper { _xenServer610GuestOsMap.put("CentOS 5.6 (64-bit)", "CentOS 5 (64-bit)"); _xenServer610GuestOsMap.put("CentOS 5.7 (32-bit)", "CentOS 5 (32-bit)"); _xenServer610GuestOsMap.put("CentOS 5.7 (64-bit)", "CentOS 5 (64-bit)"); - 
_xenServer610GuestOsMap.put("CentOS 6.0 (32-bit)", "CentOS 6.0 (32-bit)"); - _xenServer610GuestOsMap.put("CentOS 6.0 (64-bit)", "CentOS 6.0 (64-bit)"); - _xenServer610GuestOsMap.put("CentOS 6.1 (32-bit)", "CentOS 6.1 (32-bit)"); - _xenServer610GuestOsMap.put("CentOS 6.1 (64-bit)", "CentOS 6.1 (64-bit)"); - _xenServer610GuestOsMap.put("CentOS 6.2 (32-bit)", "CentOS 6.2 (32-bit)"); - _xenServer610GuestOsMap.put("CentOS 6.2 (64-bit)", "CentOS 6.2 (64-bit)"); - _xenServer610GuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.0 (32-bit)", "CentOS 6 (32-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.0 (64-bit)", "CentOS 6 (64-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.1 (32-bit)", "CentOS 6 (32-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.1 (64-bit)", "CentOS 6 (64-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.2 (32-bit)", "CentOS 6 (32-bit)"); + _xenServer610GuestOsMap.put("CentOS 6.2 (64-bit)", "CentOS 6 (64-bit)"); _xenServer610GuestOsMap.put("Debian GNU/Linux 6(32-bit)", "Debian Squeeze 6.0 (32-bit)"); _xenServer610GuestOsMap.put("Debian GNU/Linux 6(64-bit)", "Debian Squeeze 6.0 (64-bit)"); _xenServer610GuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); @@ -601,12 +600,12 @@ public class CitrixHelper { _xenServer610GuestOsMap.put("Oracle Enterprise Linux 5.6 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); _xenServer610GuestOsMap.put("Oracle Enterprise Linux 5.7 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); _xenServer610GuestOsMap.put("Oracle Enterprise Linux 5.7 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); - _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.0 (32-bit)", "Oracle Enterprise Linux 6.0 (32-bit)"); - _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.0 (64-bit)", "Oracle Enterprise Linux 6.0 (64-bit)"); - _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.1 (32-bit)", "Oracle Enterprise Linux 6.1 (32-bit)"); - 
_xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.1 (64-bit)", "Oracle Enterprise Linux 6.1 (64-bit)"); - _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.2 (32-bit)", "Oracle Enterprise Linux 6.2 (32-bit)"); - _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.2 (64-bit)", "Oracle Enterprise Linux 6.2 (64-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.0 (32-bit)", "Oracle Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.0 (64-bit)", "Oracle Enterprise Linux 6 (64-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.1 (32-bit)", "Oracle Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.1 (64-bit)", "Oracle Enterprise Linux 6 (64-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.2 (32-bit)", "Oracle Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Oracle Enterprise Linux 6.2 (64-bit)", "Oracle Enterprise Linux 6 (64-bit)"); _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)"); _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)"); _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)"); @@ -627,12 +626,12 @@ public class CitrixHelper { _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 5.6 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 5.7 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 5.7 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.0 (32-bit)", "Red Hat Enterprise Linux 6.0 (32-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.0 (64-bit)", "Red Hat Enterprise Linux 6.0 (64-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.1 (32-bit)", "Red 
Hat Enterprise Linux 6.1 (32-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.1 (64-bit)", "Red Hat Enterprise Linux 6.1 (64-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.2 (32-bit)", "Red Hat Enterprise Linux 6.2 (32-bit)"); - _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.2 (64-bit)", "Red Hat Enterprise Linux 6.2 (64-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.0 (32-bit)", "Red Hat Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.0 (64-bit)", "Red Hat Enterprise Linux 6 (64-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.1 (32-bit)", "Red Hat Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.1 (64-bit)", "Red Hat Enterprise Linux 6 (64-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.2 (32-bit)", "Red Hat Enterprise Linux 6 (32-bit)"); + _xenServer610GuestOsMap.put("Red Hat Enterprise Linux 6.2 (64-bit)", "Red Hat Enterprise Linux 6 (64-bit)"); _xenServer610GuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)"); _xenServer610GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)"); _xenServer610GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 (64-bit)"); @@ -662,13 +661,15 @@ public class CitrixHelper { _xenServer610GuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)"); _xenServer610GuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)"); _xenServer610GuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)"); - _xenServer610GuestOsMap.put("Windows Server 8 (64-bit)", "Windows Server 8 (64-bit) (experimental)"); + _xenServer610GuestOsMap.put("Windows Server 8 (64-bit)", "Windows Server 2012 (64-bit) (experimental)"); _xenServer610GuestOsMap.put("Windows 
Vista (32-bit)", "Windows Vista (32-bit)"); _xenServer610GuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)"); _xenServer610GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit)"); _xenServer610GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit)"); _xenServer610GuestOsMap.put("Ubuntu 10.10 (32-bit)", "Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)"); _xenServer610GuestOsMap.put("Ubuntu 10.10 (64-bit)", "Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)"); + _xenServer610GuestOsMap.put("Ubuntu 12.04 (32-bit)", "Ubuntu Precise Pangolin 12.04 (32-bit)"); + _xenServer610GuestOsMap.put("Ubuntu 12.04 (64-bit)", "Ubuntu Precise Pangolin 12.04 (64-bit)"); _xenServer610GuestOsMap.put("Other Linux (32-bit)", "Other install media"); _xenServer610GuestOsMap.put("Other Linux (64-bit)", "Other install media"); _xenServer610GuestOsMap.put("Other (32-bit)", "Other install media"); diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 22f4ba9cb80..f0cf2f05bf0 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -118,6 +118,7 @@ import com.cloud.agent.api.ModifySshKeysCommand; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; import com.cloud.agent.api.NetworkRulesSystemVmCommand; +import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PingRoutingWithNwGroupsCommand; @@ -331,6 +332,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected boolean _isOvs = false; protected List _tmpDom0Vif = new ArrayList(); protected 
XenServerStorageResource storageResource; + protected int _maxNics = 7; public enum SRType { NFS, LVM, ISCSI, ISO, LVMOISCSI, LVMOHBA, EXT, FILE; @@ -596,6 +598,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return execute((DeleteVMSnapshotCommand)cmd); } else if (clazz == RevertToVMSnapshotCommand.class) { return execute((RevertToVMSnapshotCommand)cmd); + } else if (clazz == NetworkRulesVmSecondaryIpCommand.class) { + return execute((NetworkRulesVmSecondaryIpCommand)cmd); } else { return Answer.createUnsupportedCommandAnswer(cmd); } @@ -1113,13 +1117,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vm.setAffinity(conn, host); vm.removeFromOtherConfig(conn, "disks"); vm.setNameLabel(conn, vmSpec.getName()); - setMemory(conn, vm, vmSpec.getMinRam()); + setMemory(conn, vm, vmSpec.getMinRam(),vmSpec.getMaxRam()); vm.setVCPUsMax(conn, (long)vmSpec.getCpus()); vm.setVCPUsAtStartup(conn, (long)vmSpec.getCpus()); Map vcpuParams = new HashMap(); - Integer speed = vmSpec.getSpeed(); + Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; //cpu_weight @@ -1467,7 +1471,18 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe for (NicTO nic : nics) { if ( nic.isSecurityGroupEnabled() || nic.getIsolationUri() != null && nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString())) { - result = callHostPlugin(conn, "vmops", "default_network_rules", "vmName", vmName, "vmIP", nic.getIp(), "vmMAC", nic.getMac(), "vmID", Long.toString(vmSpec.getId())); + List nicSecIps = nic.getNicSecIps(); + String secIpsStr; + StringBuilder sb = new StringBuilder(); + if (nicSecIps != null) { + for (String ip : nicSecIps) { + sb.append(ip).append(":"); + } + secIpsStr = sb.toString(); + } else { + secIpsStr = "0:"; + } + result = callHostPlugin(conn, "vmops", "default_network_rules", "vmName", vmName, "vmIP", nic.getIp(), "vmMAC", 
nic.getMac(), "vmID", Long.toString(vmSpec.getId()), "secIps", secIpsStr); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { s_logger.warn("Failed to program default network rules for " + vmName+" on nic with ip:"+nic.getIp()+" mac:"+nic.getMac()); @@ -1889,6 +1904,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe args += " -u " + cmd.getDuid(); } + if (!cmd.isDefault()) { + args += " -z"; + } + String result = callHostPlugin(conn, "vmops", "saveDhcpEntry", "args", args); if (result == null || result.isEmpty()) { return new Answer(cmd, false, "DhcpEntry failed"); @@ -3252,8 +3271,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException { - vm.setMemoryLimits(conn, memsize, memsize, memsize, memsize); + protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException { + vm.setMemoryLimits(conn, maxMemsize, maxMemsize, minMemsize, maxMemsize); } private void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException { @@ -3842,22 +3861,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe throw new CloudRuntimeException("Could not find an available slot in VM with name to attach a new disk."); } - - protected String getUnusedVIFNum(Connection conn, VM vm) { - String vmName = ""; - try { - vmName = vm.getNameLabel(conn); - Set allowedVIFDevices = vm.getAllowedVIFDevices(conn); - if (allowedVIFDevices.size() > 0) { - return allowedVIFDevices.iterator().next(); - } - } catch (Exception e) { - String msg = "getUnusedVIFNum failed due to " + e.toString(); - s_logger.warn(msg, e); - } - throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName + " to plug a VIF"); - } - protected String 
callHostPlugin(Connection conn, String plugin, String cmd, String... params) { Map args = new HashMap(); String msg; @@ -3989,22 +3992,29 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } protected String getLowestAvailableVIFDeviceNum(Connection conn, VM vm) { + String vmName = ""; try { - Set availableDeviceNums = vm.getAllowedVIFDevices(conn); - Iterator deviceNumsIterator = availableDeviceNums.iterator(); - List sortedDeviceNums = new ArrayList(); - - while (deviceNumsIterator.hasNext()) { - try { - sortedDeviceNums.add(Integer.valueOf(deviceNumsIterator.next())); + vmName = vm.getNameLabel(conn); + List usedDeviceNums = new ArrayList(); + Set vifs = vm.getVIFs(conn); + Iterator vifIter = vifs.iterator(); + while(vifIter.hasNext()){ + VIF vif = vifIter.next(); + try{ + usedDeviceNums.add(Integer.valueOf(vif.getDevice(conn))); } catch (NumberFormatException e) { - s_logger.debug("Obtained an invalid value for an available VIF device number for VM: " + vm.getNameLabel(conn)); - return null; + String msg = "Obtained an invalid value for an allocated VIF device number for VM: " + vmName; + s_logger.debug(msg, e); + throw new CloudRuntimeException(msg); } } - Collections.sort(sortedDeviceNums); - return String.valueOf(sortedDeviceNums.get(0)); + for(Integer i=0; i< _maxNics; i++){ + if(!usedDeviceNums.contains(i)){ + s_logger.debug("Lowest available Vif device number: "+i+" for VM: " + vmName); + return i.toString(); + } + } } catch (XmlRpcException e) { String msg = "Caught XmlRpcException: " + e.getMessage(); s_logger.warn(msg, e); @@ -4013,7 +4023,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn(msg, e); } - return null; + throw new CloudRuntimeException("Could not find available VIF slot in VM with name: " + vmName); } protected VDI mount(Connection conn, StoragePoolType pooltype, String volumeFolder, String volumePath) { @@ -5458,7 +5468,8 @@ public abstract class 
CitrixResourceBase implements ServerResource, HypervisorRe "signature", cmd.getSignature(), "seqno", Long.toString(cmd.getSeqNum()), "deflated", "true", - "rules", cmd.compressStringifiedRules()); + "rules", cmd.compressStringifiedRules(), + "secIps", cmd.getSecIpsString()); if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { s_logger.warn("Failed to program network rules for vm " + cmd.getVmName()); @@ -5655,6 +5666,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe value = (String) params.get("migratewait"); _migratewait = NumbersUtil.parseInt(value, 3600); + _maxNics = NumbersUtil.parseInt((String) params.get("xen.nics.max"), 7); + if (_pod == null) { throw new ConfigurationException("Unable to get the pod"); } @@ -7508,6 +7521,19 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return new Answer(cmd, success, ""); } + private Answer execute(NetworkRulesVmSecondaryIpCommand cmd) { + boolean success = true; + Connection conn = getConnection(); + + String result = callHostPlugin(conn, "vmops", "network_rules_vmSecondaryIp", "vmName", cmd.getVmName(), "vmMac", cmd.getVmMac(), "vmSecIp", cmd.getVmSecIp(), "action", + cmd.getAction()); + if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) { + success = false; + } + + return new Answer(cmd, success, ""); + } + protected SetFirewallRulesAnswer execute(SetFirewallRulesCommand cmd) { String[] results = new String[cmd.getRules().length]; String callResult; @@ -7765,7 +7791,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn(msg); return new PlugNicAnswer(cmd, false, msg); } - String deviceId = getUnusedVIFNum(conn, vm); + String deviceId = getLowestAvailableVIFDeviceNum(conn, vm); nic.setDeviceId(Integer.parseInt(deviceId)); vif = createVif(conn, vmName, vm, nic); vif.plug(conn); diff --git 
a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java index 0ce91bc58e2..7a958708e76 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpServerResource.java @@ -72,17 +72,17 @@ public class XcpServerResource extends CitrixResourceBase { } @Override - protected void setMemory(Connection conn, VM vm, long memsize) throws XmlRpcException, XenAPIException { + protected void setMemory(Connection conn, VM vm, long minMemsize, long maxMemsize) throws XmlRpcException, XenAPIException { vm.setMemoryStaticMin(conn, 33554432L); - vm.setMemoryDynamicMin(conn, 33554432L); - vm.setMemoryDynamicMax(conn, 33554432L); + //vm.setMemoryDynamicMin(conn, 33554432L); + //vm.setMemoryDynamicMax(conn, 33554432L); vm.setMemoryStaticMax(conn, 33554432L); - vm.setMemoryStaticMax(conn, memsize); - vm.setMemoryDynamicMax(conn, memsize); - vm.setMemoryDynamicMin(conn, memsize); - vm.setMemoryStaticMin(conn, memsize); + //vm.setMemoryStaticMax(conn, maxMemsize ); + vm.setMemoryDynamicMax(conn, maxMemsize ); + vm.setMemoryDynamicMin(conn, minMemsize ); + //vm.setMemoryStaticMin(conn, maxMemsize ); } @@ -99,7 +99,7 @@ public class XcpServerResource extends CitrixResourceBase { return answer; } catch (Exception ex) { s_logger.warn("Failed to get network usage stats due to ", ex); - return new NetworkUsageAnswer(cmd, ex); + return new NetworkUsageAnswer(cmd, ex); } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java index 58b8a035171..7040311d04e 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java +++ 
b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java @@ -136,9 +136,9 @@ public class XenServer56FP1Resource extends XenServer56Resource { record.nameLabel = vmSpec.getName(); record.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY; record.actionsAfterShutdown = Types.OnNormalExit.DESTROY; - record.memoryDynamicMax = vmSpec.getMinRam(); + record.memoryDynamicMax = vmSpec.getMaxRam(); record.memoryDynamicMin = vmSpec.getMinRam(); - record.memoryStaticMax = vmSpec.getMinRam(); + record.memoryStaticMax = vmSpec.getMaxRam(); record.memoryStaticMin = vmSpec.getMinRam(); record.VCPUsMax = (long) vmSpec.getCpus(); record.VCPUsAtStartup = (long) vmSpec.getCpus(); @@ -152,7 +152,7 @@ public class XenServer56FP1Resource extends XenServer56Resource { Map vcpuParams = new HashMap(); - Integer speed = vmSpec.getSpeed(); + Integer speed = vmSpec.getMinSpeed(); if (speed != null) { int cpuWeight = _maxWeight; // cpu_weight diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java index 17ad3e62383..8d267b114fa 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java @@ -40,7 +40,7 @@ public class XenServer610Resource extends XenServer56FP1Resource { @Override protected String getGuestOsType(String stdType, boolean bootFromCD) { - return CitrixHelper.getXenServer602GuestOsType(stdType, bootFromCD); + return CitrixHelper.getXenServer610GuestOsType(stdType, bootFromCD); } @Override diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java index 70660d2bb69..9c291491114 100644 --- 
a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageResource.java @@ -144,6 +144,7 @@ public class XenServerStorageResource { try { obj = Decoder.decode(uriString); + DecodedDataStore store = obj.getStore(); if (obj.getObjType().equalsIgnoreCase("template") && store.getRole().equalsIgnoreCase("image")) { return getTemplateSize(cmd, obj.getPath()); @@ -224,6 +225,7 @@ public class XenServerStorageResource { } protected SR getNfsSR(Connection conn, DecodedDataStore store) { + Map deviceConfig = new HashMap(); String uuid = store.getUuid(); @@ -410,6 +412,7 @@ public class XenServerStorageResource { try { DecodedDataObject obj = Decoder.decode(storeUrl); DecodedDataStore store = obj.getStore(); + if (store.getScheme().equalsIgnoreCase("nfs")) { SR sr = getNfsSR(conn, store); } else if (store.getScheme().equalsIgnoreCase("iscsi")) { @@ -570,7 +573,9 @@ public class XenServerStorageResource { Connection conn = hypervisorResource.getConnection(); try { DecodedDataObject obj = Decoder.decode(dataStoreUri); + DecodedDataStore store = obj.getStore(); + SR sr = hypervisorResource.getStorageRepository(conn, store.getUuid()); hypervisorResource.setupHeartbeatSr(conn, sr, false); long capacity = sr.getPhysicalSize(conn); diff --git a/plugins/network-elements/bigswitch-vns/pom.xml b/plugins/network-elements/bigswitch-vns/pom.xml index 32650f31497..95a7692ce75 100644 --- a/plugins/network-elements/bigswitch-vns/pom.xml +++ b/plugins/network-elements/bigswitch-vns/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/dns-notifier/pom.xml b/plugins/network-elements/dns-notifier/pom.xml index ea35d788653..1dea4b933d1 100644 --- a/plugins/network-elements/dns-notifier/pom.xml +++ b/plugins/network-elements/dns-notifier/pom.xml @@ -22,7 +22,7 @@ 
org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml org.apache.cloudstack diff --git a/plugins/network-elements/elastic-loadbalancer/pom.xml b/plugins/network-elements/elastic-loadbalancer/pom.xml index dac500d8fd2..4d02a61e93e 100644 --- a/plugins/network-elements/elastic-loadbalancer/pom.xml +++ b/plugins/network-elements/elastic-loadbalancer/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/f5/pom.xml b/plugins/network-elements/f5/pom.xml index bf40332cfbb..d0f8133f2b4 100644 --- a/plugins/network-elements/f5/pom.xml +++ b/plugins/network-elements/f5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/juniper-srx/pom.xml b/plugins/network-elements/juniper-srx/pom.xml index 6040720da6e..28f2c29eda7 100644 --- a/plugins/network-elements/juniper-srx/pom.xml +++ b/plugins/network-elements/juniper-srx/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/netscaler/pom.xml b/plugins/network-elements/netscaler/pom.xml index b11009d8b1a..1eb73a236dc 100644 --- a/plugins/network-elements/netscaler/pom.xml +++ b/plugins/network-elements/netscaler/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index 8f902df703f..c1c735aa270 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -638,14 +638,11 @@ StaticNatServiceProvider { @Override public 
IpDeployer getIpDeployer(Network network) { - ExternalLoadBalancerDeviceVO lbDevice = getExternalLoadBalancerForNetwork(network); - if (lbDevice == null) { - s_logger.error("Cannot find external load balanacer for network " + network.getName()); - return null; - } + if (_networkMgr.isNetworkInlineMode(network)) { return getIpDeployerForInlineMode(network); } + return this; } diff --git a/plugins/network-elements/nicira-nvp/pom.xml b/plugins/network-elements/nicira-nvp/pom.xml index 70f85607e4a..4e05a4f9fae 100644 --- a/plugins/network-elements/nicira-nvp/pom.xml +++ b/plugins/network-elements/nicira-nvp/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java b/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java index 3ba6167a47d..b78d165ddd6 100644 --- a/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java +++ b/plugins/network-elements/nicira-nvp/src/com/cloud/network/guru/NiciraNvpGuestNetworkGuru.java @@ -151,7 +151,12 @@ public class NiciraNvpGuestNetworkGuru extends GuestNetworkGuru { long dcId = dest.getDataCenter().getId(); //get physical network id - long physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + Long physicalNetworkId = network.getPhysicalNetworkId(); + + // physical network id can be null in Guest Network in Basic zone, so locate the physical network + if (physicalNetworkId == null) { + physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + } NetworkVO implemented = new NetworkVO(network.getTrafficType(), network.getMode(), network.getBroadcastDomainType(), network.getNetworkOfferingId(), State.Allocated, network.getDataCenterId(), physicalNetworkId); diff --git 
a/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java b/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java index f86e705336c..0e4f8fd4f84 100644 --- a/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java +++ b/plugins/network-elements/nicira-nvp/test/com/cloud/network/guru/NiciraNvpGuestNetworkGuruTest.java @@ -252,6 +252,7 @@ public class NiciraNvpGuestNetworkGuruTest { NetworkVO network = mock(NetworkVO.class); when(network.getName()).thenReturn("testnetwork"); when(network.getState()).thenReturn(State.Implementing); + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); @@ -308,7 +309,7 @@ public class NiciraNvpGuestNetworkGuruTest { when(network.getState()).thenReturn(State.Implementing); when(network.getGateway()).thenReturn("10.1.1.1"); when(network.getCidr()).thenReturn("10.1.1.0/24"); - + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); @@ -365,6 +366,7 @@ public class NiciraNvpGuestNetworkGuruTest { NetworkVO network = mock(NetworkVO.class); when(network.getName()).thenReturn("testnetwork"); when(network.getState()).thenReturn(State.Implementing); + when(network.getPhysicalNetworkId()).thenReturn(42L); DeployDestination dest = mock(DeployDestination.class); diff --git a/plugins/network-elements/ovs/pom.xml b/plugins/network-elements/ovs/pom.xml index ab7ffab8465..7964b931e19 100644 --- a/plugins/network-elements/ovs/pom.xml +++ b/plugins/network-elements/ovs/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/parent/pom.xml b/plugins/parent/pom.xml deleted file mode 100644 index 3a0bf3ce3cf..00000000000 --- a/plugins/parent/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - 4.0.0 - cloud-plugin-parent - Apache CloudStack 
Plugin POM - pom - - com.cloud - cloud-parent - 4.0.0-SNAPSHOT - ../../parent/pom.xml - - - - com.cloud - cloud-server - ${project.version} - - - - install - src - - diff --git a/plugins/pom.xml b/plugins/pom.xml index 02459b4c1b5..88f617b4560 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT install diff --git a/plugins/storage-allocators/random/pom.xml b/plugins/storage-allocators/random/pom.xml index b476d1de49f..6b91908271a 100644 --- a/plugins/storage-allocators/random/pom.xml +++ b/plugins/storage-allocators/random/pom.xml @@ -16,14 +16,22 @@ specific language governing permissions and limitations under the License. --> - + 4.0.0 cloud-plugin-storage-allocator-random Apache CloudStack Plugin - Storage Allocator Random org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml + + + org.apache.cloudstack + cloud-engine-storage + ${project.version} + + diff --git a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java similarity index 73% rename from plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java rename to plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java index 812867ee69d..cbe6647ded8 100644 --- a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java +++ b/plugins/storage-allocators/random/src/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package com.cloud.storage.allocator; +package org.apache.cloudstack.storage.allocator; import java.util.ArrayList; import java.util.Collections; @@ -21,44 +21,32 @@ import java.util.List; import javax.ejb.Local; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.server.StatsCollector; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; -@Component @Local(value=StoragePoolAllocator.class) public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class); @Override - public boolean allocatorIsCorrectType(DiskProfile dskCh) { - return true; - } - - @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + public List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { List suitablePools = new ArrayList(); - VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); - // Check that the allocator type is correct - if (!allocatorIsCorrectType(dskCh)) { - return suitablePools; - } long dcId = plan.getDataCenterId(); Long podId = plan.getPodId(); Long clusterId = plan.getClusterId(); s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); - List pools = _storagePoolDao.listBy(dcId, podId, clusterId); + List pools = _storagePoolDao.listBy(dcId, 
podId, clusterId, ScopeType.CLUSTER); if (pools.size() == 0) { if (s_logger.isDebugEnabled()) { s_logger.debug("No storage pools available for allocation, returning"); @@ -66,8 +54,6 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { return suitablePools; } - StatsCollector sc = StatsCollector.getInstance(); - Collections.shuffle(pools); if (s_logger.isDebugEnabled()) { s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation"); @@ -76,8 +62,10 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator { if(suitablePools.size() == returnUpTo){ break; } - if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) { - suitablePools.add(pool); + StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + + if (filter(avoid, pol, dskCh, plan)) { + suitablePools.add(pol); } } diff --git a/plugins/storage/image/s3/pom.xml b/plugins/storage/image/s3/pom.xml index 4ea6517527b..7ab0d3e9301 100644 --- a/plugins/storage/image/s3/pom.xml +++ b/plugins/storage/image/s3/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/solidfire/pom.xml b/plugins/storage/volume/solidfire/pom.xml index cbbc54c368d..9db0685e91b 100644 --- a/plugins/storage/volume/solidfire/pom.xml +++ b/plugins/storage/volume/solidfire/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index 3244c7aa4ed..f31126c2aeb 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ 
b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -24,9 +24,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.snapshot.SnapshotInfo; -import org.apache.cloudstack.storage.volume.PrimaryDataStoreDriver; public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @@ -72,17 +72,25 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { return false; } - @Override - public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { - // TODO Auto-generated method stub - - } - @Override public void revertSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { // TODO Auto-generated method stub } + @Override + public void resize(DataObject data, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + + @Override + public void takeSnapshot(SnapshotInfo snapshot, + AsyncCompletionCallback callback) { + // TODO Auto-generated method stub + + } + } diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java index 6f0b2e73d3a..91c446fe5ae 100644 --- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java +++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/VolumeTest.java @@ -79,7 +79,7 @@ public class VolumeTest { 
public void setUp() { //create data center DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24", - null, null, NetworkType.Basic, null, null, true, true); + null, null, NetworkType.Basic, null, null, true, true, null, null); dc = dcDao.persist(dc); dcId = dc.getId(); //create pod @@ -148,4 +148,4 @@ public class VolumeTest { public void createPrimaryDataStoreTest() { createPrimaryDataStore(); } -} \ No newline at end of file +} diff --git a/plugins/user-authenticators/ldap/pom.xml b/plugins/user-authenticators/ldap/pom.xml index 05e9466d825..5c45f1177b6 100644 --- a/plugins/user-authenticators/ldap/pom.xml +++ b/plugins/user-authenticators/ldap/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java b/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java index fb0273e6ea3..61eebe5fc93 100644 --- a/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java +++ b/plugins/user-authenticators/ldap/src/com/cloud/server/auth/LDAPUserAuthenticator.java @@ -66,7 +66,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { String port = _configDao.getValue(LDAPParams.port.toString()); String queryFilter = _configDao.getValue(LDAPParams.queryfilter.toString()); String searchBase = _configDao.getValue(LDAPParams.searchbase.toString()); - String useSSL = _configDao.getValue(LDAPParams.usessl.toString()); + Boolean useSSL = Boolean.valueOf(_configDao.getValue(LDAPParams.usessl.toString())); String bindDN = _configDao.getValue(LDAPParams.dn.toString()); String bindPasswd = _configDao.getValue(LDAPParams.passwd.toString()); String trustStore = _configDao.getValue(LDAPParams.truststore.toString()); @@ -77,7 +77,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator 
{ Hashtable env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY,"com.sun.jndi.ldap.LdapCtxFactory"); String protocol = "ldap://" ; - if (new Boolean(useSSL)){ + if (useSSL){ env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol="ldaps://" ; System.setProperty("javax.net.ssl.trustStore", trustStore); @@ -123,7 +123,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY,"com.sun.jndi.ldap.LdapCtxFactory"); protocol = "ldap://" ; - if (new Boolean(useSSL)){ + if (useSSL){ env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol="ldaps://" ; } @@ -135,8 +135,7 @@ public class LDAPUserAuthenticator extends DefaultUserAuthenticator { ctx.close(); } catch (NamingException ne) { - ne.printStackTrace(); - s_logger.warn("Authentication failed due to " + ne.getMessage()); + s_logger.warn("Authentication Failed ! " + ne.getMessage() + (ne.getCause() != null ? ("; Caused by:" + ne.getCause().getMessage()) : "")); return false; } catch (Exception e){ diff --git a/plugins/user-authenticators/md5/pom.xml b/plugins/user-authenticators/md5/pom.xml index f358f8f1c21..605014ff953 100644 --- a/plugins/user-authenticators/md5/pom.xml +++ b/plugins/user-authenticators/md5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/plain-text/pom.xml b/plugins/user-authenticators/plain-text/pom.xml index 6406fa92489..60336ebb22d 100644 --- a/plugins/user-authenticators/plain-text/pom.xml +++ b/plugins/user-authenticators/plain-text/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/sha256salted/pom.xml b/plugins/user-authenticators/sha256salted/pom.xml index 3f530f76e17..22e97632e3d 100644 --- a/plugins/user-authenticators/sha256salted/pom.xml +++ b/plugins/user-authenticators/sha256salted/pom.xml @@ 
-23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../../pom.xml diff --git a/pom.xml b/pom.xml index 820e9380cf1..86482d7a77b 100644 --- a/pom.xml +++ b/pom.xml @@ -28,7 +28,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT pom Apache CloudStack Apache CloudStack is an IaaS (“Infrastracture as a Serviceâ€) cloud orchestration platform. @@ -82,14 +82,16 @@ 2.4 1.2 1.0-20081010.060147 + 5.1 3.1.2.RELEASE - 4.1 1.9.5 1.3.21.1 2.6 1.4 0.9.8 - 0.8 + 0.10 + build/replace.properties + 0.4.9 @@ -161,9 +163,9 @@ usage utils deps/XenServerJava + engine plugins patches - engine framework services test @@ -343,6 +345,7 @@ **/*.patch **/.classpath **/.project + **/.idea/** **/*.iml **/.settings/** .metadata/** @@ -360,6 +363,7 @@ **/*.zip **/target/** **/.vagrant + awsapi/overlays/** build/build.number services/console-proxy/server/js/jquery.js debian/compat @@ -377,8 +381,15 @@ tools/appliance/definitions/systemvmtemplate/definition.rb tools/appliance/definitions/systemvmtemplate/preseed.cfg tools/appliance/definitions/systemvmtemplate/zerodisk.sh + tools/appliance/definitions/systemvmtemplate64/base.sh + tools/appliance/definitions/systemvmtemplate64/cleanup.sh + tools/appliance/definitions/systemvmtemplate64/definition.rb + tools/appliance/definitions/systemvmtemplate64/preseed.cfg + tools/appliance/definitions/systemvmtemplate64/zerodisk.sh + tools/cli/cloudmonkey.egg-info/* tools/devcloud/src/deps/boxes/basebox-build/definition.rb tools/devcloud/src/deps/boxes/basebox-build/preseed.cfg + tools/marvin/Marvin.egg-info/* ui/lib/flot/jquery.colorhelpers.js ui/lib/flot/jquery.flot.crosshair.js ui/lib/flot/jquery.flot.fillbetween.js @@ -402,6 +413,7 @@ ui/lib/qunit/qunit.css ui/lib/qunit/qunit.js ui/lib/reset.css + ui/lib/require.js waf patches/systemvm/debian/systemvm.vmx patches/systemvm/debian/config/root/.ssh/authorized_keys @@ -431,7 +443,6 @@ patches/systemvm/debian/config/var/www/html/userdata/.htaccess 
patches/systemvm/debian/config/var/www/html/latest/.htaccess patches/systemvm/debian/vpn/etc/ipsec.d/l2tp.conf - @@ -444,7 +455,8 @@ ${cs.jdk.version} true 128m - 512m + 512m + -XDignore.symbol.file=true @@ -504,13 +516,12 @@ developer + + tools/devcloud/devcloud.cfg + developer - tools/apidoc - tools/devcloud - tools/devcloud-kvm - tools/marvin - tools/cli + tools @@ -524,5 +535,113 @@ vmware-base + + simulator + + + deploydb-simulator + + + + + + org.codehaus.mojo + properties-maven-plugin + 1.0-alpha-2 + + + initialize + + read-project-properties + + + + ${project.basedir}/utils/conf/db.properties + ${project.basedir}/utils/conf/db.properties.override + + true + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + + mysql + mysql-connector-java + ${cs.mysql.version} + + + commons-dbcp + commons-dbcp + ${cs.dbcp.version} + + + commons-pool + commons-pool + ${cs.pool.version} + + + org.jasypt + jasypt + ${cs.jasypt.version} + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-server + ${project.version} + + + + + process-resources + create-schema + + java + + + + + false + true + + org.apache.cloudstack + cloud-server + + com.cloud.upgrade.DatabaseCreator + + + ${project.basedir}/utils/conf/db.properties + ${project.basedir}/utils/conf/db.properties.override + + ${basedir}/target/db/create-schema-simulator.sql + ${basedir}/target/db/templates.simulator.sql + + com.cloud.upgrade.DatabaseUpgradeChecker + --database=simulator + --rootpassword=${db.root.password} + + + + + catalina.home + ${project.basedir}/utils + + + + + + + diff --git a/python/lib/cloudutils/utilities.py b/python/lib/cloudutils/utilities.py index c9d1e339f72..739a48385a0 100755 --- a/python/lib/cloudutils/utilities.py +++ b/python/lib/cloudutils/utilities.py @@ -122,7 +122,14 @@ class Distribution: if kernel.find("2.6.32") != -1: self.release = "10.04" self.arch = bash("uname -m").getStdout() - + elif os.path.exists("/usr/bin/lsb_release"): + 
o = bash("/usr/bin/lsb_release -i") + distributor = o.getStdout().split(":\t")[1] + if "Debian" in distributor: + # This obviously needs a rewrite at some point + self.distro = "Ubuntu" + else: + raise UnknownSystemException(distributor) else: raise UnknownSystemException diff --git a/scripts/network/domr/dhcp_entry.sh b/scripts/network/domr/dhcp_entry.sh index e417f7273a2..fb5a1669e72 100755 --- a/scripts/network/domr/dhcp_entry.sh +++ b/scripts/network/domr/dhcp_entry.sh @@ -22,7 +22,7 @@ # @VERSION@ usage() { - printf "Usage: %s: -r -m -v -n -s -d -N -6 -u \n" $(basename $0) >&2 + printf "Usage: %s: -r -m -v -n -s -d -N -6 -u [-z]\n" $(basename $0) >&2 exit 2 } @@ -40,7 +40,7 @@ duid= opts= -while getopts 'r:m:v:n:d:s:N:6:u:' OPTION +while getopts 'r:m:v:n:d:s:N:6:u:z' OPTION do case $OPTION in r) domrIp="$OPTARG" @@ -69,6 +69,8 @@ do u) duid="$OPTARG" opts="$opts -u $duid" ;; + z) opts="$opts -N" + ;; ?) usage exit 1 ;; diff --git a/scripts/network/ping/baremetal_user_data.py b/scripts/network/ping/baremetal_user_data.py new file mode 100755 index 00000000000..a8ce32cb3ba --- /dev/null +++ b/scripts/network/ping/baremetal_user_data.py @@ -0,0 +1,104 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +''' +Created on Jul 2, 2012 + +@author: frank +''' +import sys +import os +import os.path +import base64 + +HTML_ROOT = "/var/www/html/" + +def writeIfNotHere(fileName, texts): + if not os.path.exists(fileName): + entries = [] + else: + f = open(fileName, 'r') + entries = f.readlines() + f.close() + + texts = [ "%s\n" % t for t in texts ] + need = False + for t in texts: + if not t in entries: + entries.append(t) + need = True + + if need: + f = open(fileName, 'w') + f.write(''.join(entries)) + f.close() + +def createRedirectEntry(vmIp, folder, filename): + entry = "RewriteRule ^%s$ ../%s/%%{REMOTE_ADDR}/%s [L,NC,QSA]" % (filename, folder, filename) + htaccessFolder="/var/www/html/latest" + htaccessFile=os.path.join(htaccessFolder, ".htaccess") + if not os.path.exists(htaccessFolder): + os.makedirs(htaccessFolder) + writeIfNotHere(htaccessFile, ["Options +FollowSymLinks", "RewriteEngine On", entry]) + + htaccessFolder = os.path.join("/var/www/html/", folder, vmIp) + if not os.path.exists(htaccessFolder): + os.makedirs(htaccessFolder) + htaccessFile=os.path.join(htaccessFolder, ".htaccess") + entry="Options -Indexes\nOrder Deny,Allow\nDeny from all\nAllow from %s" % vmIp + f = open(htaccessFile, 'w') + f.write(entry) + f.close() + + if folder in ['metadata', 'meta-data']: + entry1="RewriteRule ^meta-data/(.+)$ ../%s/%%{REMOTE_ADDR}/$1 [L,NC,QSA]" % folder + htaccessFolder="/var/www/html/latest" + htaccessFile=os.path.join(htaccessFolder, ".htaccess") + entry2="RewriteRule ^meta-data/$ ../%s/%%{REMOTE_ADDR}/meta-data [L,NC,QSA]" % folder + writeIfNotHere(htaccessFile, [entry1, entry2]) + + +def addUserData(vmIp, folder, fileName, contents): + + baseFolder = os.path.join(HTML_ROOT, folder, vmIp) + if not os.path.exists(baseFolder): + os.makedirs(baseFolder) + + createRedirectEntry(vmIp, folder, fileName) + + datafileName = os.path.join(HTML_ROOT, folder, vmIp, fileName) + metaManifest = os.path.join(HTML_ROOT, folder, vmIp, "meta-data") + if folder == "userdata": + 
if contents != "none": + contents = base64.urlsafe_b64decode(contents) + else: + contents = "" + + f = open(datafileName, 'w') + f.write(contents) + f.close() + + if folder == "metadata" or folder == "meta-data": + writeIfNotHere(metaManifest, fileName) + +if __name__ == '__main__': + string = sys.argv[1] + allEntires = string.split(";") + for entry in allEntires: + (vmIp, folder, fileName, contents) = entry.split(',', 3) + addUserData(vmIp, folder, fileName, contents) + sys.exit(0) diff --git a/scripts/network/ping/prepare_kickstart_bootfile.py b/scripts/network/ping/prepare_kickstart_bootfile.py new file mode 100755 index 00000000000..4378293b43a --- /dev/null +++ b/scripts/network/ping/prepare_kickstart_bootfile.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# Copyright 2012 Citrix Systems, Inc. Licensed under the +# Apache License, Version 2.0 (the "License"); you may not use this +# file except in compliance with the License. Citrix Systems, Inc. +# reserves all rights not expressly granted by the License. +# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Automatically generated by addcopyright.py at 04/03/2012 + + + + + +# Usage: prepare_tftp_bootfile.py tftp_dir mac cifs_server share directory image_to_restore cifs_username cifs_password +import os, sys +from sys import exit +from os import makedirs +from os.path import exists, join + +fmt1 = '''DEFAULT default +PROMPT 1 +TIMEOUT 26 +DISPLAY boot.msg +LABEL default +KERNEL %s +APPEND ramdisk_size=66000 initrd=%s ksdevice=%s ks=%s +''' +fmt2 = '''DEFAULT default +PROMPT 1 +TIMEOUT 26 +DISPLAY boot.msg +LABEL default +KERNEL %s +APPEND ramdisk_size=66000 initrd=%s ks=%s +''' + +tftp_dir = '' +mac = '' +kernel = '' +initrd = '' +ks_file = '' +ks_device = '' + +def prepare(): + try: + pxelinux = join(tftp_dir, "pxelinux.cfg") + if exists(pxelinux) == False: + makedirs(pxelinux) + + cfg_name = "01-" + mac.replace(':','-').lower() + cfg_path = join(pxelinux, cfg_name) + f = open(cfg_path, "w") + if ks_device == '': + stuff = fmt2 % (kernel, initrd, ks_file) + else: + stuff = fmt1 % (kernel, initrd, ks_device, ks_file) + f.write(stuff) + f.close() + return 0 + except Exception, e: + print e + return 1 + + +if __name__ == "__main__": + if len(sys.argv) < 7: + print "Usage: prepare_kickstart_bootfile.py tftp_dir mac kernel initrd ks_file ks_device" + exit(1) + + (tftp_dir, mac, kernel, initrd, ks_file, ks_device) = sys.argv[1:] + + ret = prepare() + exit(ret) diff --git a/scripts/network/ping/prepare_kickstart_kernel_initrd.py b/scripts/network/ping/prepare_kickstart_kernel_initrd.py new file mode 100755 index 00000000000..ff618480e69 --- /dev/null +++ b/scripts/network/ping/prepare_kickstart_kernel_initrd.py @@ -0,0 +1,75 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import sys +import tempfile +import os.path +import os + +kernel = None +initrd = None +copy_to = None + +def cmd(cmdstr, err=True): + print cmdstr + if os.system(cmdstr) != 0 and err: + raise Exception("Failed to run shell command: %s" % cmdstr) + +def prepare(): + global kernel, initrd, copy_to + try: + k = os.path.join(copy_to, "vmlinuz") + i = os.path.join(copy_to, "initrd.img") + if os.path.exists(k) and os.path.exists(i): + print "Having template(%s) prepared already, skip copying" % copy_to + return 0 + else: + if not os.path.exists(copy_to): + os.makedirs(copy_to) + + + def copy_from_nfs(src, dst): + mnt_path = tempfile.mkdtemp() + try: + nfs_path = os.path.dirname(src) + filename = os.path.basename(src) + t = os.path.join(mnt_path, filename) + mnt = "mount %s %s" % (nfs_path, mnt_path) + cmd(mnt) + cp = "cp -f %s %s" % (t, dst) + cmd(cp) + finally: + umnt = "umount %s" % mnt_path + cmd(umnt, False) + rm = "rm -r %s" % mnt_path + cmd(rm, False) + + copy_from_nfs(kernel, copy_to) + copy_from_nfs(initrd, copy_to) + except Exception, e: + print e + return 1 + +if __name__ == "__main__": + if len(sys.argv) < 4: + print "Usage: prepare_kickstart_kerneal_initrd.py path_to_kernel path_to_initrd path_kernel_initrd_copy_to" + sys.exit(1) + + (kernel, initrd, copy_to) = sys.argv[1:] + sys.exit(prepare()) + diff --git a/scripts/storage/qcow2/resizevolume.sh b/scripts/storage/qcow2/resizevolume.sh 
index d15513e250f..6a5c91d9dd6 100755 --- a/scripts/storage/qcow2/resizevolume.sh +++ b/scripts/storage/qcow2/resizevolume.sh @@ -68,7 +68,7 @@ log() { if [ $shouldwelog -eq 1 ] then - echo "$d - $1" >> /var/log/cloud/agent/resizevolume.log + echo "$d - $1" >> /var/log/cloudstack/agent/resizevolume.log fi } @@ -237,7 +237,7 @@ do esac done -shouldwelog=1 #set this to 1 while debugging to get output in /var/log/cloud/agent/resizevolume.log +shouldwelog=1 #set this to 1 while debugging to get output in /var/log/cloudstack/agent/resizevolume.log if [ "$ptype" == "CLVM" ] then diff --git a/scripts/util/ipmi.py b/scripts/util/ipmi.py old mode 100644 new mode 100755 diff --git a/scripts/vm/hypervisor/kvm/patchviasocket.pl b/scripts/vm/hypervisor/kvm/patchviasocket.pl new file mode 100644 index 00000000000..443d6e4277b --- /dev/null +++ b/scripts/vm/hypervisor/kvm/patchviasocket.pl @@ -0,0 +1,58 @@ +#!/usr/bin/perl -w +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +############################################################# +# This script connects to the system vm socket and writes the +# authorized_keys and cmdline data to it. 
The system VM then +# reads it from /dev/vport0p1 in cloud_early_config +############################################################# + +use strict; +use Getopt::Std; +use IO::Socket; +$|=1; + +my $opts = {}; +getopt('pn',$opts); +my $name = $opts->{n}; +my $cmdline = $opts->{p}; +my $sockfile = "/var/lib/libvirt/qemu/$name.agent"; +my $pubkeyfile = "/root/.ssh/id_rsa.pub.cloud"; + +if (! -S $sockfile) { + print "ERROR: $sockfile socket not found\n"; + exit 1; +} + +if (! -f $pubkeyfile) { + print "ERROR: ssh public key not found on host at $pubkeyfile\n"; + exit 1; +} + +open(FILE,$pubkeyfile) or die "ERROR: unable to open $pubkeyfile - $^E"; +my $key = ; +close FILE; + +$cmdline =~ s/%/ /g; +my $msg = "pubkey:" . $key . "\ncmdline:" . $cmdline; + +my $socket = IO::Socket::UNIX->new(Peer=>$sockfile,Type=>SOCK_STREAM) + or die "ERROR: unable to connect to $sockfile - $^E\n"; +print $socket "$msg\r\n"; +close $socket; + diff --git a/scripts/vm/hypervisor/kvm/rundomrpre.sh b/scripts/vm/hypervisor/kvm/rundomrpre.sh deleted file mode 100755 index dc783749815..00000000000 --- a/scripts/vm/hypervisor/kvm/rundomrpre.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- - -# $Id: rundomrpre.sh 10427 2010-07-09 03:30:48Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/vm/hypervisor/kvm/rundomrpre.sh $ - -set -x -pubKey="/root/.ssh/id_rsa.pub.cloud" -mntpath() { - local vmname=$1 - if [ ! -d /mnt/$vmname ] - then - mkdir -p /mnt/$vmname - fi - echo "/mnt/$vmname" -} - -mount_raw_disk() { - local vmname=$1 - local datadisk=$2 - local path=$(mntpath $vmname) - if [ ! -f $datadisk -a ! -b $datadisk ] - then - printf "$datadisk doesn't exist" >&2 - return 2 - fi - - retry=10 - while [ $retry -gt 0 ] - do - if [ -b $datadisk ]; then - mount $datadisk $path &>/dev/null - ret=$? - else - mount $datadisk $path -o loop &>/dev/null - ret=$? - fi - sleep 10 - if [ $ret -gt 0 ] - then - sleep 5 - else - break - fi - retry=$(($retry-1)) - done - return 0 -} - -umount_raw_disk() { - local vmname=$1 - local datadisk=$2 - local path=$(mntpath $vmname) - - retry=10 - sync - while [ $retry -gt 0 ] - do - umount -d $path &>/dev/null - if [ $? -gt 0 ] - then - sleep 5 - else - rm -rf $path - break - fi - retry=$(($retry-1)) - done - return $? -} - -patch_all() { - local vmname=$1 - local cmdline=$2 - local datadisk=$3 - local path=$(mntpath $vmname) - - - if [ -f $pubKey ] - then - cp $pubKey $path/authorized_keys - fi - echo $cmdline > $path/cmdline - sed -i "s/%/\ /g" $path/cmdline - return 0 -} - -lflag= -dflag= - -while getopts 't:v:i:m:e:E:a:A:g:l:n:d:b:B:p:I:N:Mx:X:' OPTION -do - case $OPTION in - l) lflag=1 - vmname="$OPTARG" - ;; - t) tflag=1 - vmtype="$OPTARG" - ;; - d) dflag=1 - rootdisk="$OPTARG" - ;; - p) pflag=1 - cmdline="$OPTARG" - ;; - *) ;; - esac -done - -if [ "$lflag$tflag$dflag" != "111" ] -then - printf "Error: No enough parameter\n" >&2 - exit 1 -fi - -if [ "$vmtype" = "all" ] -then - mount_raw_disk $vmname $rootdisk - if [ $? -gt 0 ] - then - printf "Failed to mount $rootdisk" - exit $? - fi - - patch_all $vmname $cmdline $rootdisk - - umount_raw_disk $vmname $rootdisk - exit $? -fi - - -exit $? 
diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index db6f6d63ac9..31c9a59663e 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -610,6 +610,7 @@ def destroy_network_rules_for_vm(session, args): util.SMlog("Ignoring failure to delete egress chain " + vmchain_egress) remove_rule_log_for_vm(vm_name) + remove_secip_log_for_vm(vm_name) if 1 in [ vm_name.startswith(c) for c in ['r-', 's-', 'v-', 'l-'] ]: return 'true' @@ -749,6 +750,43 @@ def default_arp_antispoof(vm_chain, vifs, vm_ip, vm_mac): return 'true' + +@echo +def network_rules_vmSecondaryIp(session, args): + vm_name = args.pop('vmName') + vm_mac = args.pop('vmMac') + ip_secondary = args.pop('vmSecIp') + action = args.pop('action') + util.SMlog("vmMac = "+ vm_mac) + util.SMlog("vmName = "+ vm_name) + #action = "-A" + util.SMlog("action = "+ action) + try: + vm = session.xenapi.VM.get_by_name_label(vm_name) + if len(vm) != 1: + return 'false' + vm_rec = session.xenapi.VM.get_record(vm[0]) + vm_vifs = vm_rec.get('VIFs') + vifnums = [session.xenapi.VIF.get_record(vif).get('device') for vif in vm_vifs] + domid = vm_rec.get('domid') + except: + util.SMlog("### Failed to get domid or vif list for vm ##" + vm_name) + return 'false' + + if domid == '-1': + util.SMlog("### Failed to get domid for vm (-1): " + vm_name) + return 'false' + + vifs = ["vif" + domid + "." 
+ v for v in vifnums] + #vm_name = '-'.join(vm_name.split('-')[:-1]) + vmchain = chain_name(vm_name) + add_to_ipset(vmchain, [ip_secondary], action) + + #add arptables rules for the secondary ip + arp_rules_vmip(vmchain, vifs, [ip_secondary], vm_mac, action) + + return 'true' + @echo def default_network_rules_systemvm(session, args): vm_name = args.pop('vmName') @@ -798,6 +836,55 @@ def default_network_rules_systemvm(session, args): util.SMlog("Failed to log default network rules for systemvm, ignoring") return 'true' +@echo +def create_ipset_forvm (ipsetname): + result = True + try: + util.SMlog("Creating ipset chain .... " + ipsetname) + util.pread2(['ipset', '-F', ipsetname]) + util.pread2(['ipset', '-X', ipsetname]) + util.pread2(['ipset', '-N', ipsetname, 'iphash']) + except: + util.SMlog("ipset chain not exists creating.... " + ipsetname) + util.pread2(['ipset', '-N', ipsetname, 'iphash']) + + return result + +@echo +def add_to_ipset(ipsetname, ips, action): + result = True + for ip in ips: + try: + util.SMlog("vm ip " + ip) + util.pread2(['ipset', action, ipsetname, ip]) + except: + util.SMlog("vm ip alreday in ip set" + ip) + continue + + return result + +@echo +def arp_rules_vmip (vm_chain, vifs, ips, vm_mac, action): + try: + if action == "-A": + action = "-I" + for vif in vifs: + for vm_ip in ips: + #accept any arp requests to this vm as long as the request is for this vm's ip + util.pread2(['arptables', action, vm_chain, '-o', vif, '--opcode', 'Request', '--destination-ip', vm_ip, '-j', 'ACCEPT']) + #accept any arp replies to this vm as long as the mac and ip matches + util.pread2(['arptables', action, vm_chain, '-o', vif, '--opcode', 'Reply', '--destination-mac', vm_mac, '--destination-ip', vm_ip, '-j', 'ACCEPT']) + #accept arp replies into the bridge as long as the source mac and ips match the vm + util.pread2(['arptables', action, vm_chain, '-i', vif, '--opcode', 'Reply', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'ACCEPT']) + #accept any 
arp requests from this vm. In the future this can be restricted to deny attacks on hosts + #also important to restrict source ip and src mac in these requests as they can be used to update arp tables on destination + util.pread2(['arptables', action, vm_chain, '-i', vif, '--opcode', 'Request', '--source-mac', vm_mac, '--source-ip', vm_ip, '-j', 'RETURN']) + except: + util.SMlog("Failed to program arptables rules for ip") + return 'false' + + return 'true' + @echo def default_network_rules(session, args): @@ -805,6 +892,8 @@ def default_network_rules(session, args): vm_ip = args.pop('vmIP') vm_id = args.pop('vmID') vm_mac = args.pop('vmMAC') + sec_ips = args.pop("secIps") + action = "-A" try: vm = session.xenapi.VM.get_by_name_label(vm_name) @@ -854,6 +943,32 @@ def default_network_rules(session, args): except: util.pread2(['iptables', '-F', vmchain_default]) + vmipset = vm_name + #create ipset and add vm ips to that ip set + if create_ipset_forvm(vmipset) == False: + util.SMlog(" failed to create ipset for rule " + str(tokens)) + return 'false' + + #add primary nic ip to ipset + if add_to_ipset(vmipset, [vm_ip], action ) == False: + util.SMlog(" failed to add vm " + vm_ip + " ip to set ") + return 'false' + + #add secodnary nic ips to ipset + secIpSet = "1" + ips = sec_ips.split(':') + ips.pop() + if ips[0] == "0": + secIpSet = "0"; + + if secIpSet == "1": + util.SMlog("Adding ipset for secondary ips") + add_to_ipset(vmipset, ips, action) + if write_secip_log_for_vm(vm_name, sec_ips, vm_id) == False: + util.SMlog("Failed to log default network rules, ignoring") + + keyword = '--' + get_ipset_keyword() + try: for v in vifs: util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-j', vmchain_default]) @@ -861,16 +976,22 @@ def default_network_rules(session, args): #don't let vm spoof its ip address for v in vifs: - util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', 
'--physdev-in', v, '--source', vm_ip,'-p', 'udp', '--dport', '53', '-j', 'RETURN']) - util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', '!', vm_ip, '-j', 'DROP']) - util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '--destination', '!', vm_ip, '-j', 'DROP']) - util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', vm_ip, '-j', vmchain_egress]) + #util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '--source', vm_ip,'-p', 'udp', '--dport', '53', '-j', 'RETURN']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', keyword, vmipset, 'src', '-p', 'udp', '--dport', '53', '-j', 'RETURN']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', '!', keyword, vmipset, 'src', '-j', 'DROP']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-m', 'set', '!', keyword, vmipset, 'dst', '-j', 'DROP']) + util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-in', v, '-m', 'set', keyword, vmipset, 'src', '-j', vmchain_egress]) util.pread2(['iptables', '-A', vmchain_default, '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', v, '-j', vmchain]) except: util.SMlog("Failed to program default rules for vm " + vm_name) return 'false' default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac) + #add default arp rules for secondary ips; + if secIpSet == "1": + util.SMlog("Adding arp rules for sec ip") + arp_rules_vmip(vmchain, vifs, ips, vm_mac, action) + default_ebtables_antispoof_rules(vmchain, vifs, vm_ip, vm_mac) if write_rule_log_for_vm(vm_name, vm_id, vm_ip, domid, '_initial_', '-1', vm_mac) == 
False: @@ -994,10 +1115,45 @@ def network_rules_for_rebooted_vm(session, vmName): destroy_arptables_rules(vmchain) [vm_ip, vm_mac] = get_vm_mac_ip_from_log(vmchain) default_arp_antispoof(vmchain, vifs, vm_ip, vm_mac) + + #check wether the vm has secondary ips + if is_secondary_ips_set(vm_name) == True: + vmips = get_vm_sec_ips(vm_name) + #add arp rules for the secondaryp ip + for ip in vmips: + arp_rules_vmip(vmchain, vifs, [ip], vm_mac, "-A") + + default_ebtables_antispoof_rules(vmchain, vifs, vm_ip, vm_mac) rewrite_rule_log_for_vm(vm_name, curr_domid) return True + + +@echo +def get_vm_sec_ips(vm_name): + logfilename = "/var/run/cloud/" + vm_name +".ip" + + lines = (line.rstrip() for line in open(logfilename)) + for line in lines: + try: + [_vmName,_vmIP,_vmID] = line.split(',') + break + except ValueError,v: + [_vmName,_vmIP,_vmID] = line.split(',') + + _vmIPS = _vmIP.split(":")[:-1] + return _vmIPS + +@echo +def is_secondary_ips_set(vm_name): + logfilename = "/var/run/cloud/" + vm_name +".ip" + if not os.path.exists(logfilename): + return False + + return True + +@echo def rewrite_rule_log_for_vm(vm_name, new_domid): logfilename = "/var/run/cloud/" + vm_name +".log" if not os.path.exists(logfilename): @@ -1194,6 +1350,39 @@ def check_rule_log_for_vm(vmName, vmID, vmIP, domID, signature, seqno): return [reprogramDefault, reprogramChain, rewriteLog] +@echo +def write_secip_log_for_vm (vmName, secIps, vmId): + vm_name = vmName + logfilename = "/var/run/cloud/"+vm_name+".ip" + util.SMlog("Writing log to " + logfilename) + logf = open(logfilename, 'w') + output = ','.join([vmName, secIps, vmId]) + result = True + + try: + logf.write(output) + logf.write('\n') + except: + util.SMlog("Failed to write to rule log file " + logfilename) + result = False + + logf.close() + + return result + +@echo +def remove_secip_log_for_vm(vmName): + vm_name = vmName + logfilename = "/var/run/cloud/"+vm_name+".ip" + + result = True + try: + os.remove(logfilename) + except: + 
util.SMlog("Failed to delete rule log file " + logfilename) + result = False + + return result @echo def write_rule_log_for_vm(vmName, vmID, vmIP, domID, signature, seqno, vmMac='ff:ff:ff:ff:ff:ff'): @@ -1289,6 +1478,7 @@ def network_rules(session, args): vm_mac = args.get('vmMAC') signature = args.pop('signature') seqno = args.pop('seqno') + sec_ips = args.pop("secIps") deflated = 'false' if 'deflated' in args: deflated = args.pop('deflated') @@ -1469,6 +1659,7 @@ if __name__ == "__main__": "can_bridge_firewall":can_bridge_firewall, "default_network_rules":default_network_rules, "destroy_network_rules_for_vm":destroy_network_rules_for_vm, "default_network_rules_systemvm":default_network_rules_systemvm, + "network_rules_vmSecondaryIp":network_rules_vmSecondaryIp, "get_rule_logs_for_vms":get_rule_logs_for_vms, "setLinkLocalIP":setLinkLocalIP, "cleanup_rules":cleanup_rules, diff --git a/scripts/vm/hypervisor/xenserver/xenheartbeat.sh b/scripts/vm/hypervisor/xenserver/xenheartbeat.sh index 9cf2afe87f6..dd876ba4b79 100755 --- a/scripts/vm/hypervisor/xenserver/xenheartbeat.sh +++ b/scripts/vm/hypervisor/xenserver/xenheartbeat.sh @@ -6,9 +6,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -17,9 +17,9 @@ # under the License. #set -x - + usage() { - printf "Usage: %s [uuid of this host] [interval in seconds]\n" $(basename $0) >&2 + printf "Usage: %s [uuid of this host] [timeout in seconds] [interval in seconds]\n" $(basename $0) >&2 } @@ -33,16 +33,37 @@ if [ -z $2 ]; then exit 3 fi +if [ ! 
-z $3 ]; then + interval=$3 +else + interval=5 +fi + +if [ $interval -gt $2 ]; then + usage + exit 3 +fi + file=/opt/xensource/bin/heartbeat -while true -do - sleep $2 +lastdate=$(($(date +%s) + $interval)) + +while [ $(date +%s) -lt $(($lastdate + $2)) ] +do + sleep $interval if [ ! -f $file ] then continue fi + # test heartbeat file + dirs=$(cat $file | grep "sr-mount\|VG_XenStorage") + if [ ! -n "$dirs" ];then + /usr/bin/logger -t heartbeat "Problem with heartbeat, no iSCSI or NFS mount defined in $file!" + lastdate=$(date +%s) + continue + fi + # for iscsi dirs=$(cat $file | grep VG_XenStorage) for dir in $dirs @@ -51,13 +72,17 @@ do hb=$dir/hb-$1 date +%s | dd of=$hb count=100 bs=1 2>/dev/null if [ $? -ne 0 ]; then - /usr/bin/logger -t heartbeat "Problem with $hb" - reboot -f + /usr/bin/logger -t heartbeat "Potential problem with $hb: not reachable since $(($(date +%s) - $lastdate)) seconds" + else + lastdate=$(date +%s) fi else + /usr/bin/logger -t heartbeat "Potential problem with heartbeat, dir not found for $dir" + lastdate=$(date +%s) sed -i /${dir##/*/}/d $file fi done + # for nfs dirs=$(cat $file | grep sr-mount) for dir in $dirs @@ -67,13 +92,17 @@ do hb=$dir/hb-$1 date +%s | dd of=$hb count=100 bs=1 2>/dev/null if [ $? -ne 0 ]; then - /usr/bin/logger -t heartbeat "Problem with $hb" - reboot -f + /usr/bin/logger -t heartbeat "Potential problem with $hb: not reachable since $(($(date +%s) - $lastdate)) seconds" + else + lastdate=$(date +%s) fi else + /usr/bin/logger -t heartbeat "Potential problem with heartbeat, mount not found for $dir" + lastdate=$(date +%s) sed -i /${dir##/*/}/d $file fi done - done +/usr/bin/logger -t heartbeat "Problem with $hb: not reachable for $(($(date +%s) - $lastdate)) seconds, rebooting system!" 
+reboot -f diff --git a/scripts/vm/network/security_group.py b/scripts/vm/network/security_group.py index dcb01a7604b..ed4180a508e 100755 --- a/scripts/vm/network/security_group.py +++ b/scripts/vm/network/security_group.py @@ -38,13 +38,13 @@ def can_bridge_firewall(privnic): execute("which iptables") except: print "no iptables on your host machine" - exit(1) + sys.exit(1) try: execute("which ebtables") except: print "no ebtables on your host machine" - exit(2) + sys.exit(2) if not os.path.exists('/var/run/cloud'): @@ -797,7 +797,7 @@ def addFWFramework(brname): return False if __name__ == '__main__': - logging.basicConfig(filename="/var/log/cloud/security_group.log", format="%(asctime)s - %(message)s", level=logging.DEBUG) + logging.basicConfig(filename="/var/log/cloudstack/agent/security_group.log", format="%(asctime)s - %(message)s", level=logging.DEBUG) parser = OptionParser() parser.add_option("--vmname", dest="vmName") parser.add_option("--vmip", dest="vmIP") @@ -813,6 +813,9 @@ if __name__ == '__main__': parser.add_option("--hostIp", dest="hostIp") parser.add_option("--hostMacAddr", dest="hostMacAddr") (option, args) = parser.parse_args() + if len(args) == 0: + logging.debug("No command to execute") + sys.exit(1) cmd = args[0] if cmd == "can_bridge_firewall": can_bridge_firewall(args[1]) @@ -830,3 +833,6 @@ if __name__ == '__main__': cleanup_rules() elif cmd == "post_default_network_rules": post_default_network_rules(option.vmName, option.vmID, option.vmIP, option.vmMAC, option.vif, option.brname, option.dhcpSvr, option.hostIp, option.hostMacAddr) + else: + logging.debug("Unknown command: " + cmd) + sys.exit(1) diff --git a/server/conf/migration-components.xml b/server/conf/migration-components.xml index 90fbafa855a..2ba35c836c2 100644 --- a/server/conf/migration-components.xml +++ b/server/conf/migration-components.xml @@ -35,6 +35,7 @@ under the License. 
+ diff --git a/server/pom.xml b/server/pom.xml index 602ed5b977b..b39f731b4c9 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT @@ -77,8 +77,6 @@ org.apache.cloudstack cloud-utils ${project.version} - tests - test org.reflections @@ -108,16 +106,40 @@ test/resources + + %regex[.*[0-9]*To[0-9]*.*Test.*] + + + org.apache.maven.plugins + maven-compiler-plugin + + + default-testCompile + test-compile + + + **/com/cloud/upgrade/*.java + **/com/cloud/async/*.java + + + + testCompile + + + + org.apache.maven.plugins maven-surefire-plugin -Xmx1024m - com/cloud/upgrade/* + %regex[.*[0-9]*To[0-9]*.*Test.*] + com/cloud/upgrade/AdvanceZone223To224UpgradeTest + com/cloud/upgrade/AdvanceZone217To224UpgradeTest com/cloud/async/* com/cloud/cluster/* com/cloud/snapshot/* @@ -130,6 +152,43 @@ + + maven-antrun-plugin + 1.7 + + + generate-resource + generate-resources + + run + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/server/src/com/cloud/agent/AgentManager.java b/server/src/com/cloud/agent/AgentManager.java index 0053851a6fe..6c300ea76fa 100755 --- a/server/src/com/cloud/agent/AgentManager.java +++ b/server/src/com/cloud/agent/AgentManager.java @@ -156,4 +156,6 @@ public interface AgentManager extends Manager { boolean reconnect(long hostId); Answer sendToSSVM(Long dcId, final Command cmd); + + void disconnectWithInvestigation(final long hostId, final Status.Event event); } diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java index 2286dabfda1..c1bbb588faf 100755 --- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -39,6 +39,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; @@ -100,7 +101,6 @@ import com.cloud.resource.ServerResource; import com.cloud.server.ManagementService; import com.cloud.storage.StorageManager; import com.cloud.storage.StorageService; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.resource.DummySecondaryStorageResource; @@ -172,7 +172,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Inject protected ConfigurationDao _configDao = null; @Inject - protected StoragePoolDao _storagePoolDao = null; + protected PrimaryDataStoreDao _storagePoolDao = null; @Inject protected StoragePoolHostDao _storagePoolHostDao = null; @Inject @@ -218,7 +218,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected int _pingInterval; protected long _pingTimeout; - @Inject protected AgentMonitor _monitor; + @Inject protected AgentMonitorService _monitor; protected ExecutorService _executor; protected ThreadPoolExecutor _connectExecutor; @@ -230,7 +230,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - + final Map configs = _configDao.getConfiguration("AgentManager", params); _port = NumbersUtil.parseInt(configs.get("port"), 8250); final int workers = NumbersUtil.parseInt(configs.get("workers"), 5); @@ -668,7 +668,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl public boolean start() { startDirectlyConnectedHosts(); if (_monitor != null) { - _monitor.start(); + _monitor.startMonitoring(); } if (_connection != null) { _connection.start(); @@ -778,7 +778,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (host != null) { agentStatusTransitTo(host, Event.AgentDisconnected, _nodeId); } - } + } } if 
(forRebalance) { @@ -895,7 +895,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } catch (NoTransitionException ne) { /* Agent may be currently in status of Down, Alert, Removed, namely there is no next status for some events. * Why this can happen? Ask God not me. I hate there was no piece of comment for code handling race condition. - * God knew what race condition the code dealt with! + * God knew what race condition the code dealt with! */ } @@ -1046,6 +1046,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return false; } + if (host.getStatus() == Status.Disconnected) { + s_logger.info("Host is already disconnected, no work to be done"); + return true; + } + if (host.getStatus() != Status.Up && host.getStatus() != Status.Alert && host.getStatus() != Status.Rebalancing) { s_logger.info("Unable to disconnect host because it is not in the correct state: host=" + hostId + "; Status=" + host.getStatus()); return false; @@ -1197,12 +1202,12 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } Response response = null; - response = new Response(request, answers[0], _nodeId, -1); + response = new Response(request, answers[0], _nodeId, -1); try { link.send(response.toBytes()); } catch (ClosedChannelException e) { s_logger.debug("Failed to send startupanswer: " + e.toString()); - } + } _connectExecutor.execute(new HandleAgentConnectTask(link, cmds, request)); } @@ -1405,7 +1410,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } else { throw new CloudRuntimeException("Unkonwn TapAgentsAction " + action); } - } + } return true; } @@ -1450,7 +1455,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _executor.submit(new DisconnectTask(attache, event, false)); } - protected void disconnectWithInvestigation(AgentAttache attache, final Status.Event event) { + public void 
disconnectWithInvestigation(AgentAttache attache, final Status.Event event) { _executor.submit(new DisconnectTask(attache, event, true)); } @@ -1508,7 +1513,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl attache.setMaintenanceMode(true); // Now cancel all of the commands except for the active one. attache.cancelAllCommands(Status.Disconnected, false); - } + } } @Override diff --git a/server/src/com/cloud/agent/manager/AgentMonitor.java b/server/src/com/cloud/agent/manager/AgentMonitor.java index 97c0411d4e6..ae539fd9188 100755 --- a/server/src/com/cloud/agent/manager/AgentMonitor.java +++ b/server/src/com/cloud/agent/manager/AgentMonitor.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -56,13 +57,13 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; @Component -public class AgentMonitor extends Thread implements Listener { +public class AgentMonitor extends Thread implements AgentMonitorService { private static Logger s_logger = Logger.getLogger(AgentMonitor.class); private static Logger status_Logger = Logger.getLogger(Status.class); private long _pingTimeout; @Inject private HostDao _hostDao; private boolean _stop; - @Inject private AgentManagerImpl _agentMgr; + @Inject private AgentManager _agentMgr; @Inject private VMInstanceDao _vmDao; @Inject private DataCenterDao _dcDao = null; @Inject private HostPodDao _podDao = null; @@ -71,7 +72,7 @@ public class AgentMonitor extends Thread implements Listener { private ConnectionConcierge _concierge; @Inject ClusterDao _clusterDao; @Inject ResourceManager _resourceMgr; - + // private ConnectionConcierge _concierge; private Map _pingMap; @@ -104,7 +105,7 @@ public class AgentMonitor extends Thread implements 
Listener { /** * Check if the agent is behind on ping - * + * * @param agentId * agent or host id. * @return null if the agent is not kept here. true if behind; false if not. @@ -144,21 +145,29 @@ public class AgentMonitor extends Thread implements Listener { SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); sc.addAnd(sc.getEntity().getId(), Op.EQ, agentId); HostVO h = sc.find(); - ResourceState resourceState = h.getResourceState(); - if (resourceState == ResourceState.Disabled || resourceState == ResourceState.Maintenance || resourceState == ResourceState.ErrorInMaintenance) { - /* Host is in non-operation state, so no investigation and direct put agent to Disconnected */ - status_Logger.debug("Ping timeout but host " + agentId + " is in resource state of " + resourceState + ", so no investigation"); - _agentMgr.disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); - } else { - status_Logger.debug("Ping timeout for host " + agentId + ", do invstigation"); - _agentMgr.disconnectWithInvestigation(agentId, Event.PingTimeout); + if (h != null) { + ResourceState resourceState = h.getResourceState(); + if (resourceState == ResourceState.Disabled || resourceState == ResourceState.Maintenance + || resourceState == ResourceState.ErrorInMaintenance) { + /* + * Host is in non-operation state, so no + * investigation and direct put agent to + * Disconnected + */ + status_Logger.debug("Ping timeout but host " + agentId + " is in resource state of " + + resourceState + ", so no investigation"); + _agentMgr.disconnectWithoutInvestigation(agentId, Event.ShutdownRequested); + } else { + status_Logger.debug("Ping timeout for host " + agentId + ", do invstigation"); + _agentMgr.disconnectWithInvestigation(agentId, Event.PingTimeout); + } } } SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); sc.addAnd(sc.getEntity().getResourceState(), Op.IN, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); List hosts = sc.list(); - + 
for (HostVO host : hosts) { long hostId = host.getId(); DataCenterVO dcVO = _dcDao.findById(host.getDataCenterId()); @@ -170,7 +179,7 @@ public class AgentMonitor extends Thread implements Listener { List vosMigrating = _vmDao.listVmsMigratingFromHost(hostId); if (vos.isEmpty() && vosMigrating.isEmpty()) { _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "Migration Complete for host " + hostDesc, "Host [" + hostDesc + "] is ready for maintenance"); - _resourceMgr.resourceStateTransitTo(host, ResourceState.Event.InternalEnterMaintenance, _msId); + _resourceMgr.resourceStateTransitTo(host, ResourceState.Event.InternalEnterMaintenance, _msId); } } } @@ -288,4 +297,8 @@ public class AgentMonitor extends Thread implements Listener { return -1; } + public void startMonitoring() { + start(); + } } + diff --git a/server/src/com/cloud/agent/manager/AgentMonitorService.java b/server/src/com/cloud/agent/manager/AgentMonitorService.java new file mode 100644 index 00000000000..cdb0c1e2cbb --- /dev/null +++ b/server/src/com/cloud/agent/manager/AgentMonitorService.java @@ -0,0 +1,28 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.manager; + +import com.cloud.agent.Listener; + +public interface AgentMonitorService extends Listener { + + public Boolean isAgentBehindOnPing(long agentId); + public Long getAgentPingTime(long agentId); + public void pingBy(long agentId); + public void signalStop(); + public void startMonitoring(); +} diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index a25e4014dea..0091e43cab8 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -26,6 +26,10 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.org.Cluster; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -78,6 +82,8 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { @Inject GuestOSCategoryDao _guestOSCategoryDao = null; @Inject VMInstanceDao _vmInstanceDao = null; @Inject ResourceManager _resourceMgr; + @Inject ClusterDao _clusterDao; + @Inject ClusterDetailsDao _clusterDetailsDao; float _factor = 1; boolean _checkHvm = true; protected String _allocationAlgorithm = "random"; @@ -214,8 +220,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator { boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu(); boolean cpuFreqGood = host.getSpeed().intValue() >= offering.getSpeed(); int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; - boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, _factor, considerReservedCapacity); + long ram_requested = 
offering.getRamSize() * 1024L * 1024L; + Cluster cluster = _clusterDao.findById(host.getClusterId()); + ClusterDetailsVO clusterDetailsCpuOvercommit = _clusterDetailsDao.findDetail(cluster.getId(),"cpuOvercommitRatio"); + ClusterDetailsVO clusterDetailsRamOvercommmt = _clusterDetailsDao.findDetail(cluster.getId(),"memoryOvercommitRatio"); + Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); + + boolean hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false,cpuOvercommitRatio,memoryOvercommitRatio, considerReservedCapacity); if (numCpusGood && cpuFreqGood && hostHasCapacity) { if (s_logger.isDebugEnabled()) { diff --git a/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java index dc2082f4ba6..3b659c02741 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/RecreateHostAllocator.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -49,7 +50,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.org.Grouping; import com.cloud.resource.ResourceManager; import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -61,7 +61,7 @@ public class RecreateHostAllocator extends FirstFitRoutingAllocator { private final static Logger s_logger = Logger.getLogger(RecreateHostAllocator.class); @Inject HostPodDao _podDao; - @Inject StoragePoolDao _poolDao; + @Inject 
PrimaryDataStoreDao _poolDao; @Inject ClusterDao _clusterDao; @Inject VolumeDao _volsDao; @Inject DataCenterDao _dcDao; diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 4545f0a5e99..f8a8fd8b1b9 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -38,6 +38,8 @@ import javax.mail.URLName; import javax.mail.internet.InternetAddress; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -69,8 +71,6 @@ import com.cloud.network.dao.IPAddressDao; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; @@ -102,7 +102,7 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager { @Inject private VolumeDao _volumeDao; @Inject private IPAddressDao _publicIPAddressDao; @Inject private DataCenterIpAddressDao _privateIPAddressDao; - @Inject private StoragePoolDao _storagePoolDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private ConfigurationDao _configDao; @Inject private ResourceManager _resourceMgr; @Inject private ConfigurationManager _configMgr; diff --git a/server/src/com/cloud/alert/dao/AlertDao.java b/server/src/com/cloud/alert/dao/AlertDao.java index eb1faa51a2b..fda814d051d 100755 --- a/server/src/com/cloud/alert/dao/AlertDao.java +++ b/server/src/com/cloud/alert/dao/AlertDao.java @@ -16,6 +16,9 @@ // under the License. 
package com.cloud.alert.dao; +import java.util.Date; +import java.util.List; + import com.cloud.alert.AlertVO; import com.cloud.utils.db.GenericDao; @@ -23,4 +26,8 @@ public interface AlertDao extends GenericDao { AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId); // This is for backward compatibility AlertVO getLastAlert(short type, long dataCenterId, Long podId); + + public boolean deleteAlert(List Ids, String type, Date olderThan, Long zoneId); + public boolean archiveAlert(List Ids, String type, Date olderThan, Long zoneId); + public List listOlderAlerts(Date oldTime); } diff --git a/server/src/com/cloud/alert/dao/AlertDaoImpl.java b/server/src/com/cloud/alert/dao/AlertDaoImpl.java index 2f3be882edd..4b9bc6a2988 100755 --- a/server/src/com/cloud/alert/dao/AlertDaoImpl.java +++ b/server/src/com/cloud/alert/dao/AlertDaoImpl.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.alert.dao; +import java.util.Date; import java.util.List; import javax.ejb.Local; @@ -25,11 +26,26 @@ import org.springframework.stereotype.Component; import com.cloud.alert.AlertVO; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; @Component @Local(value = { AlertDao.class }) public class AlertDaoImpl extends GenericDaoBase implements AlertDao { + + protected final SearchBuilder AlertSearchByIdsAndType; + + public AlertDaoImpl() { + AlertSearchByIdsAndType = createSearchBuilder(); + AlertSearchByIdsAndType.and("id", AlertSearchByIdsAndType.entity().getId(), Op.IN); + AlertSearchByIdsAndType.and("type", AlertSearchByIdsAndType.entity().getType(), Op.EQ); + AlertSearchByIdsAndType.and("createdDateL", AlertSearchByIdsAndType.entity().getCreatedDate(), Op.LT); + AlertSearchByIdsAndType.and("data_center_id", AlertSearchByIdsAndType.entity().getDataCenterId(), 
Op.EQ); + AlertSearchByIdsAndType.done(); + } + @Override public AlertVO getLastAlert(short type, long dataCenterId, Long podId, Long clusterId) { Filter searchFilter = new Filter(AlertVO.class, "createdDate", Boolean.FALSE, Long.valueOf(0), Long.valueOf(1)); @@ -68,4 +84,73 @@ public class AlertDaoImpl extends GenericDaoBase implements Alert } return null; } + + @Override + public boolean archiveAlert(List Ids, String type, Date olderThan, Long zoneId) { + SearchCriteria sc = AlertSearchByIdsAndType.create(); + + if (Ids != null) { + sc.setParameters("id", Ids.toArray(new Object[Ids.size()])); + } + if(type != null) { + sc.setParameters("type", type); + } + if(zoneId != null) { + sc.setParameters("data_center_id", zoneId); + } + if(olderThan != null) { + sc.setParameters("createdDateL", olderThan); + } + boolean result = true;; + List alerts = listBy(sc); + if (Ids != null && alerts.size() < Ids.size()) { + result = false; + return result; + } + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (AlertVO alert : alerts) { + alert = lockRow(alert.getId(), true); + alert.setArchived(true); + update(alert.getId(), alert); + txn.commit(); + } + txn.close(); + return result; + } + + @Override + public boolean deleteAlert(List ids, String type, Date olderThan, Long zoneId) { + SearchCriteria sc = AlertSearchByIdsAndType.create(); + + if (ids != null) { + sc.setParameters("id", ids.toArray(new Object[ids.size()])); + } + if(type != null) { + sc.setParameters("type", type); + } + if(zoneId != null) { + sc.setParameters("data_center_id", zoneId); + } + if(olderThan != null) { + sc.setParameters("createdDateL", olderThan); + } + boolean result = true; + List alerts = listBy(sc); + if (ids != null && alerts.size() < ids.size()) { + result = false; + return result; + } + remove(sc); + return result; + } + + @Override + public List listOlderAlerts(Date oldTime) { + if (oldTime == null) return null; + SearchCriteria sc = createSearchCriteria(); + 
sc.addAnd("createDate", SearchCriteria.Op.LT, oldTime); + return listIncludingRemovedBy(sc, null); + } + } diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index e6b1bf16a03..8e6f8062463 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -45,7 +45,8 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; - +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; import com.cloud.api.query.dao.AccountJoinDao; @@ -182,10 +183,13 @@ import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.*; import com.cloud.storage.Storage.ImageFormat; + import com.cloud.storage.Volume.Type; import com.cloud.storage.dao.*; import com.cloud.storage.snapshot.SnapshotPolicy; +import com.cloud.template.TemplateManager; import com.cloud.user.*; + import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.SSHKeyPairDao; import com.cloud.user.dao.UserDao; @@ -198,6 +202,7 @@ import com.cloud.vm.DomainRouterVO; import com.cloud.vm.InstanceGroup; import com.cloud.vm.InstanceGroupVO; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; @@ -206,6 +211,8 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmStats; import com.cloud.vm.dao.ConsoleProxyDao; import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.NicSecondaryIpVO; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; @@ -218,9 +225,12 
@@ public class ApiDBUtils { static AsyncJobManager _asyncMgr; static SecurityGroupManager _securityGroupMgr; static StorageManager _storageMgr; + static VolumeManager _volumeMgr; static UserVmManager _userVmMgr; static NetworkModel _networkModel; static NetworkManager _networkMgr; + static TemplateManager _templateMgr; + static StatsCollector _statsCollector; static AccountDao _accountDao; @@ -245,7 +255,7 @@ public class ApiDBUtils { static HostPodDao _podDao; static ServiceOfferingDao _serviceOfferingDao; static SnapshotDao _snapshotDao; - static StoragePoolDao _storagePoolDao; + static PrimaryDataStoreDao _storagePoolDao; static VMTemplateDao _templateDao; static VMTemplateDetailsDao _templateDetailsDao; static VMTemplateHostDao _templateHostDao; @@ -312,6 +322,8 @@ public class ApiDBUtils { static AsyncJobDao _asyncJobDao; static HostDetailsDao _hostDetailsDao; static VMSnapshotDao _vmSnapshotDao; + static ClusterDetailsDao _clusterDetailsDao; + static NicSecondaryIpDao _nicSecondaryIpDao; @Inject private ManagementServer ms; @Inject public AsyncJobManager asyncMgr; @@ -321,6 +333,8 @@ public class ApiDBUtils { @Inject private NetworkModel networkModel; @Inject private NetworkManager networkMgr; @Inject private StatsCollector statsCollector; + @Inject private TemplateManager templateMgr; + @Inject private VolumeManager volumeMgr; @Inject private AccountDao accountDao; @Inject private AccountVlanMapDao accountVlanMapDao; @@ -344,7 +358,7 @@ public class ApiDBUtils { @Inject private HostPodDao podDao; @Inject private ServiceOfferingDao serviceOfferingDao; @Inject private SnapshotDao snapshotDao; - @Inject private StoragePoolDao storagePoolDao; + @Inject private PrimaryDataStoreDao storagePoolDao; @Inject private VMTemplateDao templateDao; @Inject private VMTemplateDetailsDao templateDetailsDao; @Inject private VMTemplateHostDao templateHostDao; @@ -410,7 +424,9 @@ public class ApiDBUtils { @Inject private SnapshotPolicyDao snapshotPolicyDao; @Inject private 
AsyncJobDao asyncJobDao; @Inject private HostDetailsDao hostDetailsDao; + @Inject private ClusterDetailsDao clusterDetailsDao; @Inject private VMSnapshotDao vmSnapshotDao; + @Inject private NicSecondaryIpDao nicSecondaryIpDao; @PostConstruct void init() { _ms = ms; @@ -421,6 +437,7 @@ public class ApiDBUtils { _networkModel = networkModel; _networkMgr = networkMgr; _configMgr = configMgr; + _templateMgr = templateMgr; _accountDao = accountDao; _accountVlanMapDao = accountVlanMapDao; @@ -508,7 +525,9 @@ public class ApiDBUtils { _snapshotPolicyDao = snapshotPolicyDao; _asyncJobDao = asyncJobDao; _hostDetailsDao = hostDetailsDao; + _clusterDetailsDao = clusterDetailsDao; _vmSnapshotDao = vmSnapshotDao; + _nicSecondaryIpDao = nicSecondaryIpDao; // Note: stats collector should already have been initialized by this time, otherwise a null instance is returned _statsCollector = StatsCollector.getInstance(); } @@ -609,7 +628,7 @@ public class ApiDBUtils { public static String getSnapshotIntervalTypes(long snapshotId) { SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - return snapshot.getType().name(); + return snapshot.getRecurringType().name(); } public static String getStoragePoolTags(long poolId) { @@ -672,6 +691,10 @@ public class ApiDBUtils { return _clusterDao.findById(clusterId); } + public static ClusterDetailsVO findClusterDetails(long clusterId, String name){ + return _clusterDetailsDao.findDetail(clusterId,name); + } + public static DiskOfferingVO findDiskOfferingById(Long diskOfferingId) { return _diskOfferingDao.findByIdIncludingRemoved(diskOfferingId); } @@ -784,7 +807,7 @@ public class ApiDBUtils { List res = _templateHostDao.listByTemplateId(templateId); return res.size() == 0 ? 
null : res.get(0); } else { - return _storageMgr.getTemplateHostRef(zoneId, templateId, readyOnly); + return _templateMgr.getTemplateHostRef(zoneId, templateId, readyOnly); } } @@ -886,7 +909,7 @@ public class ApiDBUtils { throw new InvalidParameterValueException("Please specify a valid volume ID."); } - return _storageMgr.volumeOnSharedStoragePool(volume); + return _volumeMgr.volumeOnSharedStoragePool(volume); } public static List getNics(VirtualMachine vm) { @@ -1503,4 +1526,8 @@ public class ApiDBUtils { public static Map findHostDetailsById(long hostId){ return _hostDetailsDao.findDetails(hostId); } + + public static List findNicSecondaryIps(long nicId) { + return _nicSecondaryIpDao.listByNicId(nicId); + } } diff --git a/server/src/com/cloud/api/ApiDispatcher.java b/server/src/com/cloud/api/ApiDispatcher.java index 8e3c5e01bfd..f7a32364cf8 100755 --- a/server/src/com/cloud/api/ApiDispatcher.java +++ b/server/src/com/cloud/api/ApiDispatcher.java @@ -24,6 +24,7 @@ import java.text.ParseException; import java.util.ArrayList; import java.util.Calendar; import java.util.Date; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -35,6 +36,7 @@ import javax.inject.Inject; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.InfrastructureEntity; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -48,6 +50,8 @@ import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.Validate; +import org.apache.cloudstack.api.command.user.event.ArchiveEventsCmd; +import org.apache.cloudstack.api.command.user.event.DeleteEventsCmd; import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import org.apache.log4j.Logger; import 
org.springframework.stereotype.Component; @@ -87,7 +91,7 @@ public class ApiDispatcher { public ApiDispatcher() { } - + @PostConstruct void init() { s_instance = this; @@ -106,7 +110,7 @@ public class ApiDispatcher { } - private void doAccessChecks(BaseCmd cmd, List entitiesToAccess) { + private void doAccessChecks(BaseCmd cmd, Map entitiesToAccess) { Account caller = UserContext.current().getCaller(); Account owner = _accountMgr.getActiveAccountById(cmd.getEntityOwnerId()); @@ -118,9 +122,9 @@ public class ApiDispatcher { if(!entitiesToAccess.isEmpty()){ //check that caller can access the owner account. _accountMgr.checkAccess(caller, null, true, owner); - for(Object entity : entitiesToAccess) { + for (Object entity : entitiesToAccess.keySet()) { if (entity instanceof ControlledEntity) { - _accountMgr.checkAccess(caller, null, true, (ControlledEntity) entity); + _accountMgr.checkAccess(caller, entitiesToAccess.get(entity), true, (ControlledEntity) entity); } else if (entity instanceof InfrastructureEntity) { //FIXME: Move this code in adapter, remove code from Account manager @@ -133,7 +137,9 @@ public class ApiDispatcher { processParameters(cmd, params); UserContext ctx = UserContext.current(); ctx.setAccountId(cmd.getEntityOwnerId()); - if (cmd instanceof BaseAsyncCmd) { + + BaseCmd realCmdObj = ComponentContext.getTargetObject(cmd); + if (realCmdObj instanceof BaseAsyncCmd) { BaseAsyncCmd asyncCmd = (BaseAsyncCmd) cmd; String startEventId = params.get("ctxStartEventId"); @@ -162,11 +168,11 @@ public class ApiDispatcher { @SuppressWarnings({ "unchecked", "rawtypes" }) public static void processParameters(BaseCmd cmd, Map params) { - List entitiesToAccess = new ArrayList(); + Map entitiesToAccess = new HashMap(); Map unpackedParams = cmd.unpackParams(params); - + cmd = ComponentContext.getTargetObject(cmd); - + if (cmd instanceof BaseListCmd) { Object pageSizeObj = unpackedParams.get(ApiConstants.PAGE_SIZE); Long pageSize = null; @@ -258,7 +264,7 @@ public class 
ApiDispatcher { List listParam = (List) field.get(cmd); for (Long entityId : listParam) { Object entityObj = s_instance._entityMgr.findById(entity, entityId); - entitiesToAccess.add(entityObj); + entitiesToAccess.put(entityObj, checkAccess.accessType()); } break; /* @@ -279,7 +285,7 @@ public class ApiDispatcher { case LONG: case UUID: Object entityObj = s_instance._entityMgr.findById(entity, (Long) field.get(cmd)); - entitiesToAccess.add(entityObj); + entitiesToAccess.put(entityObj, checkAccess.accessType()); break; default: break; @@ -368,8 +374,8 @@ public class ApiDispatcher { if (internalId == null) { if (s_logger.isDebugEnabled()) s_logger.debug("Object entity uuid = " + uuid + " does not exist in the database."); - throw new InvalidParameterValueException("Invalid parameter value=" + uuid - + " due to incorrect long value format, or entity was not found as it may have been deleted, or due to incorrect parameter annotation for the field in api cmd."); + throw new InvalidParameterValueException("Invalid parameter " + annotation.name() + " value=" + uuid + + " due to incorrect long value format, or entity does not exist or due to incorrect parameter annotation for the field in api cmd class."); } return internalId; } @@ -387,7 +393,7 @@ public class ApiDispatcher { // This piece of code is for maintaining backward compatibility // and support both the date formats(Bug 9724) // Do the date messaging for ListEventsCmd only - if (cmdObj instanceof ListEventsCmd) { + if (cmdObj instanceof ListEventsCmd || cmdObj instanceof DeleteEventsCmd || cmdObj instanceof ArchiveEventsCmd) { boolean isObjInNewDateFormat = isObjInNewDateFormat(paramObj.toString()); if (isObjInNewDateFormat) { DateFormat newFormat = BaseCmd.NEW_INPUT_FORMAT; @@ -402,6 +408,8 @@ public class ApiDispatcher { date = messageDate(date, 0, 0, 0); } else if (field.getName().equals("endDate")) { date = messageDate(date, 23, 59, 59); + } else if (field.getName().equals("olderThan")) { + date = 
messageDate(date, 0, 0, 0); } field.set(cmdObj, date); } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index a94e93568e2..2546f292883 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -19,6 +19,7 @@ package com.cloud.api; import com.cloud.api.query.ViewResponseHelper; import com.cloud.api.query.vo.*; import com.cloud.api.response.ApiResponseSerializer; + import org.apache.cloudstack.api.response.AsyncJobResponse; import org.apache.cloudstack.api.response.AutoScalePolicyResponse; import org.apache.cloudstack.api.response.AutoScaleVmGroupResponse; @@ -52,6 +53,7 @@ import org.apache.cloudstack.api.response.LoadBalancerResponse; import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkOfferingResponse; import org.apache.cloudstack.api.response.NetworkResponse; +import org.apache.cloudstack.api.response.NicResponse; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; import org.apache.cloudstack.api.response.PodResponse; import org.apache.cloudstack.api.response.PrivateGatewayResponse; @@ -94,6 +96,7 @@ import org.apache.cloudstack.api.response.VpcResponse; import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.api.response.S3Response; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.springframework.stereotype.Component; import com.cloud.async.AsyncJob; @@ -165,6 +168,7 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; + import com.cloud.storage.snapshot.SnapshotPolicy; import com.cloud.storage.snapshot.SnapshotSchedule; import com.cloud.template.VirtualMachineTemplate; @@ -178,10 +182,15 @@ import 
com.cloud.utils.StringUtils; import com.cloud.utils.net.NetUtils; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.InstanceGroup; +import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicVO; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; +import com.cloud.vm.dao.NicSecondaryIpVO; + import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.api.ApiConstants.HostDetails; @@ -339,7 +348,7 @@ public class ApiResponseHelper implements ResponseGenerator { populateOwner(snapshotResponse, snapshot); VolumeVO volume = findVolumeById(snapshot.getVolumeId()); - String snapshotTypeStr = snapshot.getType().name(); + String snapshotTypeStr = snapshot.getRecurringType().name(); snapshotResponse.setSnapshotType(snapshotTypeStr); if (volume != null) { snapshotResponse.setVolumeId(volume.getUuid()); @@ -549,6 +558,9 @@ public class ApiResponseHelper implements ResponseGenerator { } } } + if (ipAddr.getVmIp() != null) { + ipResponse.setVirtualMachineIp(ipAddr.getVmIp()); + } if (ipAddr.getAssociatedWithNetworkId() != null) { Network ntwk = ApiDBUtils.findNetworkById(ipAddr.getAssociatedWithNetworkId()); @@ -688,14 +700,12 @@ public class ApiResponseHelper implements ResponseGenerator { if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null); Set capacityResponses = new HashSet(); - float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor(); - for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityType(capacity.getCapacityType()); capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { - capacityResponse.setCapacityTotal(new Long((long) 
(capacity.getTotalCapacity() * cpuOverprovisioningFactor))); + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity()))); } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, pod.getId(), null); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); @@ -825,12 +835,15 @@ public class ApiResponseHelper implements ResponseGenerator { clusterResponse.setClusterType(cluster.getClusterType().toString()); clusterResponse.setAllocationState(cluster.getAllocationState().toString()); clusterResponse.setManagedState(cluster.getManagedState().toString()); + String cpuOvercommitRatio=ApiDBUtils.findClusterDetails(cluster.getId(),"cpuOvercommitRatio").getValue(); + String memoryOvercommitRatio=ApiDBUtils.findClusterDetails(cluster.getId(),"memoryOvercommitRatio").getValue(); + clusterResponse.setCpuovercommitratio(cpuOvercommitRatio); + clusterResponse.setRamovercommitratio(memoryOvercommitRatio); if (showCapacities != null && showCapacities) { List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId()); Set capacityResponses = new HashSet(); - float cpuOverprovisioningFactor = ApiDBUtils.getCpuOverprovisioningFactor(); for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); @@ -838,8 +851,11 @@ public class ApiResponseHelper implements ResponseGenerator { capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { - capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * cpuOverprovisioningFactor))); - } else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(cpuOvercommitRatio)))); + }else if (capacity.getCapacityType() == 
Capacity.CAPACITY_TYPE_MEMORY){ + capacityResponse.setCapacityTotal(new Long((long) (capacity.getTotalCapacity() * Float.parseFloat(memoryOvercommitRatio)))); + } + else if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, null, cluster.getId()); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); capacityResponse.setCapacityUsed(capacity.getUsedCapacity() - c.get(0).getUsedCapacity()); @@ -2151,13 +2167,48 @@ public class ApiResponseHelper implements ResponseGenerator { // FIXME - either set netmask or cidr response.setCidr(network.getCidr()); - if (network.getCidr() != null) { + response.setNetworkCidr((network.getNetworkCidr())); + // If network has reservation its entire network cidr is defined by getNetworkCidr() + // if no reservation is present then getCidr() will define the entire network cidr + if (network.getNetworkCidr() != null) { + response.setNetmask(NetUtils.cidr2Netmask(network.getNetworkCidr())); + } + if (((network.getCidr()) != null) && (network.getNetworkCidr() == null)) { response.setNetmask(NetUtils.cidr2Netmask(network.getCidr())); } response.setIp6Gateway(network.getIp6Gateway()); response.setIp6Cidr(network.getIp6Cidr()); + // create response for reserved IP ranges that can be used for non-cloudstack purposes + String reservation = null; + if ((network.getCidr() != null) && (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr()))) { + String[] guestVmCidrPair = network.getCidr().split("\\/"); + String[] guestCidrPair = network.getNetworkCidr().split("\\/"); + + Long guestVmCidrSize = Long.valueOf(guestVmCidrPair[1]); + Long guestCidrSize = Long.valueOf(guestCidrPair[1]); + + String[] guestVmIpRange = NetUtils.getIpRangeFromCidr(guestVmCidrPair[0], guestVmCidrSize); + String[] guestIpRange = NetUtils.getIpRangeFromCidr(guestCidrPair[0], guestCidrSize); + long startGuestIp = 
NetUtils.ip2Long(guestIpRange[0]); + long endGuestIp = NetUtils.ip2Long(guestIpRange[1]); + long startVmIp = NetUtils.ip2Long(guestVmIpRange[0]); + long endVmIp = NetUtils.ip2Long(guestVmIpRange[1]); + + if (startVmIp == startGuestIp && endVmIp < endGuestIp -1) { + reservation = (NetUtils.long2Ip(endVmIp + 1) + "-" + NetUtils.long2Ip(endGuestIp)); + } + if (endVmIp == endGuestIp && startVmIp > startGuestIp + 1) { + reservation = (NetUtils.long2Ip(startGuestIp) + "-" + NetUtils.long2Ip(startVmIp-1)); + } + if(startVmIp > startGuestIp + 1 && endVmIp < endGuestIp - 1) { + reservation = (NetUtils.long2Ip(startGuestIp) + "-" + NetUtils.long2Ip(startVmIp-1) + " , " + + NetUtils.long2Ip(endVmIp + 1) + "-"+ NetUtils.long2Ip(endGuestIp)); + } + } + response.setReservedIpRange(reservation); + //return vlan information only to Root admin if (network.getBroadcastUri() != null && UserContext.current().getCaller().getType() == Account.ACCOUNT_TYPE_ADMIN) { String broadcastUri = network.getBroadcastUri().toString(); @@ -3394,4 +3445,53 @@ public class ApiResponseHelper implements ResponseGenerator { response.setTimeout(tmDetails.get("timeout")); return response; } + + public NicSecondaryIpResponse createSecondaryIPToNicResponse(String ipAddr, Long nicId, Long networkId) { + NicSecondaryIpResponse response = new NicSecondaryIpResponse(); + NicVO nic = _entityMgr.findById(NicVO.class, nicId); + NetworkVO network = _entityMgr.findById(NetworkVO.class, networkId); + response.setIpAddr(ipAddr); + response.setNicId(nic.getUuid()); + response.setNwId(network.getUuid()); + response.setObjectName("nicsecondaryip"); + return response; + } + + public NicResponse createNicResponse(Nic result) { + NicResponse response = new NicResponse(); + response.setId(result.getUuid()); + response.setIpaddress(result.getIp4Address()); + + if (result.getSecondaryIp()) { + List secondaryIps = ApiDBUtils.findNicSecondaryIps(result.getId()); + if (secondaryIps != null) { + List ipList = new ArrayList(); + for 
(NicSecondaryIpVO ip: secondaryIps) { + NicSecondaryIpResponse ipRes = new NicSecondaryIpResponse(); + ipRes.setId(ip.getUuid()); + ipRes.setIpAddr(ip.getIp4Address()); + ipList.add(ipRes); + } + response.setSecondaryIps(ipList); + } + } + + response.setGateway(result.getGateway()); + response.setId(result.getUuid()); + response.setGateway(result.getGateway()); + response.setNetmask(result.getNetmask()); + response.setMacAddress(result.getMacAddress()); + if (result.getBroadcastUri() != null) { + response.setBroadcastUri(result.getBroadcastUri().toString()); + } + if (result.getIsolationUri() != null) { + response.setIsolationUri(result.getIsolationUri().toString()); + } + if (result.getIp6Address() != null) { + response.setId(result.getIp6Address()); + } + + response.setIsDefault(result.isDefaultNic()); + return response; + } } diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index d99d188b5d5..0439c6e2cc9 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -5,7 +5,7 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, @@ -139,9 +139,10 @@ import com.cloud.utils.component.PluggableService; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; @Component -public class ApiServer implements HttpRequestHandler { +public class ApiServer implements HttpRequestHandler, ApiServerService { private static final Logger s_logger = Logger.getLogger(ApiServer.class.getName()); private static final Logger s_accessLogger = Logger.getLogger("apiserver." 
+ ApiServer.class.getName()); @@ -157,10 +158,8 @@ public class ApiServer implements HttpRequestHandler { @Inject List _pluggableServices; @Inject List _apiAccessCheckers; - private Account _systemAccount = null; - private User _systemUser = null; @Inject private RegionManager _regionMgr = null; - + private static int _workerCount = 0; private static ApiServer s_instance = null; private static final DateFormat _dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"); @@ -182,9 +181,6 @@ public class ApiServer implements HttpRequestHandler { } public void init() { - _systemAccount = _accountMgr.getSystemAccount(); - _systemUser = _accountMgr.getSystemUser(); - Integer apiPort = null; // api port, null by default SearchCriteria sc = _configDao.createSearchCriteria(); sc.addAnd("name", SearchCriteria.Op.EQ, "integration.api.port"); @@ -212,8 +208,12 @@ public class ApiServer implements HttpRequestHandler { for(PluggableService pluggableService: _pluggableServices) cmdClasses.addAll(pluggableService.getCommands()); - for(Class cmdClass: cmdClasses) { - String apiName = cmdClass.getAnnotation(APICommand.class).name(); + for(Class cmdClass: cmdClasses) { + APICommand at = cmdClass.getAnnotation(APICommand.class); + if (at == null) { + throw new CloudRuntimeException(String.format("%s is claimed as a API command, but it doesn't have @APICommand annotation", cmdClass.getName())); + } + String apiName = at.name(); if (_apiNameCmdClassMap.containsKey(apiName)) { s_logger.error("API Cmd class " + cmdClass.getName() + " has non-unique apiname" + apiName); continue; @@ -278,7 +278,7 @@ public class ApiServer implements HttpRequestHandler { try { // always trust commands from API port, user context will always be UID_SYSTEM/ACCOUNT_ID_SYSTEM - UserContext.registerContext(_systemUser.getId(), _systemAccount, null, true); + UserContext.registerContext(_accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount(), null, true); sb.insert(0, "(userId=" + User.UID_SYSTEM + " 
accountId=" + Account.ACCOUNT_ID_SYSTEM + " sessionId=" + null + ") "); String responseText = handleRequest(parameterMap, responseType, sb); sb.append(" 200 " + ((responseText == null) ? 0 : responseText.length())); @@ -326,6 +326,14 @@ public class ApiServer implements HttpRequestHandler { continue; } String[] value = (String[]) params.get(key); + // fail if parameter value contains ASCII control (non-printable) characters + if (value[0] != null) { + String newValue = StringUtils.stripControlCharacters(value[0]); + if ( !newValue.equals(value[0]) ) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, "Received value " + value[0] + " for parameter " + + key + " is invalid, contains illegal ASCII non-printable characters"); + } + } paramMap.put(key, value[0]); } @@ -494,22 +502,22 @@ public class ApiServer implements HttpRequestHandler { // if the command is of the listXXXCommand, we will need to also return the // the job id and status if possible // For those listXXXCommand which we have already created DB views, this step is not needed since async job is joined in their db views. 
- if (cmdObj instanceof BaseListCmd && !(cmdObj instanceof ListVMsCmd) && !(cmdObj instanceof ListRoutersCmd) - && !(cmdObj instanceof ListSecurityGroupsCmd) - && !(cmdObj instanceof ListTagsCmd) - && !(cmdObj instanceof ListEventsCmd) - && !(cmdObj instanceof ListVMGroupsCmd) - && !(cmdObj instanceof ListProjectsCmd) - && !(cmdObj instanceof ListProjectAccountsCmd) - && !(cmdObj instanceof ListProjectInvitationsCmd) - && !(cmdObj instanceof ListHostsCmd) - && !(cmdObj instanceof ListVolumesCmd) - && !(cmdObj instanceof ListUsersCmd) - && !(cmdObj instanceof ListAccountsCmd) - && !(cmdObj instanceof ListStoragePoolsCmd) - && !(cmdObj instanceof ListDiskOfferingsCmd) - && !(cmdObj instanceof ListServiceOfferingsCmd) - && !(cmdObj instanceof ListZonesByCmd) + if (realCmdObj instanceof BaseListCmd && !(realCmdObj instanceof ListVMsCmd) && !(realCmdObj instanceof ListRoutersCmd) + && !(realCmdObj instanceof ListSecurityGroupsCmd) + && !(realCmdObj instanceof ListTagsCmd) + && !(realCmdObj instanceof ListEventsCmd) + && !(realCmdObj instanceof ListVMGroupsCmd) + && !(realCmdObj instanceof ListProjectsCmd) + && !(realCmdObj instanceof ListProjectAccountsCmd) + && !(realCmdObj instanceof ListProjectInvitationsCmd) + && !(realCmdObj instanceof ListHostsCmd) + && !(realCmdObj instanceof ListVolumesCmd) + && !(realCmdObj instanceof ListUsersCmd) + && !(realCmdObj instanceof ListAccountsCmd) + && !(realCmdObj instanceof ListStoragePoolsCmd) + && !(realCmdObj instanceof ListDiskOfferingsCmd) + && !(realCmdObj instanceof ListServiceOfferingsCmd) + && !(realCmdObj instanceof ListZonesByCmd) ) { buildAsyncListResponse((BaseListCmd) cmdObj, caller); } @@ -590,14 +598,14 @@ public class ApiServer implements HttpRequestHandler { try{ checkCommandAvailable(user, commandName); } - catch (PermissionDeniedException ex){ - s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); - throw new 
ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available for user"); - } catch (RequestLimitException ex){ s_logger.debug(ex.getMessage()); throw new ServerApiException(ApiErrorCode.API_LIMIT_EXCEED, ex.getMessage()); } + catch (PermissionDeniedException ex){ + s_logger.debug("The given command:" + commandName + " does not exist or it is not available for user with id:" + userId); + throw new ServerApiException(ApiErrorCode.UNSUPPORTED_ACTION_ERROR, "The given command does not exist or it is not available for user"); + } return true; } else { // check against every available command to see if the command exists or not @@ -999,7 +1007,7 @@ public class ApiServer implements HttpRequestHandler { } catch (Exception e) { s_logger.error("Exception responding to http request", e); - } + } return responseText; } @@ -1011,7 +1019,7 @@ public class ApiServer implements HttpRequestHandler { if (ex == null){ // this call should not be invoked with null exception return getSerializedApiError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Some internal error happened", apiCommandParams, responseType); - } + } try { if (ex.getErrorCode() == ApiErrorCode.UNSUPPORTED_ACTION_ERROR || apiCommandParams == null || apiCommandParams.isEmpty()) { responseName = "errorresponse"; @@ -1037,7 +1045,7 @@ public class ApiServer implements HttpRequestHandler { if (idList != null) { for (int i=0; i < idList.size(); i++) { apiResponse.addProxyObject(idList.get(i)); - } + } } // Also copy over the cserror code and the function/layer in which // it was thrown. 
@@ -1047,7 +1055,7 @@ public class ApiServer implements HttpRequestHandler { responseText = ApiResponseSerializer.toSerializedString(apiResponse, responseType); } catch (Exception e) { - s_logger.error("Exception responding to http request", e); + s_logger.error("Exception responding to http request", e); } return responseText; } diff --git a/server/src/com/cloud/api/ApiServerService.java b/server/src/com/cloud/api/ApiServerService.java new file mode 100644 index 00000000000..12d8b52fa83 --- /dev/null +++ b/server/src/com/cloud/api/ApiServerService.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.api; + +import java.util.Map; + +import javax.servlet.http.HttpSession; + +import org.apache.cloudstack.api.ServerApiException; +import com.cloud.exception.CloudAuthenticationException; + +public interface ApiServerService { + public boolean verifyRequest(Map requestParameters, Long userId) throws ServerApiException; + public Long fetchDomainId(String domainUUID); + public void loginUser(HttpSession session, String username, String password, Long domainId, String domainPath, String loginIpAddress ,Map requestParameters) throws CloudAuthenticationException; + public void logoutUser(long userId); + public boolean verifyUser(Long userId); + + public String getSerializedApiError(int errorCode, String errorText, Map apiCommandParams, String responseType); + public String getSerializedApiError(ServerApiException ex, Map apiCommandParams, String responseType); + + public String handleRequest(Map params, String responseType, StringBuffer auditTrailSb) throws ServerApiException; +} diff --git a/server/src/com/cloud/api/ApiServlet.java b/server/src/com/cloud/api/ApiServlet.java index e5c1db78c7b..03bfb5f2d49 100755 --- a/server/src/com/cloud/api/ApiServlet.java +++ b/server/src/com/cloud/api/ApiServlet.java @@ -50,7 +50,7 @@ public class ApiServlet extends HttpServlet { public static final Logger s_logger = Logger.getLogger(ApiServlet.class.getName()); private static final Logger s_accessLogger = Logger.getLogger("apiserver." 
+ ApiServer.class.getName()); - @Inject ApiServer _apiServer; + @Inject ApiServerService _apiServer; @Inject AccountService _accountMgr; public ApiServlet() { diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 51312a60eb1..951d09ed185 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -397,6 +397,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sb.and("state", sb.entity().getState(), SearchCriteria.Op.NEQ); sb.and("startId", sb.entity().getStartId(), SearchCriteria.Op.EQ); sb.and("createDate", sb.entity().getCreateDate(), SearchCriteria.Op.BETWEEN); + sb.and("archived", sb.entity().getArchived(), SearchCriteria.Op.EQ); SearchCriteria sc = sb.create(); // building ACL condition @@ -430,6 +431,8 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sc.setParameters("createDateL", endDate); } + sc.setParameters("archived", false); + Pair, Integer> eventPair = null; // event_view will not have duplicate rows for each event, so searchAndCount should be good enough. 
if ((entryTime != null) && (duration != null)) { @@ -1515,7 +1518,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // pagination _accountMgr.buildACLViewSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("volumeType", sb.entity().getVolumeType(), SearchCriteria.Op.LIKE); sb.and("instanceId", sb.entity().getVmId(), SearchCriteria.Op.EQ); @@ -1552,7 +1555,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } if (name != null) { - sc.setParameters("name", "%" + name + "%"); + sc.setParameters("name", name); } sc.setParameters("systemUse", 1); @@ -1996,7 +1999,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } if (domainIds != null ){ - sc.setParameters("domainIdIn", domainIds); + sc.setParameters("domainIdIn", domainIds.toArray()); } if (includePublicOfferings){ @@ -2102,7 +2105,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { domainRecord = _domainDao.findById(domainRecord.getParent()); domainIds.add(domainRecord.getId()); } - sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds); + sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray()); // include also public offering if no keyword, name and id specified if ( keyword == null && name == null && id == null ){ @@ -2190,12 +2193,15 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { Long domainId = cmd.getDomainId(); Long id = cmd.getId(); String keyword = cmd.getKeyword(); + String name = cmd.getName(); Filter searchFilter = new Filter(DataCenterJoinVO.class, null, false, cmd.getStartIndex(), cmd.getPageSizeVal()); SearchCriteria sc = _dcJoinDao.createSearchCriteria(); if (id != null) { sc.addAnd("id", SearchCriteria.Op.EQ, id); + 
} else if (name != null) { + sc.addAnd("name", SearchCriteria.Op.EQ, name); } else { if (keyword != null) { SearchCriteria ssc = _dcJoinDao.createSearchCriteria(); @@ -2232,7 +2238,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } // domainId == null (public zones) or domainId IN [all domain id up to root domain] SearchCriteria sdc = _dcJoinDao.createSearchCriteria(); - sdc.addOr("domainId", SearchCriteria.Op.IN, domainIds); + sdc.addOr("domainId", SearchCriteria.Op.IN, domainIds.toArray()); sdc.addOr("domainId", SearchCriteria.Op.NULL); sc.addAnd("domain", SearchCriteria.Op.SC, sdc); @@ -2262,7 +2268,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // domainId == null (public zones) or domainId IN [all domain id up to root domain] SearchCriteria sdc = _dcJoinDao.createSearchCriteria(); - sdc.addOr("domainId", SearchCriteria.Op.IN, domainIds); + sdc.addOr("domainId", SearchCriteria.Op.IN, domainIds.toArray()); sdc.addOr("domainId", SearchCriteria.Op.NULL); sc.addAnd("domain", SearchCriteria.Op.SC, sdc); @@ -2283,7 +2289,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { return new Pair, Integer>(new ArrayList(), 0); } else{ - sc.addAnd("idIn", SearchCriteria.Op.IN, dcIds); + sc.addAnd("idIn", SearchCriteria.Op.IN, dcIds.toArray()); } } diff --git a/server/src/com/cloud/api/query/ViewResponseHelper.java b/server/src/com/cloud/api/query/ViewResponseHelper.java index 55d84bb5af4..9e612b07d1b 100644 --- a/server/src/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/com/cloud/api/query/ViewResponseHelper.java @@ -67,7 +67,6 @@ import com.cloud.user.UserContext; /** * Helper class to generate response from DB view VO objects. 
- * @author minc * */ public class ViewResponseHelper { diff --git a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index 667d8553eb1..4c8b545f343 100644 --- a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -70,6 +70,8 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase 0) { + TrafficType ty = vr.getTrafficType(); + if (ty != null) { + // legacy code, public/control/guest nic info is kept in + // nics response object + if (ty == TrafficType.Public) { + vrData.setPublicIp(vr.getIpAddress()); + vrData.setPublicMacAddress(vr.getMacAddress()); + vrData.setPublicNetmask(vr.getNetmask()); + vrData.setGateway(vr.getGateway()); + vrData.setPublicNetworkId(vr.getNetworkUuid()); + } else if (ty == TrafficType.Control) { + vrData.setLinkLocalIp(vr.getIpAddress()); + vrData.setLinkLocalMacAddress(vr.getMacAddress()); + vrData.setLinkLocalNetmask(vr.getNetmask()); + vrData.setLinkLocalNetworkId(vr.getNetworkUuid()); + } else if (ty == TrafficType.Guest) { + vrData.setGuestIpAddress(vr.getIpAddress()); + vrData.setGuestMacAddress(vr.getMacAddress()); + vrData.setGuestNetmask(vr.getNetmask()); + vrData.setGuestNetworkId(vr.getNetworkUuid()); + vrData.setNetworkDomain(vr.getNetworkDomain()); + } + } NicResponse nicResponse = new NicResponse(); nicResponse.setId(vr.getNicUuid()); nicResponse.setIpaddress(vr.getIpAddress()); @@ -171,6 +200,9 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase implem nicResponse.setNetmask(userVm.getNetmask()); nicResponse.setNetworkid(userVm.getNetworkUuid()); nicResponse.setMacAddress(userVm.getMacAddress()); + nicResponse.setIp6Address(userVm.getIp6Address()); + nicResponse.setIp6Gateway(userVm.getIp6Gateway()); + nicResponse.setIp6Cidr(userVm.getIp6Cidr()); if (userVm.getBroadcastUri() != null) { nicResponse.setBroadcastUri(userVm.getBroadcastUri().toString()); } 
@@ -244,6 +247,9 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem nicResponse.setNetmask(uvo.getNetmask()); nicResponse.setNetworkid(uvo.getNetworkUuid()); nicResponse.setMacAddress(uvo.getMacAddress()); + nicResponse.setIp6Address(uvo.getIp6Address()); + nicResponse.setIp6Gateway(uvo.getIp6Gateway()); + nicResponse.setIp6Cidr(uvo.getIp6Cidr()); if (uvo.getBroadcastUri() != null) { nicResponse.setBroadcastUri(uvo.getBroadcastUri().toString()); } @@ -327,7 +333,15 @@ public class UserVmJoinDaoImpl extends GenericDaoBase implem } Set vmIdSet = userVmDataHash.keySet(); - return searchByIds(vmIdSet.toArray(new Long[vmIdSet.size()])); + List uvms = searchByIds(vmIdSet.toArray(new Long[vmIdSet.size()])); + // populate transit password field from UserVm + if ( uvms != null ){ + for (UserVmJoinVO uvm : uvms){ + UserVm v = userVmDataHash.get(uvm.getId()); + uvm.setPassword(v.getPassword()); + } + } + return uvms; } } diff --git a/server/src/com/cloud/api/query/vo/ControlledViewEntity.java b/server/src/com/cloud/api/query/vo/ControlledViewEntity.java index 12557504807..014abfaa3c0 100644 --- a/server/src/com/cloud/api/query/vo/ControlledViewEntity.java +++ b/server/src/com/cloud/api/query/vo/ControlledViewEntity.java @@ -24,7 +24,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * This is the interface for all VO classes representing DB views created for previous ControlledEntity. 
* - * @author minc * */ public interface ControlledViewEntity extends ControlledEntity, InternalIdentity, Identity { diff --git a/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java b/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java index 67a3f2715f0..84becf2cbe1 100644 --- a/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java +++ b/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java @@ -55,6 +55,12 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id @Column(name="dns2") private String dns2 = null; + @Column(name="ip6_dns1") + private String ip6Dns1 = null; + + @Column(name="ip6_dns2") + private String ip6Dns2 = null; + @Column(name="internal_dns1") private String internalDns1 = null; @@ -280,5 +286,21 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id this.domainPath = domainPath; } + public String getIp6Dns1() { + return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } + } diff --git a/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java b/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java index a9c04586a92..b04120a71b2 100644 --- a/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java +++ b/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java @@ -107,6 +107,11 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name="dns2") private String dns2 = null; + @Column(name="ip6_dns1") + private String ip6Dns1 = null; + + @Column(name="ip6_dns2") + private String ip6Dns2 = null; @Column(name="host_id", updatable=true, nullable=true) private long hostId; @@ -157,6 +162,15 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name = "netmask") private String netmask; + @Column(name = "ip6_address") + private String ip6Address; 
+ + @Column(name = "ip6_gateway") + private String ip6Gateway; + + @Column(name = "ip6_cidr") + private String ip6Cidr; + @Column(name = "mac_address") private String macAddress; @@ -920,4 +934,61 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti } + + + public String getIp6Address() { + return ip6Address; + } + + + + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + + + + public String getIp6Gateway() { + return ip6Gateway; + } + + + + + public void setIp6Gateway(String ip6Gateway) { + this.ip6Gateway = ip6Gateway; + } + + + + + public String getIp6Cidr() { + return ip6Cidr; + } + + + + + public void setIp6Cidr(String ip6Cidr) { + this.ip6Cidr = ip6Cidr; + } + + + public String getIp6Dns1() { + return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } } diff --git a/server/src/com/cloud/api/query/vo/EventJoinVO.java b/server/src/com/cloud/api/query/vo/EventJoinVO.java index f29a942a59f..12d7e5ae4d0 100644 --- a/server/src/com/cloud/api/query/vo/EventJoinVO.java +++ b/server/src/com/cloud/api/query/vo/EventJoinVO.java @@ -104,6 +104,8 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="project_name") private String projectName; + @Column(name="archived") + private boolean archived; public EventJoinVO() { @@ -313,5 +315,12 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity { this.parameters = parameters; } + public boolean getArchived() { + return archived; + } + + public void setArchived(Boolean archived) { + this.archived = archived; + } } diff --git a/server/src/com/cloud/api/query/vo/HostJoinVO.java b/server/src/com/cloud/api/query/vo/HostJoinVO.java index a3796b97eba..0b8f6721325 100644 --- a/server/src/com/cloud/api/query/vo/HostJoinVO.java +++ 
b/server/src/com/cloud/api/query/vo/HostJoinVO.java @@ -39,7 +39,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * Host DB view. - * @author minc * */ @Entity diff --git a/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java b/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java index a60c9370530..f6e67609c79 100644 --- a/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java @@ -25,8 +25,9 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; +import com.cloud.projects.ProjectInvitation.State; import com.cloud.utils.db.GenericDao; -import com.cloud.vm.VirtualMachine.State; + @Entity @Table(name="project_invitation_view") diff --git a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java index fd837bd5d88..89e79e5eea5 100644 --- a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.api.InternalIdentity; /** * Storage Pool DB view. 
- * @author minc * */ @Entity diff --git a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java index 025db47d599..d7238224e4e 100644 --- a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java @@ -269,6 +269,15 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name = "netmask") private String netmask; + @Column(name = "ip6_address") + private String ip6Address; + + @Column(name = "ip6_gateway") + private String ip6Gateway; + + @Column(name = "ip6_cidr") + private String ip6Cidr; + @Column(name = "mac_address") private String macAddress; @@ -1611,4 +1620,42 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return toString; } + public String getIp6Address() { + return ip6Address; + } + + + + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + + + + public String getIp6Gateway() { + return ip6Gateway; + } + + + + + public void setIp6Gateway(String ip6Gateway) { + this.ip6Gateway = ip6Gateway; + } + + + + + public String getIp6Cidr() { + return ip6Cidr; + } + + + + + public void setIp6Cidr(String ip6Cidr) { + this.ip6Cidr = ip6Cidr; + } } diff --git a/server/src/com/cloud/baremetal/BareMetalDiscoverer.java b/server/src/com/cloud/baremetal/BareMetalDiscoverer.java deleted file mode 100755 index e7518853ef0..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalDiscoverer.java +++ /dev/null @@ -1,245 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.net.InetAddress; -import java.net.URI; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupRoutingCommand; -import org.apache.cloudstack.api.ApiConstants; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.ClusterDao; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.exception.DiscoveryException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.resource.Discoverer; -import com.cloud.resource.DiscovererBase; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceStateAdapter; -import com.cloud.resource.ServerResource; -import com.cloud.resource.UnableDeleteHostException; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; -import com.cloud.utils.script.Script2; -import com.cloud.utils.script.Script2.ParamType; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.VMInstanceDao; - -@Local(value=Discoverer.class) -public class BareMetalDiscoverer extends DiscovererBase implements Discoverer, 
ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(BareMetalDiscoverer.class); - @Inject ClusterDao _clusterDao; - @Inject protected HostDao _hostDao; - @Inject DataCenterDao _dcDao; - @Inject VMInstanceDao _vmDao = null; - @Inject ResourceManager _resourceMgr; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); - return super.configure(name, params); - } - - @Override - public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); - return super.stop(); - } - - @Override - public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) - throws DiscoveryException { - Map> resources = new HashMap>(); - Map details = new HashMap(); - - if (!url.getScheme().equals("http")) { - String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; - s_logger.debug(msg); - return null; - } - if (clusterId == null) { - String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); - throw new RuntimeException(msg); - } - - if (podId == null) { - String msg = "must specify pod Id when add host"; - s_logger.debug(msg); - throw new RuntimeException(msg); - } - - ClusterVO cluster = _clusterDao.findById(clusterId); - if (cluster == null || (cluster.getHypervisorType() != HypervisorType.BareMetal)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Bare Metal hosts"); - return null; - } - - DataCenterVO zone = _dcDao.findById(dcId); - if (zone == null) { - throw new RuntimeException("Cannot find zone " + dcId); - } - - try { - String hostname = url.getHost(); - InetAddress ia = InetAddress.getByName(hostname); - String ipmiIp = ia.getHostAddress(); - String guid = UUID.nameUUIDFromBytes(ipmiIp.getBytes()).toString(); - - String injectScript = 
"scripts/util/ipmi.py"; - String scriptPath = Script.findScript("", injectScript); - if (scriptPath == null) { - throw new CloudRuntimeException("Unable to find key ipmi script " - + injectScript); - } - - final Script2 command = new Script2(scriptPath, s_logger); - command.add("ping"); - command.add("hostname="+ipmiIp); - command.add("usrname="+username); - command.add("password="+password, ParamType.PASSWORD); - final String result = command.execute(); - if (result != null) { - s_logger.warn(String.format("Can not set up ipmi connection(ip=%1$s, username=%2$s, password=%3$s, args) because %4$s", ipmiIp, username, "******", result)); - return null; - } - - ClusterVO clu = _clusterDao.findById(clusterId); - if (clu.getGuid() == null) { - clu.setGuid(UUID.randomUUID().toString()); - _clusterDao.update(clusterId, clu); - } - - Map params = new HashMap(); - params.putAll(_params); - params.put("zone", Long.toString(dcId)); - params.put("pod", Long.toString(podId)); - params.put("cluster", Long.toString(clusterId)); - params.put("guid", guid); - params.put(ApiConstants.PRIVATE_IP, ipmiIp); - params.put(ApiConstants.USERNAME, username); - params.put(ApiConstants.PASSWORD, password); - BareMetalResourceBase resource = new BareMetalResourceBase(); - resource.configure("Bare Metal Agent", params); - - String memCapacity = (String)params.get(ApiConstants.MEMORY); - String cpuCapacity = (String)params.get(ApiConstants.CPU_SPEED); - String cpuNum = (String)params.get(ApiConstants.CPU_NUMBER); - String mac = (String)params.get(ApiConstants.HOST_MAC); - if (hostTags != null && hostTags.size() != 0) { - details.put("hostTag", hostTags.get(0)); - } - details.put(ApiConstants.MEMORY, memCapacity); - details.put(ApiConstants.CPU_SPEED, cpuCapacity); - details.put(ApiConstants.CPU_NUMBER, cpuNum); - details.put(ApiConstants.HOST_MAC, mac); - details.put(ApiConstants.USERNAME, username); - details.put(ApiConstants.PASSWORD, password); - details.put(ApiConstants.PRIVATE_IP, ipmiIp); - 
- resources.put(resource, details); - resource.start(); - - zone.setGatewayProvider(Network.Provider.ExternalGateWay.getName()); - zone.setDnsProvider(Network.Provider.ExternalDhcpServer.getName()); - zone.setDhcpProvider(Network.Provider.ExternalDhcpServer.getName()); - _dcDao.update(zone.getId(), zone); - - s_logger.debug(String.format("Discover Bare Metal host successfully(ip=%1$s, username=%2$s, password=%3%s," + - "cpuNum=%4$s, cpuCapacity-%5$s, memCapacity=%6$s)", ipmiIp, username, "******", cpuNum, cpuCapacity, memCapacity)); - return resources; - } catch (Exception e) { - s_logger.warn("Can not set up bare metal agent", e); - } - - return null; - } - - @Override - public void postDiscovery(List hosts, long msId) - throws DiscoveryException { - } - - @Override - public boolean matchHypervisor(String hypervisor) { - return hypervisor.equalsIgnoreCase(Hypervisor.HypervisorType.BareMetal.toString()); - } - - @Override - public HypervisorType getHypervisorType() { - return Hypervisor.HypervisorType.BareMetal; - } - - @Override - public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, - List hostTags) { - StartupCommand firstCmd = startup[0]; - if (!(firstCmd instanceof StartupRoutingCommand)) { - return null; - } - - StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd); - if (ssCmd.getHypervisorType() != HypervisorType.BareMetal) { - return null; - } - - return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.BareMetal, details, hostTags); - } - - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - if (host.getType() != Host.Type.Routing || host.getHypervisorType() != HypervisorType.BareMetal) { - return null; - } - - List deadVms 
= _vmDao.listByLastHostId(host.getId()); - for (VMInstanceVO vm : deadVms) { - if (vm.getState() == State.Running || vm.getHostId() != null) { - throw new CloudRuntimeException("VM " + vm.getId() + "is still running on host " + host.getId()); - } - _vmDao.remove(vm.getId()); - } - - return new DeleteHostAnswer(true); - } - -} diff --git a/server/src/com/cloud/baremetal/BareMetalGuru.java b/server/src/com/cloud/baremetal/BareMetalGuru.java deleted file mode 100755 index 9268415b08d..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalGuru.java +++ /dev/null @@ -1,59 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import javax.ejb.Local; -import javax.inject.Inject; - -import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.hypervisor.HypervisorGuru; -import com.cloud.hypervisor.HypervisorGuruBase; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.GuestOSVO; -import com.cloud.storage.dao.GuestOSDao; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -@Local(value=HypervisorGuru.class) -public class BareMetalGuru extends HypervisorGuruBase implements HypervisorGuru { - @Inject GuestOSDao _guestOsDao; - - protected BareMetalGuru() { - super(); - } - - @Override - public HypervisorType getHypervisorType() { - return HypervisorType.BareMetal; - } - - @Override - public VirtualMachineTO implement(VirtualMachineProfile vm) { - VirtualMachineTO to = toVirtualMachineTO(vm); - - // Determine the VM's OS description - GuestOSVO guestOS = _guestOsDao.findById(vm.getVirtualMachine().getGuestOSId()); - to.setOs(guestOS.getDisplayName()); - - return to; - } - - @Override - public boolean trackVmHostChange() { - return true; - } -} diff --git a/server/src/com/cloud/baremetal/BareMetalPingServiceImpl.java b/server/src/com/cloud/baremetal/BareMetalPingServiceImpl.java deleted file mode 100755 index 3ccf29849b9..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalPingServiceImpl.java +++ /dev/null @@ -1,199 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.net.URI; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.baremetal.PreparePxeServerAnswer; -import com.cloud.agent.api.baremetal.PreparePxeServerCommand; -import com.cloud.agent.api.baremetal.prepareCreateTemplateCommand; -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ServerResource; -import com.cloud.uservm.UserVm; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.NicProfile; -import com.cloud.vm.NicVO; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachineProfile; - -@Component -@Local(value=PxeServerService.class) -public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements PxeServerService { - private static final Logger s_logger = Logger.getLogger(BareMetalPingServiceImpl.class); - @Inject ResourceManager _resourceMgr; - - @Override - public Host addPxeServer(PxeServerProfile profile) { - Long zoneId = profile.getZoneId(); - Long podId = profile.getPodId(); - - 
DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Could not find zone with ID: " + zoneId); - } - - List pxeServers = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.PxeServer, null, podId, zoneId); - if (pxeServers.size() != 0) { - InvalidParameterValueException ex = new InvalidParameterValueException("Already had a PXE server in Pod with specified podId and zone with specified zoneId"); - ex.addProxyObject("pod", podId, "podId"); - ex.addProxyObject(zone, zoneId, "zoneId"); - } - - - String ipAddress = profile.getUrl(); - String username = profile.getUsername(); - String password = profile.getPassword(); - - ServerResource resource = null; - Map params = new HashMap(); - params.put("type", PxeServerType.PING.getName()); - params.put("zone", Long.toString(zoneId)); - params.put("pod", podId.toString()); - params.put("ip", ipAddress); - params.put("username", username); - params.put("password", password); - if (profile.getType().equalsIgnoreCase(PxeServerType.PING.getName())) { - String storageServerIp = profile.getPingStorageServerIp(); - if (storageServerIp == null) { - throw new InvalidParameterValueException("No IP for storage server specified"); - } - String pingDir = profile.getPingDir(); - if (pingDir == null) { - throw new InvalidParameterValueException("No direcotry for storage server specified"); - } - String tftpDir = profile.getTftpDir(); - if (tftpDir == null) { - throw new InvalidParameterValueException("No TFTP directory specified"); - } - String cifsUsername = profile.getPingCifsUserName(); - if (cifsUsername == null || cifsUsername.equalsIgnoreCase("")) { - cifsUsername = "xxx"; - } - String cifsPassword = profile.getPingCifspassword(); - if (cifsPassword == null || cifsPassword.equalsIgnoreCase("")) { - cifsPassword = "xxx"; - } - String guid = getPxeServerGuid(Long.toString(zoneId) + "-" + Long.toString(podId), PxeServerType.PING.getName(), ipAddress); - - params.put("storageServer", 
storageServerIp); - params.put("pingDir", pingDir); - params.put("tftpDir", tftpDir); - params.put("cifsUserName", cifsUsername); - params.put("cifsPassword", cifsPassword); - params.put("guid", guid); - - resource = new PingPxeServerResource(); - try { - resource.configure("PING PXE resource", params); - } catch (Exception e) { - s_logger.debug(e); - throw new CloudRuntimeException(e.getMessage()); - } - - } else { - throw new CloudRuntimeException("Unsupport PXE server type:" + profile.getType()); - } - - Host pxeServer = _resourceMgr.addHost(zoneId, resource, Host.Type.PxeServer, params); - if (pxeServer == null) { - throw new CloudRuntimeException("Cannot add PXE server as a host"); - } - - return pxeServer; - } - - - @Override - public boolean prepare(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context, Long pxeServerId) { - List nics = profile.getNics(); - if (nics.size() == 0) { - throw new CloudRuntimeException("Cannot do PXE start without nic"); - } - - NicProfile pxeNic = nics.get(0); - String mac = pxeNic.getMacAddress(); - String ip = pxeNic.getIp4Address(); - String gateway = pxeNic.getGateway(); - String mask = pxeNic.getNetmask(); - String dns = pxeNic.getDns1(); - if (dns == null) { - dns = pxeNic.getDns2(); - } - - try { - String tpl = profile.getTemplate().getUrl(); - assert tpl != null : "How can a null template get here!!!"; - PreparePxeServerCommand cmd = new PreparePxeServerCommand(ip, mac, mask, gateway, dns, tpl, - profile.getVirtualMachine().getInstanceName(), dest.getHost().getName()); - PreparePxeServerAnswer ans = (PreparePxeServerAnswer) _agentMgr.send(pxeServerId, cmd); - return ans.getResult(); - } catch (Exception e) { - s_logger.warn("Cannot prepare PXE server", e); - return false; - } - } - - - @Override - public boolean prepareCreateTemplate(Long pxeServerId, UserVm vm, String templateUrl) { - List nics = _nicDao.listByVmId(vm.getId()); - if (nics.size() != 1) { - throw new 
CloudRuntimeException("Wrong nic number " + nics.size() + " of vm " + vm.getId()); - } - - /* use last host id when VM stopped */ - Long hostId = (vm.getHostId() == null ? vm.getLastHostId() : vm.getHostId()); - HostVO host = _hostDao.findById(hostId); - DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); - NicVO nic = nics.get(0); - String mask = nic.getNetmask(); - String mac = nic.getMacAddress(); - String ip = nic.getIp4Address(); - String gateway = nic.getGateway(); - String dns = dc.getDns1(); - if (dns == null) { - dns = dc.getDns2(); - } - - try { - prepareCreateTemplateCommand cmd = new prepareCreateTemplateCommand(ip, mac, mask, gateway, dns, templateUrl); - Answer ans = _agentMgr.send(pxeServerId, cmd); - return ans.getResult(); - } catch (Exception e) { - s_logger.debug("Prepare for creating baremetal template failed", e); - return false; - } - } -} diff --git a/server/src/com/cloud/baremetal/BareMetalPxeServiceBase.java b/server/src/com/cloud/baremetal/BareMetalPxeServiceBase.java deleted file mode 100644 index 0df06509c25..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalPxeServiceBase.java +++ /dev/null @@ -1,56 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import java.util.Map; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import com.cloud.agent.AgentManager; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeployDestination; -import com.cloud.host.Host; -import com.cloud.host.dao.HostDao; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.NicDao; - -public abstract class BareMetalPxeServiceBase extends ManagerBase implements PxeServerService { - @Inject DataCenterDao _dcDao; - @Inject HostDao _hostDao; - @Inject AgentManager _agentMgr; - @Inject ExternalDhcpManager exDhcpMgr; - @Inject HostPodDao _podDao; - @Inject NicDao _nicDao; - - @Override - public boolean prepare(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context, Long pxeServerId) { - throw new CloudRuntimeException("Dervied class should implement this method"); - } - - protected String getPxeServerGuid(String zoneId, String name, String ip) { - return zoneId + "-" + name + "-" + ip; - } - - @Override - public abstract Host addPxeServer(PxeServerProfile profile); -} diff --git a/server/src/com/cloud/baremetal/BareMetalResourceBase.java b/server/src/com/cloud/baremetal/BareMetalResourceBase.java deleted file mode 100755 index 274cf077176..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalResourceBase.java +++ /dev/null @@ -1,630 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.HashMap; -import java.util.Map; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.IAgentControl; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.CheckNetworkAnswer; -import com.cloud.agent.api.CheckNetworkCommand; -import com.cloud.agent.api.CheckVirtualMachineAnswer; -import com.cloud.agent.api.CheckVirtualMachineCommand; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.MaintainAnswer; -import com.cloud.agent.api.MaintainCommand; -import com.cloud.agent.api.MigrateAnswer; -import com.cloud.agent.api.MigrateCommand; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.PingRoutingCommand; -import com.cloud.agent.api.PrepareForMigrationAnswer; -import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.ReadyAnswer; -import com.cloud.agent.api.ReadyCommand; -import com.cloud.agent.api.RebootAnswer; -import com.cloud.agent.api.RebootCommand; -import com.cloud.agent.api.StartAnswer; -import com.cloud.agent.api.StartCommand; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupRoutingCommand; -import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.StopCommand; -import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; -import com.cloud.agent.api.baremetal.IpmISetBootDevCommand.BootDev; -import com.cloud.agent.api.baremetal.IpmiBootorResetCommand; -import com.cloud.agent.api.to.VirtualMachineTO; -import 
org.apache.cloudstack.api.ApiConstants; -import com.cloud.host.Host.Type; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ServerResource; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.OutputInterpreter; -import com.cloud.utils.script.Script; -import com.cloud.utils.script.Script2; -import com.cloud.utils.script.Script2.ParamType; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; - -@Local(value = ServerResource.class) -public class BareMetalResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(BareMetalResourceBase.class); - protected HashMap _vms = new HashMap(2); - protected String _name; - protected String _uuid; - protected String _zone; - protected String _pod; - protected String _cluster; - protected long _memCapacity; - protected long _cpuCapacity; - protected long _cpuNum; - protected String _mac; - protected String _username; - protected String _password; - protected String _ip; - protected IAgentControl _agentControl; - protected Script2 _pingCommand; - protected Script2 _setPxeBootCommand; - protected Script2 _setDiskBootCommand; - protected Script2 _rebootCommand; - protected Script2 _getStatusCommand; - protected Script2 _powerOnCommand; - protected Script2 _powerOffCommand; - protected Script2 _forcePowerOffCommand; - protected Script2 _bootOrRebootCommand; - protected String _vmName; - - private void changeVmState(String vmName, VirtualMachine.State state) { - synchronized (_vms) { - _vms.put(vmName, state); - } - } - - private State removeVmState(String vmName) { - synchronized (_vms) { - return _vms.remove(vmName); - } - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - _uuid = (String) params.get("guid"); - try { - _memCapacity = Long.parseLong((String)params.get(ApiConstants.MEMORY)) * 1024L * 1024L; - _cpuCapacity = 
Long.parseLong((String)params.get(ApiConstants.CPU_SPEED)); - _cpuNum = Long.parseLong((String)params.get(ApiConstants.CPU_NUMBER)); - } catch (NumberFormatException e) { - throw new ConfigurationException(String.format("Unable to parse number of CPU or memory capacity " + - "or cpu capacity(cpu number = %1$s memCapacity=%2$s, cpuCapacity=%3$s", (String)params.get(ApiConstants.CPU_NUMBER), - (String)params.get(ApiConstants.MEMORY), (String)params.get(ApiConstants.CPU_SPEED))); - } - - _zone = (String) params.get("zone"); - _pod = (String) params.get("pod"); - _cluster = (String) params.get("cluster"); - _ip = (String)params.get(ApiConstants.PRIVATE_IP); - _mac = (String)params.get(ApiConstants.HOST_MAC); - _username = (String)params.get(ApiConstants.USERNAME); - _password = (String)params.get(ApiConstants.PASSWORD); - _vmName = (String)params.get("vmName"); - - if (_pod == null) { - throw new ConfigurationException("Unable to get the pod"); - } - - if (_cluster == null) { - throw new ConfigurationException("Unable to get the pod"); - } - - if (_ip == null) { - throw new ConfigurationException("Unable to get the host address"); - } - - if (_mac.equalsIgnoreCase("unknown")) { - throw new ConfigurationException("Unable to get the host mac address"); - } - - if (_mac.split(":").length != 6) { - throw new ConfigurationException("Wrong MAC format(" + _mac + "). 
It must be in format of for example 00:11:ba:33:aa:dd which is not case sensitive"); - } - - if (_uuid == null) { - throw new ConfigurationException("Unable to get the uuid"); - } - - String injectScript = "scripts/util/ipmi.py"; - String scriptPath = Script.findScript("", injectScript); - if (scriptPath == null) { - throw new ConfigurationException("Cannot find ping script " + scriptPath); - } - _pingCommand = new Script2(scriptPath, s_logger); - _pingCommand.add("ping"); - _pingCommand.add("hostname="+_ip); - _pingCommand.add("usrname="+_username); - _pingCommand.add("password="+_password, ParamType.PASSWORD); - - _setPxeBootCommand = new Script2(scriptPath, s_logger); - _setPxeBootCommand.add("boot_dev"); - _setPxeBootCommand.add("hostname="+_ip); - _setPxeBootCommand.add("usrname="+_username); - _setPxeBootCommand.add("password="+_password, ParamType.PASSWORD); - _setPxeBootCommand.add("dev=pxe"); - - _setDiskBootCommand = new Script2(scriptPath, s_logger); - _setDiskBootCommand.add("boot_dev"); - _setDiskBootCommand.add("hostname="+_ip); - _setDiskBootCommand.add("usrname="+_username); - _setDiskBootCommand.add("password="+_password, ParamType.PASSWORD); - _setDiskBootCommand.add("dev=disk"); - - _rebootCommand = new Script2(scriptPath, s_logger); - _rebootCommand.add("reboot"); - _rebootCommand.add("hostname="+_ip); - _rebootCommand.add("usrname="+_username); - _rebootCommand.add("password="+_password, ParamType.PASSWORD); - - _getStatusCommand = new Script2(scriptPath, s_logger); - _getStatusCommand.add("ping"); - _getStatusCommand.add("hostname="+_ip); - _getStatusCommand.add("usrname="+_username); - _getStatusCommand.add("password="+_password, ParamType.PASSWORD); - - _powerOnCommand = new Script2(scriptPath, s_logger); - _powerOnCommand.add("power"); - _powerOnCommand.add("hostname="+_ip); - _powerOnCommand.add("usrname="+_username); - _powerOnCommand.add("password="+_password, ParamType.PASSWORD); - _powerOnCommand.add("action=on"); - - _powerOffCommand 
= new Script2(scriptPath, s_logger); - _powerOffCommand.add("power"); - _powerOffCommand.add("hostname="+_ip); - _powerOffCommand.add("usrname="+_username); - _powerOffCommand.add("password="+_password, ParamType.PASSWORD); - _powerOffCommand.add("action=soft"); - - _forcePowerOffCommand = new Script2(scriptPath, s_logger); - _forcePowerOffCommand.add("power"); - _forcePowerOffCommand.add("hostname=" + _ip); - _forcePowerOffCommand.add("usrname=" + _username); - _forcePowerOffCommand.add("password=" + _password, ParamType.PASSWORD); - _forcePowerOffCommand.add("action=off"); - - _bootOrRebootCommand = new Script2(scriptPath, s_logger); - _bootOrRebootCommand.add("boot_or_reboot"); - _bootOrRebootCommand.add("hostname="+_ip); - _bootOrRebootCommand.add("usrname="+_username); - _bootOrRebootCommand.add("password="+_password, ParamType.PASSWORD); - - return true; - } - - protected boolean doScript(Script cmd) { - return doScript(cmd, null); - } - - protected boolean doScript(Script cmd, OutputInterpreter interpreter) { - int retry = 5; - String res = null; - while (retry-- > 0) { - if (interpreter == null) { - res = cmd.execute(); - } else { - res = cmd.execute(interpreter); - } - if (res != null && res.startsWith("Error: Unable to establish LAN")) { - s_logger.warn("IPMI script timeout(" + cmd.toString() + "), will retry " + retry + " times"); - continue; - } else if (res == null) { - return true; - } else { - break; - } - } - - s_logger.warn("IPMI Scirpt failed due to " + res + "(" + cmd.toString() +")"); - return false; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public String getName() { - return _name; - } - - @Override - public Type getType() { - return com.cloud.host.Host.Type.Routing; - } - - protected State getVmState() { - OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); - if (!doScript(_getStatusCommand, interpreter)) { - 
s_logger.warn("Cannot get power status of " + _name + ", assume VM state was not changed"); - return null; - } - if (isPowerOn(interpreter.getLines())) { - return State.Running; - } else { - return State.Stopped; - } - } - - protected Map fullSync() { - Map changes = new HashMap(); - - if (_vmName != null) { - State state = getVmState(); - if (state != null) { - changes.put(_vmName, state); - } - } - - return changes; - } - - @Override - public StartupCommand[] initialize() { - StartupRoutingCommand cmd = new StartupRoutingCommand(0, 0, 0, 0, null, Hypervisor.HypervisorType.BareMetal, - new HashMap(), null); - cmd.setDataCenter(_zone); - cmd.setPod(_pod); - cmd.setCluster(_cluster); - cmd.setGuid(_uuid); - cmd.setName(_ip); - cmd.setPrivateIpAddress(_ip); - cmd.setStorageIpAddress(_ip); - cmd.setVersion(BareMetalResourceBase.class.getPackage().getImplementationVersion()); - cmd.setCpus((int)_cpuNum); - cmd.setSpeed(_cpuCapacity); - cmd.setMemory(_memCapacity); - cmd.setPrivateMacAddress(_mac); - cmd.setPublicMacAddress(_mac); - cmd.setStateChanges(fullSync()); - return new StartupCommand[] {cmd}; - } - - private boolean ipmiPing() { - return doScript(_pingCommand); - } - - @Override - public PingCommand getCurrentStatus(long id) { - try { - if (!ipmiPing()) { - Thread.sleep(1000); - if (!ipmiPing()) { - s_logger.warn("Cannot ping ipmi nic " + _ip); - return null; - } - } - } catch (Exception e) { - s_logger.debug("Cannot ping ipmi nic " + _ip, e); - return null; - } - - return new PingRoutingCommand(getType(), id, deltaSync()); - } - - protected Answer execute(IpmISetBootDevCommand cmd) { - Script bootCmd = null; - if (cmd.getBootDev() == BootDev.disk) { - bootCmd = _setDiskBootCommand; - } else if (cmd.getBootDev() == BootDev.pxe) { - bootCmd = _setPxeBootCommand; - } else { - throw new CloudRuntimeException("Unkonwn boot dev " + cmd.getBootDev()); - } - - String bootDev = cmd.getBootDev().name(); - if (!doScript(bootCmd)) { - s_logger.warn("Set " + _ip + " boot 
dev to " + bootDev + "failed"); - return new Answer(cmd, false, "Set " + _ip + " boot dev to " + bootDev + "failed"); - } - - s_logger.warn("Set " + _ip + " boot dev to " + bootDev + "Success"); - return new Answer(cmd, true, "Set " + _ip + " boot dev to " + bootDev + "Success"); - } - - protected MaintainAnswer execute(MaintainCommand cmd) { - return new MaintainAnswer(cmd, false); - } - - protected PrepareForMigrationAnswer execute(PrepareForMigrationCommand cmd) { - return new PrepareForMigrationAnswer(cmd); - } - - protected MigrateAnswer execute(MigrateCommand cmd) { - if (!doScript(_powerOffCommand)) { - return new MigrateAnswer(cmd, false, "IPMI power off failed", null); - } - return new MigrateAnswer(cmd, true, "success", null); - } - - protected CheckVirtualMachineAnswer execute(final CheckVirtualMachineCommand cmd) { - return new CheckVirtualMachineAnswer(cmd, State.Stopped, null); - } - - protected Answer execute(IpmiBootorResetCommand cmd) { - if (!doScript(_bootOrRebootCommand)) { - return new Answer(cmd ,false, "IPMI boot or reboot failed"); - } - return new Answer(cmd, true, "Success"); - - } - - protected CheckNetworkAnswer execute(CheckNetworkCommand cmd) { - return new CheckNetworkAnswer(cmd, true, "Success"); - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof ReadyCommand) { - return execute((ReadyCommand)cmd); - } else if (cmd instanceof StartCommand) { - return execute((StartCommand)cmd); - } else if (cmd instanceof StopCommand) { - return execute((StopCommand)cmd); - } else if (cmd instanceof RebootCommand) { - return execute((RebootCommand)cmd); - } else if (cmd instanceof IpmISetBootDevCommand) { - return execute((IpmISetBootDevCommand)cmd); - } else if (cmd instanceof MaintainCommand) { - return execute((MaintainCommand)cmd); - } else if (cmd instanceof PrepareForMigrationCommand) { - return execute((PrepareForMigrationCommand)cmd); - } else if (cmd instanceof MigrateCommand) { - return 
execute((MigrateCommand)cmd); - } else if (cmd instanceof CheckVirtualMachineCommand) { - return execute((CheckVirtualMachineCommand)cmd); - } else if (cmd instanceof IpmiBootorResetCommand) { - return execute((IpmiBootorResetCommand)cmd); - } else if (cmd instanceof CheckNetworkCommand) { - return execute((CheckNetworkCommand)cmd); - } else { - return Answer.createUnsupportedCommandAnswer(cmd); - } - } - - protected boolean isPowerOn(String str) { - if (str.startsWith("Chassis Power is on")) { - return true; - } else if (str.startsWith("Chassis Power is off")) { - return false; - } else { - throw new CloudRuntimeException("Cannot parse IPMI power status " + str); - } - } - - protected RebootAnswer execute(final RebootCommand cmd) { - if (!doScript(_rebootCommand)) { - return new RebootAnswer(cmd, "IPMI reboot failed", false); - } - - return new RebootAnswer(cmd, "reboot succeeded", true); - } - - protected StopAnswer execute(final StopCommand cmd) { - boolean success = false; - int count = 0; - Script powerOff = _powerOffCommand; - - while (count < 10) { - if (!doScript(powerOff)) { - break; - } - - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - break; - } - - OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); - if (!doScript(_getStatusCommand, interpreter)) { - s_logger.warn("Cannot get power status of " + _name + ", assume VM state was not changed"); - break; - } - - if (!isPowerOn(interpreter.getLines())) { - success = true; - break; - } else { - powerOff = _forcePowerOffCommand; - } - - count++; - } - - return success ? 
new StopAnswer(cmd, "Success", null, true) : new StopAnswer(cmd, "IPMI power off failed", false); - } - - protected StartAnswer execute(StartCommand cmd) { - VirtualMachineTO vm = cmd.getVirtualMachine(); - State state = State.Stopped; - - try { - changeVmState(vm.getName(), State.Starting); - - boolean pxeBoot = false; - String[] bootArgs = vm.getBootArgs().split(" "); - for (int i = 0; i < bootArgs.length; i++) { - if (bootArgs[i].equalsIgnoreCase("PxeBoot")) { - pxeBoot = true; - break; - } - } - - if (pxeBoot) { - if (!doScript(_setPxeBootCommand)) { - return new StartAnswer(cmd, "Set boot device to PXE failed"); - } - s_logger.debug("Set " + vm.getHostName() + " to PXE boot successfully"); - } else { - execute(new IpmISetBootDevCommand(BootDev.disk)); - } - - OutputInterpreter.AllLinesParser interpreter = new OutputInterpreter.AllLinesParser(); - if (!doScript(_getStatusCommand, interpreter)) { - return new StartAnswer(cmd, "Cannot get current power status of " + _name); - } - - if (isPowerOn(interpreter.getLines())) { - if (pxeBoot) { - if (!doScript(_rebootCommand)) { - return new StartAnswer(cmd, "IPMI reboot failed"); - } - s_logger.debug("IPMI reboot " + vm.getHostName() + " successfully"); - } else { - s_logger.warn("Machine " + _name + " is alreay power on, why we still get a Start command? ignore it"); - - } - } else { - if (!doScript(_powerOnCommand)) { - return new StartAnswer(cmd, "IPMI power on failed"); - } - } - - s_logger.debug("Start bare metal vm " + vm.getName() + "successfully"); - state = State.Running; - _vmName = vm.getName(); - return new StartAnswer(cmd); - } finally { - if (state != State.Stopped) { - changeVmState(vm.getName(), state); - } else { - removeVmState(vm.getName()); - } - } - } - - protected HashMap deltaSync() { - final HashMap changes = new HashMap(); - /* - * Disable sync until we find a way that only tracks status but not does action - * - * The scenario is: Baremetal will reboot host when creating template. 
Given most - * servers take a long time to boot up, there would be a period that mgmt server finds - * the host is stopped through fullsync. Then mgmt server updates database with marking the host as - * stopped, after that, the host comes up and full sync then indicates it's running. Because - * in database the host is already stopped, mgmt server sends out a stop command. - * As a result, creating image gets never happened. - * - if (_vmName == null) { - return null; - } - - State newState = getVmState(); - if (newState == null) { - s_logger.warn("Cannot get power state of VM " + _vmName); - return null; - } - - final State oldState = removeVmState(_vmName); - if (oldState == null) { - changeVmState(_vmName, newState); - changes.put(_vmName, newState); - } else if (oldState == State.Starting) { - if (newState == State.Running) { - changeVmState(_vmName, newState); - } else if (newState == State.Stopped) { - s_logger.debug("Ignoring vm " + _vmName + " because of a lag in starting the vm."); - } - } else if (oldState == State.Migrating) { - s_logger.warn("How can baremetal VM get into migrating state???"); - } else if (oldState == State.Stopping) { - if (newState == State.Stopped) { - changeVmState(_vmName, newState); - } else if (newState == State.Running) { - s_logger.debug("Ignoring vm " + _vmName + " because of a lag in stopping the vm. 
"); - } - } else if (oldState != newState) { - changeVmState(_vmName, newState); - changes.put(_vmName, newState); - } - */ - return changes; - - } - - protected ReadyAnswer execute(ReadyCommand cmd) { - // derived resource should check if the PXE server is ready - s_logger.debug("Bare metal resource " + _name + " is ready"); - return new ReadyAnswer(cmd); - } - - @Override - public void disconnected() { - - } - - @Override - public IAgentControl getAgentControl() { - return _agentControl; - } - - @Override - public void setAgentControl(IAgentControl agentControl) { - _agentControl = agentControl; - } - - @Override - public void setName(String name) { - // TODO Auto-generated method stub - - } - - @Override - public void setConfigParams(Map params) { - // TODO Auto-generated method stub - - } - - @Override - public Map getConfigParams() { - // TODO Auto-generated method stub - return null; - } - - @Override - public int getRunLevel() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void setRunLevel(int level) { - // TODO Auto-generated method stub - - } - -} diff --git a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java b/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java deleted file mode 100755 index 4440b7a3a10..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalTemplateAdapter.java +++ /dev/null @@ -1,218 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.Date; -import java.util.List; - -import javax.ejb.Local; -import javax.inject.Inject; - -import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; -import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; -import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.configuration.Resource.ResourceType; -import com.cloud.dc.DataCenterVO; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.resource.ResourceManager; -import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.TemplateProfile; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.VMTemplateZoneVO; -import com.cloud.template.TemplateAdapter; -import com.cloud.template.TemplateAdapterBase; -import com.cloud.user.Account; -import com.cloud.utils.db.DB; -import com.cloud.utils.exception.CloudRuntimeException; - -@Component -@Local(value=TemplateAdapter.class) -public class BareMetalTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(BareMetalTemplateAdapter.class); - @Inject HostDao _hostDao; - @Inject ResourceManager _resourceMgr; - - @Override - public 
String getName() { - return TemplateAdapterType.BareMetal.getName(); - } - - @Override - public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { - TemplateProfile profile = super.prepare(cmd); - - if (profile.getZoneId() == null || profile.getZoneId() == -1) { - List dcs = _dcDao.listAllIncludingRemoved(); - for (DataCenterVO dc : dcs) { - List pxeServers = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.PxeServer, dc.getId()); - if (pxeServers.size() == 0) { - throw new CloudRuntimeException("Please add PXE server before adding baremetal template in zone " + dc.getName()); - } - } - } else { - List pxeServers = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.PxeServer, profile.getZoneId()); - if (pxeServers.size() == 0) { - throw new CloudRuntimeException("Please add PXE server before adding baremetal template in zone " + profile.getZoneId()); - } - } - - return profile; - } - - @Override - public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { - throw new CloudRuntimeException("Baremetal doesn't support ISO template"); - } - - private void templateCreateUsage(VMTemplateVO template, HostVO host) { - if (template.getAccountId() != Account.ACCOUNT_ID_SYSTEM) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_TEMPLATE_CREATE, template.getAccountId(), host.getDataCenterId(), - template.getId(), template.getName(), null, template.getSourceTemplateId(), 0L, - template.getClass().getName(), template.getUuid()); - } - } - - @Override - public VMTemplateVO create(TemplateProfile profile) { - VMTemplateVO template = persistTemplate(profile); - Long zoneId = profile.getZoneId(); - - /* There is no secondary storage vm for baremetal, we use pxe server id. - * Tempalte is not bound to pxeserver right now, and we assume the pxeserver - * cannot be removed once it was added. so we use host id of first found pxe - * server as reference in template_host_ref. - * This maybe a FIXME in future. 
- */ - VMTemplateHostVO vmTemplateHost = null; - if (zoneId == null || zoneId == -1) { - List dcs = _dcDao.listAllIncludingRemoved(); - for (DataCenterVO dc : dcs) { - HostVO pxe = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.PxeServer, dc.getId()).get(0); - - vmTemplateHost = _tmpltHostDao.findByHostTemplate(dc.getId(), template.getId()); - if (vmTemplateHost == null) { - vmTemplateHost = new VMTemplateHostVO(pxe.getId(), template.getId(), new Date(), 100, - Status.DOWNLOADED, null, null, null, null, template.getUrl()); - _tmpltHostDao.persist(vmTemplateHost); - templateCreateUsage(template, pxe); - } - } - } else { - HostVO pxe = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.PxeServer, zoneId).get(0); - vmTemplateHost = new VMTemplateHostVO(pxe.getId(), template.getId(), new Date(), 100, - Status.DOWNLOADED, null, null, null, null, template.getUrl()); - _tmpltHostDao.persist(vmTemplateHost); - templateCreateUsage(template, pxe); - } - - _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template); - return template; - } - - public TemplateProfile prepareDelete(DeleteIsoCmd cmd) { - throw new CloudRuntimeException("Baremetal doesn't support ISO, how the delete get here???"); - } - - @Override @DB - public boolean delete(TemplateProfile profile) { - VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); - Long templateId = template.getId(); - boolean success = true; - String zoneName; - boolean isAllZone; - - if (!template.isCrossZones() && profile.getZoneId() != null) { - isAllZone = false; - zoneName = profile.getZoneId().toString(); - } else { - zoneName = "all zones"; - isAllZone = true; - } - - s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); - String eventType = EventTypes.EVENT_TEMPLATE_DELETE; - List templateHostVOs = 
_tmpltHostDao.listByTemplateId(templateId); - - for (VMTemplateHostVO vo : templateHostVOs) { - VMTemplateHostVO lock = null; - try { - HostVO pxeServer = _hostDao.findById(vo.getHostId()); - if (!isAllZone && pxeServer.getDataCenterId() != profile.getZoneId()) { - continue; - } - - lock = _tmpltHostDao.acquireInLockTable(vo.getId()); - if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting templateHostVO with ID: " + vo.getId()); - success = false; - break; - } - - vo.setDestroyed(true); - _tmpltHostDao.update(vo.getId(), vo); - VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(pxeServer.getDataCenterId(), templateId); - if (templateZone != null) { - _tmpltZoneDao.remove(templateZone.getId()); - } - - UsageEventUtils.publishUsageEvent(eventType, account.getId(), pxeServer.getDataCenterId(), - templateId, null, template.getClass().getName(), template.getUuid()); - } finally { - if (lock != null) { - _tmpltHostDao.releaseFromLockTable(lock.getId()); - } - } - } - - s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - - // If there are no more non-destroyed template host entries for this template, delete it - if (success && (_tmpltHostDao.listByTemplateId(templateId).size() == 0)) { - long accountId = template.getAccountId(); - - VMTemplateVO lock = _tmpltDao.acquireInLockTable(templateId); - - try { - if (lock == null) { - s_logger.debug("Failed to acquire lock when deleting template with ID: " + templateId); - success = false; - } else if (_tmpltDao.remove(templateId)) { - // Decrement the number of templates - _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.template); - } - - } finally { - if (lock != null) { - _tmpltDao.releaseFromLockTable(lock.getId()); - } - } - s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); - } - - return success; - } -} diff --git 
a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java b/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java deleted file mode 100755 index 5de5ccdd059..00000000000 --- a/server/src/com/cloud/baremetal/BareMetalVmManagerImpl.java +++ /dev/null @@ -1,551 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Executors; - -import javax.annotation.PostConstruct; -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; -import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; -import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; -import com.cloud.agent.api.baremetal.IpmiBootorResetCommand; -import com.cloud.agent.manager.Commands; -import org.apache.cloudstack.api.command.user.vm.StartVMCmd; - -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.configuration.Resource.ResourceType; -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.DataCenter.NetworkType; -import com.cloud.dc.DataCenterVO; -import com.cloud.deploy.DataCenterDeployment; -import com.cloud.deploy.DeployDestination; -import com.cloud.domain.DomainVO; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import com.cloud.exception.*; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.network.Networks.TrafficType; -import com.cloud.network.dao.NetworkVO; -import com.cloud.org.Grouping; -import com.cloud.resource.ResourceManager; -import com.cloud.service.ServiceOfferingVO; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.TemplateType; -import com.cloud.storage.TemplateProfile; -import 
com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume; -import com.cloud.template.TemplateAdapter; -import com.cloud.template.TemplateAdapter.TemplateAdapterType; -import com.cloud.user.Account; -import com.cloud.user.AccountVO; -import com.cloud.user.SSHKeyPair; -import com.cloud.user.User; -import com.cloud.user.UserContext; -import com.cloud.user.*; -import com.cloud.uservm.UserVm; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.component.Manager; -import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.DB; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.StateListener; -import com.cloud.utils.net.NetUtils; -import com.cloud.vm.*; -import com.cloud.vm.VirtualMachine.Event; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachine.Type; -import com.cloud.vm.VirtualMachineProfile.Param; - -@Local(value={BareMetalVmManager.class, BareMetalVmService.class}) -public class BareMetalVmManagerImpl extends UserVmManagerImpl implements BareMetalVmManager, BareMetalVmService, - StateListener { - private static final Logger s_logger = Logger.getLogger(BareMetalVmManagerImpl.class); - @Inject ConfigurationDao _configDao; - @Inject PxeServerManager _pxeMgr; - @Inject ResourceManager _resourceMgr; - - @Inject protected List _adapters; - - @PostConstruct - public void init() { - } - - @Override - public boolean attachISOToVM(long vmId, long isoId, boolean attach) { - s_logger.warn("attachISOToVM is not supported by Bare Metal, just fake a true"); - return true; - } - - @Override - public Volume attachVolumeToVM(AttachVolumeCmd command) { - s_logger.warn("attachVolumeToVM is not supported by Bare Metal, return null"); - return null; - } - - @Override - public Volume detachVolumeFromVM(DetachVolumeCmd cmd) { - s_logger.warn("detachVolumeFromVM is not supported by Bare Metal, return null"); - return 
null; - } - - @Override - public UserVm upgradeVirtualMachine(UpgradeVMCmd cmd) { - s_logger.warn("upgradeVirtualMachine is not supported by Bare Metal, return null"); - return null; - } - - @Override - public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException { - /*Baremetal creates record after host rebooting for imaging, in createPrivateTemplate*/ - return null; - } - - @Override @DB - public VMTemplateVO createPrivateTemplate(CreateTemplateCmd cmd) throws CloudRuntimeException { - Long vmId = cmd.getVmId(); - if (vmId == null) { - throw new InvalidParameterValueException("VM ID is null"); - } - - UserVmVO vm = _vmDao.findById(vmId); - if (vm == null) { - throw new InvalidParameterValueException("Cannot find VM for ID " + vmId); - } - - Long hostId = (vm.getHostId() == null ? vm.getLastHostId() : vm.getHostId()); - HostVO host = _hostDao.findById(hostId); - if (host == null) { - throw new InvalidParameterValueException("Cannot find host with id " + hostId); - } - - List pxes = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.PxeServer, null, host.getPodId(), host.getDataCenterId()); - if (pxes.size() == 0) { - throw new CloudRuntimeException("Please add PXE server in Pod before taking image"); - } - - if (pxes.size() > 1) { - CloudRuntimeException ex = new CloudRuntimeException("Multiple PXE servers found in Pod " + host.getPodId() + " in Zone with specified id"); - ex.addProxyObject("data_center", host.getDataCenterId(), "zoneId"); - throw ex; - } - - HostVO pxe = pxes.get(0); - /* - * prepare() will check if current account has right for creating - * template - */ - TemplateAdapter adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName()); - Long userId = UserContext.current().getCallerUserId(); - userId = (userId == null ? 
User.UID_SYSTEM : userId); - AccountVO account = _accountDao.findById(vm.getAccountId()); - - try { - TemplateProfile tmplProfile; - tmplProfile = adapter.prepare(false, userId, cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), false, false, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), false, - "BareMetal", cmd.getOsTypeId(), pxe.getDataCenterId(), HypervisorType.BareMetal, account.getAccountName(), account.getDomainId(), "0", true, cmd.getDetails()); - - if (!_pxeMgr.prepareCreateTemplate(_pxeMgr.getPxeServerType(pxe), pxe.getId(), vm, cmd.getUrl())) { - throw new Exception("Prepare PXE boot file for host " + hostId + " failed"); - } - - IpmISetBootDevCommand setBootDev = new IpmISetBootDevCommand(IpmISetBootDevCommand.BootDev.pxe); - Answer ans = _agentMgr.send(hostId, setBootDev); - if (!ans.getResult()) { - throw new Exception("Set host " + hostId + " to PXE boot failed"); - } - - IpmiBootorResetCommand boot = new IpmiBootorResetCommand(); - ans = _agentMgr.send(hostId, boot); - if (!ans.getResult()) { - throw new Exception("Boot/Reboot host " + hostId + " failed"); - } - - VMTemplateVO tmpl = adapter.create(tmplProfile); - s_logger.debug("Create baremetal template for host " + hostId + " successfully, template id:" + tmpl.getId()); - return tmpl; - } catch (Exception e) { - s_logger.debug("Create baremetal tempalte for host " + hostId + " failed", e); - throw new CloudRuntimeException(e.getMessage()); - } - } - - @Override - public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, - StorageUnavailableException, ResourceAllocationException { - Account caller = UserContext.current().getCaller(); - - String accountName = cmd.getAccountName(); - Long domainId = cmd.getDomainId(); - List networkList = cmd.getNetworkIds(); - String group = cmd.getGroup(); - - Account owner = _accountDao.findActiveAccount(accountName, domainId); - if (owner == null) { - throw new 
InvalidParameterValueException("Unable to find account " + accountName + " in domain " + domainId); - } - - _accountMgr.checkAccess(caller, null, true, owner); - long accountId = owner.getId(); - - DataCenterVO dc = _dcDao.findById(cmd.getZoneId()); - if (dc == null) { - throw new InvalidParameterValueException("Unable to find zone: " + cmd.getZoneId()); - } - - if(Grouping.AllocationState.Disabled == dc.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())){ - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: "+ cmd.getZoneId() ); - } - - if (dc.getDomainId() != null) { - DomainVO domain = _domainDao.findById(dc.getDomainId()); - if (domain == null) { - throw new CloudRuntimeException("Unable to find the domain " + dc.getDomainId() + " for the zone: " + dc); - } - _configMgr.checkZoneAccess(caller, dc); - _configMgr.checkZoneAccess(owner, dc); - } - - ServiceOfferingVO offering = _serviceOfferingDao.findById(cmd.getServiceOfferingId()); - if (offering == null || offering.getRemoved() != null) { - throw new InvalidParameterValueException("Unable to find service offering: " + cmd.getServiceOfferingId()); - } - - // check if account/domain is with in resource limits to create a new vm - resourceLimitCheck(owner, new Long(offering.getCpu()), new Long(offering.getRamSize())); - - VMTemplateVO template = _templateDao.findById(cmd.getTemplateId()); - // Make sure a valid template ID was specified - if (template == null || template.getRemoved() != null) { - throw new InvalidParameterValueException("Unable to use template " + cmd.getTemplateId()); - } - - if (template.getTemplateType().equals(TemplateType.SYSTEM)) { - throw new InvalidParameterValueException("Unable to use system template " + cmd.getTemplateId()+" to deploy a user vm"); - } - - if (template.getFormat() != Storage.ImageFormat.BAREMETAL) { - throw new InvalidParameterValueException("Unable to use non Bare Metal template" + cmd.getTemplateId() +" to 
deploy a bare metal vm"); - } - - String userData = cmd.getUserData(); - byte [] decodedUserData = null; - if (userData != null) { - if (userData.length() >= 2 * MAX_USER_DATA_LENGTH_BYTES) { - throw new InvalidParameterValueException("User data is too long"); - } - decodedUserData = org.apache.commons.codec.binary.Base64.decodeBase64(userData.getBytes()); - if (decodedUserData.length > MAX_USER_DATA_LENGTH_BYTES){ - throw new InvalidParameterValueException("User data is too long"); - } - if (decodedUserData.length < 1) { - throw new InvalidParameterValueException("User data is too short"); - } - } - - // Find an SSH public key corresponding to the key pair name, if one is given - String sshPublicKey = null; - if (cmd.getSSHKeyPairName() != null && !cmd.getSSHKeyPairName().equals("")) { - Account account = UserContext.current().getCaller(); - SSHKeyPair pair = _sshKeyPairDao.findByName(account.getAccountId(), account.getDomainId(), cmd.getSSHKeyPairName()); - if (pair == null) { - throw new InvalidParameterValueException("A key pair with name '" + cmd.getSSHKeyPairName() + "' was not found."); - } - - sshPublicKey = pair.getPublicKey(); - } - - _accountMgr.checkAccess(caller, null, true, template); - - DataCenterDeployment plan = new DataCenterDeployment(dc.getId()); - - s_logger.debug("Allocating in the DB for bare metal vm"); - - if (dc.getNetworkType() != NetworkType.Basic || networkList != null) { - s_logger.warn("Bare Metal only supports basical network mode now, switch to baisc network automatically"); - } - - Network defaultNetwork = _networkModel.getExclusiveGuestNetwork(dc.getId()); - if (defaultNetwork == null) { - throw new InvalidParameterValueException("Unable to find a default network to start a vm"); - } - - - networkList = new ArrayList(); - networkList.add(defaultNetwork.getId()); - - List> networks = new ArrayList>(); - for (Long networkId : networkList) { - NetworkVO network = _networkDao.findById(networkId); - if (network == null) { - throw new 
InvalidParameterValueException("Unable to find network by id " + networkId); - } else { - if (network.getGuestType() != Network.GuestType.Shared) { - //Check account permissions - List networkMap = _networkDao.listBy(accountId, networkId); - if (networkMap == null || networkMap.isEmpty()) { - throw new PermissionDeniedException("Unable to create a vm using network with id " + networkId + ", permission denied"); - } - } - networks.add(new Pair(network, null)); - } - } - - long id = _vmDao.getNextInSequence(Long.class, "id"); - - String hostName = cmd.getName(); - String instanceName = VirtualMachineName.getVmName(id, owner.getId(), _instance); - if (hostName == null) { - hostName = instanceName; - } else { - //verify hostName (hostname doesn't have to be unique) - if (!NetUtils.verifyDomainNameLabel(hostName, true)) { - throw new InvalidParameterValueException("Invalid name. Vm name can contain ASCII letters 'a' through 'z', the digits '0' through '9', " + - "and the hyphen ('-'), must be between 1 and 63 characters long, and can't start or end with \"-\" and can't start with digit"); - } - } - - UserVmVO vm = new UserVmVO(id, instanceName, cmd.getDisplayName(), template.getId(), HypervisorType.BareMetal, - template.getGuestOSId(), offering.getOfferHA(), false, domainId, owner.getId(), offering.getId(), userData, hostName, null); - - if (sshPublicKey != null) { - vm.setDetail("SSH.PublicKey", sshPublicKey); - } - - if (_itMgr.allocate(vm, template, offering, null, null, networks, null, plan, cmd.getHypervisor(), owner) == null) { - return null; - } - - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully allocated DB entry for " + vm); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully allocated DB entry for " + vm); - } - UserContext.current().setEventDetails("Vm Id: " + vm.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_CREATE, accountId, cmd.getZoneId(), vm.getId(), - vm.getHostName(), offering.getId(), 
template.getId(), HypervisorType.BareMetal.toString(), - VirtualMachine.class.getName(), vm.getUuid()); - - resourceCountIncrement(accountId, new Long(offering.getCpu()), new Long(offering.getRamSize())); - - // Assign instance to the group - try { - if (group != null) { - boolean addToGroup = addInstanceToGroup(Long.valueOf(id), group); - if (!addToGroup) { - throw new CloudRuntimeException("Unable to assign Vm to the group " + group); - } - } - } catch (Exception ex) { - throw new CloudRuntimeException("Unable to assign Vm to the group " + group); - } - - return vm; - } - - public UserVm startVirtualMachine(DeployVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException { - UserVmVO vm = _vmDao.findById(cmd.getInstanceId()); - - List servers = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Host.Type.PxeServer, vm.getDataCenterId()); - if (servers.size() == 0) { - throw new CloudRuntimeException("Cannot find PXE server, please make sure there is one PXE server per zone"); - } - HostVO pxeServer = servers.get(0); - - VMTemplateVO template = _templateDao.findById(vm.getTemplateId()); - if (template == null || template.getFormat() != Storage.ImageFormat.BAREMETAL) { - throw new InvalidParameterValueException("Invalid template with id = " + vm.getTemplateId()); - } - - Map params = new HashMap(); - params.put(Param.PxeSeverType, _pxeMgr.getPxeServerType(pxeServer)); - - return startVirtualMachine(cmd, params); - } - - - public UserVm startVirtualMachine(StartVMCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException, ConcurrentOperationException { - UserVmVO vm = _vmDao.findById(cmd.getInstanceId()); - - VMTemplateVO template = _templateDao.findById(vm.getTemplateId()); - if (template == null || template.getFormat() != Storage.ImageFormat.BAREMETAL) { - throw new InvalidParameterValueException("Invalid template with id = " + vm.getTemplateId()); - } - - Map params = null; - if 
(vm.isUpdateParameters()) { - List servers = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Host.Type.PxeServer, vm.getDataCenterId()); - if (servers.size() == 0) { - throw new CloudRuntimeException("Cannot find PXE server, please make sure there is one PXE server per zone"); - } - HostVO pxeServer = servers.get(0); - params = new HashMap(); - params.put(Param.PxeSeverType, _pxeMgr.getPxeServerType(pxeServer)); - } - - Pair> vmDetailsPair = super.startVirtualMachine(vm.getId(), cmd.getHostId(), params); - return vmDetailsPair.first(); - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - - Map configs = _configDao.getConfiguration("AgentManager", params); - - _instance = configs.get("instance.name"); - if (_instance == null) { - _instance = "DEFAULT"; - } - - String workers = configs.get("expunge.workers"); - int wrks = NumbersUtil.parseInt(workers, 10); - - String time = configs.get("expunge.interval"); - _expungeInterval = NumbersUtil.parseInt(time, 86400); - - time = configs.get("expunge.delay"); - _expungeDelay = NumbersUtil.parseInt(time, _expungeInterval); - - _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("UserVm-Scavenger")); - - _itMgr.registerGuru(Type.UserBareMetal, this); - VirtualMachine.State.getStateMachine().registerListener(this); - - s_logger.info("User VM Manager is configured."); - - return true; - } - - @Override - public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { - UserVmVO vm = profile.getVirtualMachine(); - Account owner = _accountDao.findById(vm.getAccountId()); - - if (owner == null || owner.getState() == Account.State.disabled) { - throw new PermissionDeniedException("The owner of " + vm + " either does not exist or is disabled: " + vm.getAccountId()); - } - - PxeServerType pxeType = (PxeServerType) profile.getParameter(Param.PxeSeverType); - if (pxeType == null) 
{ - s_logger.debug("This is a normal IPMI start, skip prepartion of PXE server"); - return true; - } - s_logger.debug("This is a PXE start, prepare PXE server first"); - - List servers = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.PxeServer, null, dest.getPod().getId(), dest.getDataCenter().getId()); - if (servers.size() == 0) { - throw new CloudRuntimeException("Cannot find PXE server, please make sure there is one PXE server per zone"); - } - if (servers.size() > 1) { - throw new CloudRuntimeException("Find more than one PXE server, please make sure there is only one PXE server per zone in pod " + dest.getPod().getId() + " zone " + dest.getDataCenter().getId()); - } - HostVO pxeServer = servers.get(0); - - if (!_pxeMgr.prepare(pxeType, profile, dest, context, pxeServer.getId())) { - throw new CloudRuntimeException("Pepare PXE server failed"); - } - - profile.addBootArgs("PxeBoot"); - - return true; - } - - @Override - public boolean finalizeDeployment(Commands cmds, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { - UserVmVO userVm = profile.getVirtualMachine(); - List nics = _nicDao.listByVmId(userVm.getId()); - for (NicVO nic : nics) { - NetworkVO network = _networkDao.findById(nic.getNetworkId()); - if (network.getTrafficType() == TrafficType.Guest) { - userVm.setPrivateIpAddress(nic.getIp4Address()); - userVm.setPrivateMacAddress(nic.getMacAddress()); - } - } - _vmDao.update(userVm.getId(), userVm); - return true; - } - - @Override - public void finalizeStop(VirtualMachineProfile profile, StopAnswer answer) { - super.finalizeStop(profile, answer); - } - - @Override - public UserVm destroyVm(long vmId) throws ResourceUnavailableException, ConcurrentOperationException { - return super.destroyVm(vmId); - } - - @Override - public boolean preStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { - return true; - } - - @Override - public boolean 
postStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, boolean status, Object opaque) { - if (newState != State.Starting && newState != State.Error && newState != State.Expunging) { - return true; - } - - if (vo.getHypervisorType() != HypervisorType.BareMetal) { - return true; - } - - HostVO host = _hostDao.findById(vo.getHostId()); - if (host == null) { - s_logger.debug("Skip oldState " + oldState + " to " + "newState " + newState + " transimtion"); - return true; - } - _hostDao.loadDetails(host); - - if (newState == State.Starting) { - host.setDetail("vmName", vo.getInstanceName()); - s_logger.debug("Add vmName " + host.getDetail("vmName") + " to host " + host.getId() + " details"); - } else { - if (host.getDetail("vmName") != null && host.getDetail("vmName").equalsIgnoreCase(vo.getInstanceName())) { - s_logger.debug("Remove vmName " + host.getDetail("vmName") + " from host " + host.getId() + " details"); - host.getDetails().remove("vmName"); - } - } - _hostDao.saveDetails(host); - - - return true; - } -} diff --git a/server/src/com/cloud/baremetal/DhcpdResource.java b/server/src/com/cloud/baremetal/DhcpdResource.java deleted file mode 100755 index 436d27598f8..00000000000 --- a/server/src/com/cloud/baremetal/DhcpdResource.java +++ /dev/null @@ -1,133 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.HashMap; -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.PingRoutingCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.utils.script.Script; -import com.cloud.utils.ssh.SSHCmdHelper; -import com.cloud.vm.VirtualMachine.State; -import com.trilead.ssh2.SCPClient; - -public class DhcpdResource extends ExternalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(DhcpdResource.class); - - public boolean configure(String name, Map params) throws ConfigurationException { - com.trilead.ssh2.Connection sshConnection = null; - try { - super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, "******")); - sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - throw new ConfigurationException( - String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); - } - - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "[ -f '/usr/sbin/dhcpd' ]")) { - throw new ConfigurationException("Cannot find dhcpd.conf /etc/dhcpd.conf at on " + _ip); - } - - SCPClient scp = new SCPClient(sshConnection); - - String editHosts = "scripts/network/exdhcp/dhcpd_edithosts.py"; - 
String editHostsPath = Script.findScript("", editHosts); - if (editHostsPath == null) { - throw new ConfigurationException("Can not find script dnsmasq_edithosts.sh at " + editHosts); - } - scp.put(editHostsPath, "/usr/bin/", "0755"); - - String prepareDhcpdScript = "scripts/network/exdhcp/prepare_dhcpd.sh"; - String prepareDhcpdScriptPath = Script.findScript("", prepareDhcpdScript); - if (prepareDhcpdScriptPath == null) { - throw new ConfigurationException("Can not find prepare_dhcpd.sh at " + prepareDhcpdScriptPath); - } - scp.put(prepareDhcpdScriptPath, "/usr/bin/", "0755"); - - //TODO: tooooooooooooooo ugly here!!! - String[] ips = _ip.split("\\."); - ips[3] = "0"; - StringBuffer buf = new StringBuffer(); - int i; - for (i=0;i()); - } - } - - Answer execute(DhcpEntryCommand cmd) { - com.trilead.ssh2.Connection sshConnection = null; - try { - sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - return new Answer(cmd, false, "ssh authenticate failed"); - } - String addDhcp = String.format("python /usr/bin/dhcpd_edithosts.py %1$s %2$s %3$s %4$s %5$s %6$s", - cmd.getVmMac(), cmd.getVmIpAddress(), cmd.getVmName(), cmd.getDns(), cmd.getGateway(), cmd.getNextServer()); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, addDhcp)) { - return new Answer(cmd, false, "add Dhcp entry failed"); - } else { - return new Answer(cmd); - } - } finally { - SSHCmdHelper.releaseSshConnection(sshConnection); - } - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof DhcpEntryCommand) { - return execute((DhcpEntryCommand)cmd); - } else { - return super.executeRequest(cmd); - } - } -} diff --git a/server/src/com/cloud/baremetal/DnsmasqResource.java b/server/src/com/cloud/baremetal/DnsmasqResource.java deleted file mode 100644 index 1001dfc5c12..00000000000 --- a/server/src/com/cloud/baremetal/DnsmasqResource.java +++ /dev/null @@ -1,123 +0,0 @@ -// Licensed to the Apache Software Foundation 
(ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.PingRoutingCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.utils.script.Script; -import com.cloud.utils.ssh.SSHCmdHelper; -import com.cloud.vm.VirtualMachine.State; -import com.trilead.ssh2.SCPClient; - -public class DnsmasqResource extends ExternalDhcpResourceBase { - private static final Logger s_logger = Logger.getLogger(DnsmasqResource.class); - - public boolean configure(String name, Map params) throws ConfigurationException { - com.trilead.ssh2.Connection sshConnection = null; - try { - super.configure(name, params); - s_logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password)); - sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - throw new ConfigurationException( - String.format("Cannot connect to DHCP 
server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password)); - } - - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "[ -f '/usr/sbin/dnsmasq' ]")) { - throw new ConfigurationException("Cannot find dnsmasq at /usr/sbin/dnsmasq on " + _ip); - } - - SCPClient scp = new SCPClient(sshConnection); - - String editHosts = "scripts/network/exdhcp/dnsmasq_edithosts.sh"; - String editHostsPath = Script.findScript("", editHosts); - if (editHostsPath == null) { - throw new ConfigurationException("Can not find script dnsmasq_edithosts.sh at " + editHosts); - } - scp.put(editHostsPath, "/usr/bin/", "0755"); - - String prepareDnsmasq = "scripts/network/exdhcp/prepare_dnsmasq.sh"; - String prepareDnsmasqPath = Script.findScript("", prepareDnsmasq); - if (prepareDnsmasqPath == null) { - throw new ConfigurationException("Can not find script prepare_dnsmasq.sh at " + prepareDnsmasq); - } - scp.put(prepareDnsmasqPath, "/usr/bin/", "0755"); - - String prepareCmd = String.format("sh /usr/bin/prepare_dnsmasq.sh %1$s %2$s %3$s", _gateway, _dns, _ip); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, prepareCmd)) { - throw new ConfigurationException("prepare dnsmasq at " + _ip + " failed"); - } - - s_logger.debug("Dnsmasq resource configure successfully"); - return true; - } catch (Exception e) { - s_logger.debug("Dnsmasq resorce configure failed", e); - throw new ConfigurationException(e.getMessage()); - } finally { - SSHCmdHelper.releaseSshConnection(sshConnection); - } - } - - @Override - public PingCommand getCurrentStatus(long id) { - com.trilead.ssh2.Connection sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - return null; - } else { - SSHCmdHelper.releaseSshConnection(sshConnection); - return new PingRoutingCommand(getType(), id, new HashMap()); - } - } - - Answer execute(DhcpEntryCommand cmd) { - com.trilead.ssh2.Connection sshConnection = null; - try { - sshConnection = 
SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - return new Answer(cmd, false, "ssh authenticate failed"); - } - String addDhcp = String.format("/usr/bin/dnsmasq_edithosts.sh %1$s %2$s %3$s", cmd.getVmMac(), cmd.getVmIpAddress(), cmd.getVmName()); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, addDhcp)) { - return new Answer(cmd, false, "add Dhcp entry failed"); - } else { - return new Answer(cmd); - } - } finally { - SSHCmdHelper.releaseSshConnection(sshConnection); - } - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof DhcpEntryCommand) { - return execute((DhcpEntryCommand)cmd); - } else { - return super.executeRequest(cmd); - } - } -} diff --git a/server/src/com/cloud/baremetal/ExternalDhcpEntryListener.java b/server/src/com/cloud/baremetal/ExternalDhcpEntryListener.java deleted file mode 100644 index d27d7972139..00000000000 --- a/server/src/com/cloud/baremetal/ExternalDhcpEntryListener.java +++ /dev/null @@ -1,44 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -public interface ExternalDhcpEntryListener { - public class DhcpEntryState { - String _name; - - public static final DhcpEntryState add = new DhcpEntryState("add"); - public static final DhcpEntryState old = new DhcpEntryState("old"); - public static final DhcpEntryState del = new DhcpEntryState("del"); - - public DhcpEntryState(String name) { - _name = name; - } - - public String getName() { - return _name; - } - } - - /** - * Notify that DHCP entry state change - * @param ip - * @param mac - * @param DHCP entry state - * @return: true means continuous listen on the entry, false cancels the listener - */ - public boolean notify(String ip, String mac, DhcpEntryState state, Object userData); -} diff --git a/server/src/com/cloud/baremetal/ExternalDhcpManager.java b/server/src/com/cloud/baremetal/ExternalDhcpManager.java deleted file mode 100644 index d256ef14267..00000000000 --- a/server/src/com/cloud/baremetal/ExternalDhcpManager.java +++ /dev/null @@ -1,54 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import com.cloud.baremetal.ExternalDhcpEntryListener.DhcpEntryState; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.host.Host; -import com.cloud.network.Network; -import com.cloud.uservm.UserVm; -import com.cloud.utils.component.Manager; -import com.cloud.vm.NicProfile; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -public interface ExternalDhcpManager extends Manager { - public static class DhcpServerType { - private String _name; - - public static final DhcpServerType Dnsmasq = new DhcpServerType("Dnsmasq"); - public static final DhcpServerType Dhcpd = new DhcpServerType("Dhcpd"); - - public DhcpServerType(String name) { - _name = name; - } - - public String getName() { - return _name; - } - - } - - - DhcpServerResponse getApiResponse(Host dhcpServer); - - boolean addVirtualMachineIntoNetwork(Network network, NicProfile nic, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) throws ResourceUnavailableException; - - Host addDhcpServer(Long zoneId, Long podId, String type, String url, String username, String password); -} diff --git a/server/src/com/cloud/baremetal/ExternalDhcpManagerImpl.java b/server/src/com/cloud/baremetal/ExternalDhcpManagerImpl.java deleted file mode 100755 index c534df17381..00000000000 --- a/server/src/com/cloud/baremetal/ExternalDhcpManagerImpl.java +++ /dev/null @@ -1,250 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupExternalDhcpCommand; -import com.cloud.agent.api.routing.DhcpEntryCommand; -import com.cloud.dc.DataCenter; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.host.Host; -import com.cloud.host.Host.Type; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceStateAdapter; -import com.cloud.resource.ServerResource; -import com.cloud.resource.UnableDeleteHostException; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.NicProfile; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import 
com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.NicDao; -import com.cloud.vm.dao.UserVmDao; - -@Component -@Local(value = {ExternalDhcpManager.class}) -public class ExternalDhcpManagerImpl extends ManagerBase implements ExternalDhcpManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalDhcpManagerImpl.class); - @Inject DataCenterDao _dcDao; - @Inject HostDao _hostDao; - @Inject AgentManager _agentMgr; - @Inject HostPodDao _podDao; - @Inject UserVmDao _userVmDao; - @Inject ResourceManager _resourceMgr; - @Inject NicDao _nicDao; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); - return true; - } - - protected String getDhcpServerGuid(String zoneId, String name, String ip) { - return zoneId + "-" + name + "-" + ip; - } - - - @Override @DB - public Host addDhcpServer(Long zoneId, Long podId, String type, String url, String username, String password) { - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Could not find zone with ID: " + zoneId); - } - - HostPodVO pod = _podDao.findById(podId); - if (pod == null) { - throw new InvalidParameterValueException("Could not find pod with ID: " + podId); - } - - List dhcps = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.ExternalDhcp, null, podId, zoneId); - if (dhcps.size() != 0) { - throw new InvalidParameterValueException("Already had a DHCP server in Pod: " + podId + " zone: " + zoneId); - } - - - String ipAddress = url; - String guid = getDhcpServerGuid(Long.toString(zoneId) + "-" + Long.toString(podId), "ExternalDhcp", 
ipAddress); - Map params = new HashMap(); - params.put("type", type); - params.put("zone", Long.toString(zoneId)); - params.put("pod", podId.toString()); - params.put("ip", ipAddress); - params.put("username", username); - params.put("password", password); - params.put("guid", guid); - params.put("pod", Long.toString(podId)); - params.put("gateway", pod.getGateway()); - String dns = zone.getDns1(); - if (dns == null) { - dns = zone.getDns2(); - } - params.put("dns", dns); - - ServerResource resource = null; - try { - if (type.equalsIgnoreCase(DhcpServerType.Dnsmasq.getName())) { - resource = new DnsmasqResource(); - resource.configure("Dnsmasq resource", params); - } else if (type.equalsIgnoreCase(DhcpServerType.Dhcpd.getName())) { - resource = new DhcpdResource(); - resource.configure("Dhcpd resource", params); - } else { - throw new CloudRuntimeException("Unsupport DHCP server " + type); - } - } catch (Exception e) { - s_logger.debug(e); - throw new CloudRuntimeException(e.getMessage()); - } - - Host dhcpServer = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalDhcp, params); - if (dhcpServer == null) { - throw new CloudRuntimeException("Cannot add external Dhcp server as a host"); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - pod.setExternalDhcp(true); - _podDao.update(pod.getId(), pod); - txn.commit(); - return dhcpServer; - } - - @Override - public DhcpServerResponse getApiResponse(Host dhcpServer) { - DhcpServerResponse response = new DhcpServerResponse(); - response.setId(dhcpServer.getUuid()); - return response; - } - - private void prepareBareMetalDhcpEntry(NicProfile nic, DhcpEntryCommand cmd) { - Long vmId = nic.getVmId(); - UserVmVO vm = _userVmDao.findById(vmId); - if (vm == null || vm.getHypervisorType() != HypervisorType.BareMetal) { - s_logger.debug("VM " + vmId + " is not baremetal machine, skip preparing baremetal DHCP entry"); - return; - } - - List servers = 
_resourceMgr.listAllUpAndEnabledHosts(Host.Type.PxeServer, null, vm.getPodIdToDeployIn(), vm.getDataCenterId()); - if (servers.size() != 1) { - throw new CloudRuntimeException("Wrong number of PXE server found in zone " + vm.getDataCenterId() - + " Pod " + vm.getPodIdToDeployIn() + ", number is " + servers.size()); - } - HostVO pxeServer = servers.get(0); - cmd.setNextServer(pxeServer.getPrivateIpAddress()); - s_logger.debug("Set next-server to " + pxeServer.getPrivateIpAddress() + " for VM " + vm.getId()); - } - - @Override - public boolean addVirtualMachineIntoNetwork(Network network, NicProfile nic, VirtualMachineProfile profile, DeployDestination dest, - ReservationContext context) throws ResourceUnavailableException { - Long zoneId = profile.getVirtualMachine().getDataCenterId(); - Long podId = profile.getVirtualMachine().getPodIdToDeployIn(); - List hosts = _resourceMgr.listAllUpAndEnabledHosts(Type.ExternalDhcp, null, podId, zoneId); - if (hosts.size() == 0) { - throw new CloudRuntimeException("No external Dhcp found in zone " + zoneId + " pod " + podId); - } - - if (hosts.size() > 1) { - throw new CloudRuntimeException("Something wrong, more than 1 external Dhcp found in zone " + zoneId + " pod " + podId); - } - - HostVO h = hosts.get(0); - String dns = nic.getDns1(); - if (dns == null) { - dns = nic.getDns2(); - } - DhcpEntryCommand dhcpCommand = new DhcpEntryCommand(nic.getMacAddress(), nic.getIp4Address(), profile.getVirtualMachine().getHostName(), null, dns, nic.getGateway(), null); - String errMsg = String.format("Set dhcp entry on external DHCP %1$s failed(ip=%2$s, mac=%3$s, vmname=%4$s)", - h.getPrivateIpAddress(), nic.getIp4Address(), nic.getMacAddress(), profile.getVirtualMachine().getHostName()); - //prepareBareMetalDhcpEntry(nic, dhcpCommand); - try { - Answer ans = _agentMgr.send(h.getId(), dhcpCommand); - if (ans.getResult()) { - s_logger.debug(String.format("Set dhcp entry on external DHCP %1$s successfully(ip=%2$s, mac=%3$s, vmname=%4$s)", - 
h.getPrivateIpAddress(), nic.getIp4Address(), nic.getMacAddress(), profile.getVirtualMachine().getHostName())); - return true; - } else { - s_logger.debug(errMsg + " " + ans.getDetails()); - throw new ResourceUnavailableException(errMsg, DataCenter.class, zoneId); - } - } catch (Exception e) { - s_logger.debug(errMsg, e); - throw new ResourceUnavailableException(errMsg + e.getMessage(), DataCenter.class, zoneId); - } - } - - @Override - public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, - List hostTags) { - if (!(startup[0] instanceof StartupExternalDhcpCommand)) { - return null; - } - - host.setType(Host.Type.ExternalDhcp); - return host; - } - - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - // TODO Auto-generated method stub - return null; - } -} diff --git a/server/src/com/cloud/baremetal/ExternalDhcpResourceBase.java b/server/src/com/cloud/baremetal/ExternalDhcpResourceBase.java deleted file mode 100644 index 937b4a7f30c..00000000000 --- a/server/src/com/cloud/baremetal/ExternalDhcpResourceBase.java +++ /dev/null @@ -1,198 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.HashMap; -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.IAgentControl; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.PingRoutingCommand; -import com.cloud.agent.api.ReadyAnswer; -import com.cloud.agent.api.ReadyCommand; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupExternalDhcpCommand; -import com.cloud.agent.api.StartupPxeServerCommand; -import com.cloud.host.Host.Type; -import com.cloud.resource.ServerResource; -import com.cloud.utils.script.Script; -import com.cloud.utils.ssh.SSHCmdHelper; -import com.cloud.vm.VirtualMachine.State; -import com.trilead.ssh2.SCPClient; - -public class ExternalDhcpResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(ExternalDhcpResourceBase.class); - String _name; - String _guid; - String _username; - String _password; - String _ip; - String _zoneId; - String _podId; - String _gateway; - String _dns; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - _guid = (String)params.get("guid"); - _ip = (String)params.get("ip"); - _username = (String)params.get("username"); - _password = (String)params.get("password"); - _zoneId = (String)params.get("zone"); - _podId = (String)params.get("pod"); - _gateway = (String)params.get("gateway"); - _dns = 
(String)params.get("dns"); - - if (_guid == null) { - throw new ConfigurationException("No Guid specified"); - } - - if (_zoneId == null) { - throw new ConfigurationException("No Zone specified"); - } - - if (_podId == null) { - throw new ConfigurationException("No Pod specified"); - } - - if (_ip == null) { - throw new ConfigurationException("No IP specified"); - } - - if (_username == null) { - throw new ConfigurationException("No username specified"); - } - - if (_password == null) { - throw new ConfigurationException("No password specified"); - } - - if (_gateway == null) { - throw new ConfigurationException("No gateway specified"); - } - - if (_dns == null) { - throw new ConfigurationException("No dns specified"); - } - - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public String getName() { - return _name; - } - - @Override - public Type getType() { - return Type.ExternalDhcp; - } - - @Override - public StartupCommand[] initialize() { - StartupExternalDhcpCommand cmd = new StartupExternalDhcpCommand(); - cmd.setName(_name); - cmd.setDataCenter(_zoneId); - cmd.setPod(_podId); - cmd.setPrivateIpAddress(_ip); - cmd.setStorageIpAddress(""); - cmd.setVersion(ExternalDhcpResourceBase.class.getPackage().getImplementationVersion()); - cmd.setGuid(_guid); - return new StartupCommand[]{cmd}; - } - - @Override - public PingCommand getCurrentStatus(long id) { - //TODO: check server - return new PingRoutingCommand(getType(), id, new HashMap()); - } - - protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("External DHCP resource " + _name + " is ready"); - return new ReadyAnswer(cmd); - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof ReadyCommand) { - return execute((ReadyCommand) cmd); - } else { - return Answer.createUnsupportedCommandAnswer(cmd); - } - } - - @Override - public void disconnected() { - } - - @Override 
- public IAgentControl getAgentControl() { - return null; - } - - @Override - public void setAgentControl(IAgentControl agentControl) { - } - - @Override - public void setName(String name) { - // TODO Auto-generated method stub - - } - - @Override - public void setConfigParams(Map params) { - // TODO Auto-generated method stub - - } - - @Override - public Map getConfigParams() { - // TODO Auto-generated method stub - return null; - } - - @Override - public int getRunLevel() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void setRunLevel(int level) { - // TODO Auto-generated method stub - - } - -} diff --git a/server/src/com/cloud/baremetal/PingPxeServerResource.java b/server/src/com/cloud/baremetal/PingPxeServerResource.java deleted file mode 100755 index 6655fd80efd..00000000000 --- a/server/src/com/cloud/baremetal/PingPxeServerResource.java +++ /dev/null @@ -1,196 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import java.util.HashMap; -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.PingRoutingCommand; -import com.cloud.agent.api.baremetal.PreparePxeServerAnswer; -import com.cloud.agent.api.baremetal.PreparePxeServerCommand; -import com.cloud.agent.api.baremetal.prepareCreateTemplateCommand; -import com.cloud.utils.script.Script; -import com.cloud.utils.ssh.SSHCmdHelper; -import com.cloud.vm.VirtualMachine.State; -import com.trilead.ssh2.SCPClient; - -public class PingPxeServerResource extends PxeServerResourceBase { - private static final Logger s_logger = Logger.getLogger(PingPxeServerResource.class); - String _storageServer; - String _pingDir; - String _share; - String _dir; - String _tftpDir; - String _cifsUserName; - String _cifsPassword; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - - _storageServer = (String)params.get("storageServer"); - _pingDir = (String)params.get("pingDir"); - _tftpDir = (String)params.get("tftpDir"); - _cifsUserName = (String)params.get("cifsUserName"); - _cifsPassword = (String)params.get("cifsPassword"); - - if (_storageServer == null) { - throw new ConfigurationException("No stroage server specified"); - } - - if (_tftpDir == null) { - throw new ConfigurationException("No tftp directory specified"); - } - - if (_pingDir == null) { - throw new ConfigurationException("No PING directory specified"); - } - - if (_cifsUserName == null || _cifsUserName.equalsIgnoreCase("")) { - _cifsUserName = "xxx"; - } - - if (_cifsPassword == null || _cifsPassword.equalsIgnoreCase("")) { - _cifsPassword = "xxx"; - } - - String pingDirs[]= _pingDir.split("/"); - if (pingDirs.length != 2) { - throw new ConfigurationException("PING dir should 
have format like myshare/direcotry, eg: windows/64bit"); - } - _share = pingDirs[0]; - _dir = pingDirs[1]; - - com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - - s_logger.debug(String.format("Trying to connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******")); - try { - sshConnection.connect(null, 60000, 60000); - if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, - "******")); - } - - String cmd = String.format("[ -f /%1$s/pxelinux.0 ] && [ -f /%2$s/kernel ] && [ -f /%3$s/initrd.gz ] ", _tftpDir, _tftpDir, _tftpDir); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, cmd)) { - throw new ConfigurationException("Miss files in TFTP directory at " + _tftpDir + " check if pxelinux.0, kernel initrd.gz are here"); - } - - SCPClient scp = new SCPClient(sshConnection); - String prepareScript = "scripts/network/ping/prepare_tftp_bootfile.py"; - String prepareScriptPath = Script.findScript("", prepareScript); - if (prepareScriptPath == null) { - throw new ConfigurationException("Can not find prepare_tftp_bootfile.py at " + prepareScriptPath); - } - scp.put(prepareScriptPath, "/usr/bin/", "0755"); - - return true; - } catch (Exception e) { - throw new ConfigurationException(e.getMessage()); - } finally { - if (sshConnection != null) { - sshConnection.close(); - } - } - } - - @Override - public PingCommand getCurrentStatus(long id) { - com.trilead.ssh2.Connection sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password); - if (sshConnection == null) { - return null; - } else { - SSHCmdHelper.releaseSshConnection(sshConnection); - return new PingRoutingCommand(getType(), id, new HashMap()); - } - } - - protected PreparePxeServerAnswer execute(PreparePxeServerCommand cmd) { - 
com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - try { - sshConnection.connect(null, 60000, 60000); - if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, - _password)); - } - - String script = String.format("python /usr/bin/prepare_tftp_bootfile.py restore %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s %10$s %11$s", - _tftpDir, cmd.getMac(), _storageServer, _share, _dir, cmd.getTemplate(), _cifsUserName, _cifsPassword, cmd.getIp(), cmd.getNetMask(), cmd.getGateWay()); - s_logger.debug("Prepare Ping PXE server successfully"); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { - return new PreparePxeServerAnswer(cmd, "prepare PING at " + _ip + " failed, command:" + script); - } - - return new PreparePxeServerAnswer(cmd); - } catch (Exception e){ - s_logger.debug("Prepare PING pxe server failed", e); - return new PreparePxeServerAnswer(cmd, e.getMessage()); - } finally { - if (sshConnection != null) { - sshConnection.close(); - } - } - } - - protected Answer execute(prepareCreateTemplateCommand cmd) { - com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(_ip, 22); - try { - sshConnection.connect(null, 60000, 60000); - if (!sshConnection.authenticateWithPassword(_username, _password)) { - s_logger.debug("SSH Failed to authenticate"); - throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, - _password)); - } - - String script = String.format("python /usr/bin/prepare_tftp_bootfile.py backup %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s %10$s %11$s", - _tftpDir, cmd.getMac(), _storageServer, _share, _dir, cmd.getTemplate(), _cifsUserName, _cifsPassword, cmd.getIp(), cmd.getNetMask(), cmd.getGateWay()); - 
s_logger.debug("Prepare for creating template successfully"); - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, script)) { - return new Answer(cmd, false, "prepare for creating template failed, command:" + script); - } - - return new Answer(cmd, true, "Success"); - } catch (Exception e){ - s_logger.debug("Prepare for creating baremetal template failed", e); - return new Answer(cmd, false, e.getMessage()); - } finally { - if (sshConnection != null) { - sshConnection.close(); - } - } - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof PreparePxeServerCommand) { - return execute((PreparePxeServerCommand) cmd); - } else if (cmd instanceof prepareCreateTemplateCommand) { - return execute((prepareCreateTemplateCommand)cmd); - } else { - return super.executeRequest(cmd); - } - } -} diff --git a/server/src/com/cloud/baremetal/PxeServerManager.java b/server/src/com/cloud/baremetal/PxeServerManager.java deleted file mode 100644 index 1d2dde7f3f8..00000000000 --- a/server/src/com/cloud/baremetal/PxeServerManager.java +++ /dev/null @@ -1,54 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import com.cloud.deploy.DeployDestination; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.uservm.UserVm; -import com.cloud.utils.component.Manager; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachineProfile; - -public interface PxeServerManager extends Manager { - public static class PxeServerType { - private String _name; - - public static final PxeServerType PING = new PxeServerType("PING"); - public static final PxeServerType DMCD = new PxeServerType("DMCD"); - - public PxeServerType(String name) { - _name = name; - } - - public String getName() { - return _name; - } - - } - - public PxeServerResponse getApiResponse(Host pxeServer); - - public boolean prepare(PxeServerType type, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context, Long pxeServerId); - - Host addPxeServer(PxeServerProfile profile); - - public boolean prepareCreateTemplate(PxeServerType type, Long pxeServerId, UserVm vm, String templateUrl); - - public PxeServerType getPxeServerType(HostVO host); -} diff --git a/server/src/com/cloud/baremetal/PxeServerManagerImpl.java b/server/src/com/cloud/baremetal/PxeServerManagerImpl.java deleted file mode 100755 index f45b2757424..00000000000 --- a/server/src/com/cloud/baremetal/PxeServerManagerImpl.java +++ /dev/null @@ -1,145 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - - -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupPxeServerCommand; -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.deploy.DeployDestination; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceStateAdapter; -import com.cloud.resource.ServerResource; -import com.cloud.resource.UnableDeleteHostException; -import com.cloud.uservm.UserVm; -import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.VirtualMachineProfile.Param; - -@Component -@Local(value = {PxeServerManager.class}) -public class PxeServerManagerImpl extends ManagerBase implements PxeServerManager, ResourceStateAdapter { - private static final org.apache.log4j.Logger s_logger = Logger.getLogger(PxeServerManagerImpl.class); - @Inject DataCenterDao _dcDao; - @Inject HostDao _hostDao; - @Inject AgentManager _agentMgr; - @Inject ExternalDhcpManager 
exDhcpMgr; - @Inject ResourceManager _resourceMgr; - - // @com.cloud.utils.component.Inject(adapter=PxeServerService.class) - @Inject protected List _services; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); - return true; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); - return true; - } - - protected PxeServerService getServiceByType(String type) { - PxeServerService _service; - _service = AdapterBase.getAdapterByName(_services, type); - if (_service == null) { - throw new CloudRuntimeException("Cannot find PXE service for " + type); - } - return _service; - } - - - @Override - public Host addPxeServer(PxeServerProfile profile) { - return getServiceByType(profile.getType()).addPxeServer(profile); - } - - @Override - public PxeServerResponse getApiResponse(Host pxeServer) { - PxeServerResponse response = new PxeServerResponse(); - response.setId(pxeServer.getUuid()); - return response; - } - - @Override - public boolean prepare(PxeServerType type, VirtualMachineProfile profile, DeployDestination dest, ReservationContext context, Long pxeServerId) { - return getServiceByType(type.getName()).prepare(profile, dest, context, pxeServerId); - } - - @Override - public boolean prepareCreateTemplate(PxeServerType type, Long pxeServerId, UserVm vm, String templateUrl) { - return getServiceByType(type.getName()).prepareCreateTemplate(pxeServerId, vm, templateUrl); - } - - @Override - public PxeServerType getPxeServerType(HostVO host) { - if (host.getResource().equalsIgnoreCase(PingPxeServerResource.class.getName())) { - return PxeServerType.PING; - } else { - throw new CloudRuntimeException("Unkown PXE server resource " + host.getResource()); - } - } - - @Override - public HostVO 
createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, - List hostTags) { - if (!(startup[0] instanceof StartupPxeServerCommand)) { - return null; - } - - host.setType(Host.Type.PxeServer); - return host; - } - - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { - // TODO Auto-generated method stub - return null; - } -} diff --git a/server/src/com/cloud/baremetal/PxeServerProfile.java b/server/src/com/cloud/baremetal/PxeServerProfile.java deleted file mode 100644 index e289adffee8..00000000000 --- a/server/src/com/cloud/baremetal/PxeServerProfile.java +++ /dev/null @@ -1,90 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -public class PxeServerProfile { - Long zoneId; - Long podId; - String url; - String username; - String password; - String type; - String pingStorageServerIp; - String pingDir; - String tftpDir; - String pingCifsUserName; - String pingCifspassword; - - public PxeServerProfile (Long zoneId, Long podId, String url, String username, String password, String type, - String pingStorageServerIp, String pingDir, String tftpDir, String pingCifsUserName, String pingCifsPassword) { - this.zoneId = zoneId; - this.podId = podId; - this.url = url; - this.username = username; - this.password = password; - this.type = type; - this.pingStorageServerIp = pingStorageServerIp; - this.pingDir = pingDir; - this.tftpDir = tftpDir; - this.pingCifsUserName = pingCifsUserName; - this.pingCifspassword = pingCifsPassword; - } - - public Long getZoneId() { - return zoneId; - } - - public Long getPodId() { - return podId; - } - - public String getUrl() { - return url; - } - - public String getUsername() { - return username; - } - - public String getPassword() { - return password; - } - - public String getType() { - return type; - } - - public String getPingStorageServerIp() { - return pingStorageServerIp; - } - - public String getPingDir() { - return pingDir; - } - - public String getTftpDir() { - return tftpDir; - } - - public String getPingCifsUserName() { - return pingCifsUserName; - } - - public String getPingCifspassword() { - return pingCifspassword; - } -} diff --git a/server/src/com/cloud/baremetal/PxeServerResourceBase.java b/server/src/com/cloud/baremetal/PxeServerResourceBase.java deleted file mode 100644 index 4df5ea8fabf..00000000000 --- a/server/src/com/cloud/baremetal/PxeServerResourceBase.java +++ /dev/null @@ -1,185 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.baremetal; - -import java.util.Map; - -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.agent.IAgentControl; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.PingCommand; -import com.cloud.agent.api.ReadyAnswer; -import com.cloud.agent.api.ReadyCommand; -import com.cloud.agent.api.StartupCommand; -import com.cloud.agent.api.StartupPxeServerCommand; -import com.cloud.host.Host.Type; -import com.cloud.resource.ServerResource; - -public class PxeServerResourceBase implements ServerResource { - private static final Logger s_logger = Logger.getLogger(PxeServerResourceBase.class); - String _name; - String _guid; - String _username; - String _password; - String _ip; - String _zoneId; - String _podId; - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - _guid = (String)params.get("guid"); - _ip = (String)params.get("ip"); - _username = (String)params.get("username"); - _password = (String)params.get("password"); - _zoneId = (String)params.get("zone"); - _podId = (String)params.get("pod"); - - if (_guid == null) { - throw new ConfigurationException("No Guid specified"); - } - - if (_zoneId == null) { - throw new ConfigurationException("No Zone specified"); - } - - if (_podId == null) { - throw new 
ConfigurationException("No Pod specified"); - } - - if (_ip == null) { - throw new ConfigurationException("No IP specified"); - } - - if (_username == null) { - throw new ConfigurationException("No username specified"); - } - - if (_password == null) { - throw new ConfigurationException("No password specified"); - } - - return true; - } - - protected ReadyAnswer execute(ReadyCommand cmd) { - s_logger.debug("Pxe resource " + _name + " is ready"); - return new ReadyAnswer(cmd); - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - - @Override - public String getName() { - // TODO Auto-generated method stub - return _name; - } - - @Override - public Type getType() { - return Type.PxeServer; - } - - @Override - public StartupCommand[] initialize() { - StartupPxeServerCommand cmd = new StartupPxeServerCommand(); - cmd.setName(_name); - cmd.setDataCenter(_zoneId); - cmd.setPod(_podId); - cmd.setPrivateIpAddress(_ip); - cmd.setStorageIpAddress(""); - cmd.setVersion(PxeServerResourceBase.class.getPackage().getImplementationVersion()); - cmd.setGuid(_guid); - return new StartupCommand[]{cmd}; - } - - @Override - public PingCommand getCurrentStatus(long id) { - // TODO Auto-generated method stub - return null; - } - - @Override - public void disconnected() { - // TODO Auto-generated method stub - - } - - @Override - public IAgentControl getAgentControl() { - // TODO Auto-generated method stub - return null; - } - - @Override - public void setAgentControl(IAgentControl agentControl) { - // TODO Auto-generated method stub - - } - - @Override - public Answer executeRequest(Command cmd) { - if (cmd instanceof ReadyCommand) { - return execute((ReadyCommand) cmd); - } else { - return Answer.createUnsupportedCommandAnswer(cmd); - } - } - - @Override - public void setName(String name) { - // TODO Auto-generated method stub - - } - - @Override - public void setConfigParams(Map params) { - // TODO Auto-generated 
method stub - - } - - @Override - public Map getConfigParams() { - // TODO Auto-generated method stub - return null; - } - - @Override - public int getRunLevel() { - // TODO Auto-generated method stub - return 0; - } - - @Override - public void setRunLevel(int level) { - // TODO Auto-generated method stub - - } - -} diff --git a/server/src/com/cloud/baremetal/PxeServerService.java b/server/src/com/cloud/baremetal/PxeServerService.java deleted file mode 100644 index 0a9918450f5..00000000000 --- a/server/src/com/cloud/baremetal/PxeServerService.java +++ /dev/null @@ -1,35 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.baremetal; - -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.deploy.DeployDestination; -import com.cloud.host.Host; -import com.cloud.uservm.UserVm; -import com.cloud.utils.component.Adapter; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VirtualMachineProfile; - -public interface PxeServerService extends Adapter { - - public Host addPxeServer(PxeServerProfile profile); - - public boolean prepare(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context, Long pxeServerId); - - public boolean prepareCreateTemplate(Long pxeServerId, UserVm vm, String templateUrl); -} diff --git a/server/src/com/cloud/capacity/CapacityManager.java b/server/src/com/cloud/capacity/CapacityManager.java index fffb41f87e6..bdd9ccd155b 100755 --- a/server/src/com/cloud/capacity/CapacityManager.java +++ b/server/src/com/cloud/capacity/CapacityManager.java @@ -16,8 +16,9 @@ // under the License. package com.cloud.capacity; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.host.HostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.component.Manager; import com.cloud.vm.VirtualMachine; @@ -38,8 +39,8 @@ public interface CapacityManager extends Manager { * @param ram required RAM * @param cpuOverprovisioningFactor factor to apply to the actual host cpu */ - boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity); - + boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, float memoryOvercommitRatio, boolean considerReservedCapacity); + void updateCapacityForHost(HostVO host); /** diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java 
b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 4787c7bb37f..292ef0abd5c 100755 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -27,6 +27,13 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.DataCenter; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.resource.ResourceState; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -57,10 +64,7 @@ import com.cloud.resource.ServerResource; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateSwiftVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; @@ -121,26 +125,25 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Inject protected UserVmDao _userVMDao; + @Inject + ClusterDetailsDao _clusterDetailsDao; + @Inject + ClusterDao _clusterDao; private int _vmCapacityReleaseInterval; private ScheduledExecutorService _executor; private boolean _stopped; long _extraBytesPerVolume = 0; private float _storageOverProvisioningFactor = 1.0f; - private float _cpuOverProvisioningFactor = 1.0f; @Override public boolean configure(String name, Map params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); _storageOverProvisioningFactor = 
NumbersUtil.parseFloat(_configDao.getValue(Config.StorageOverprovisioningFactor.key()), 1.0f); - _cpuOverProvisioningFactor = NumbersUtil.parseFloat(_configDao.getValue(Config.CPUOverprovisioningFactor.key()), 1.0f); - if (_cpuOverProvisioningFactor < 1.0f) { - _cpuOverProvisioningFactor = 1.0f; - } _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("HostCapacity-Checker")); VirtualMachine.State.getStateMachine().registerListener(this); _agentManager.registerForHostEvents(new StorageCapacityListener(_capacityDao, _storageOverProvisioningFactor), true, false, false); - _agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this, _cpuOverProvisioningFactor), true, false, false); + _agentManager.registerForHostEvents(new ComputeCapacityListener(_capacityDao, this), true, false, false); return true; } @@ -165,7 +168,11 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_MEMORY); - + Long clusterId=null; + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + clusterId= host.getClusterId(); + } if (capacityCpu == null || capacityMemory == null || svo == null) { return false; } @@ -174,9 +181,6 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, try { txn.start(); - int vmCPU = svo.getCpu() * svo.getSpeed(); - long vmMem = svo.getRamSize() * 1024L * 1024L; - capacityCpu = _capacityDao.lockRow(capacityCpu.getId(), true); capacityMemory = _capacityDao.lockRow(capacityMemory.getId(), true); @@ -185,13 +189,18 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMemory.getReservedCapacity(); 
long actualTotalCpu = capacityCpu.getTotalCapacity(); - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio").getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"memoryOvercommitRatio").getValue()); + int vmCPU = (int) (svo.getCpu() * svo.getSpeed()); + long vmMem = (long) (svo.getRamSize() * 1024L * 1024L); + long actualTotalMem = capacityMemory.getTotalCapacity(); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); } - long totalMem = capacityMemory.getTotalCapacity(); + if (!moveFromReserved) { /* move resource from used */ @@ -243,6 +252,10 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, public void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost) { long hostId = vm.getHostId(); + HostVO host = _hostDao.findById(hostId); + long clusterId = host.getClusterId(); + float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio").getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"memoryOvercommitRatio").getValue()); ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); @@ -253,11 +266,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return; } - int cpu = svo.getCpu() * svo.getSpeed(); - long ram = 
svo.getRamSize() * 1024L * 1024L; + int cpu = (int) (svo.getCpu() * svo.getSpeed()); + long ram = (long) (svo.getRamSize() * 1024L * 1024L); - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); Transaction txn = Transaction.currentTxn(); @@ -271,11 +282,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + long actualTotalMem = capacityMem.getTotalCapacity(); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } - long totalMem = capacityMem.getTotalCapacity(); long freeCpu = totalCpu - (reservedCpu + usedCpu); long freeMem = totalMem - (reservedMem + usedMem); @@ -327,12 +339,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } @Override - public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOverprovisioningFactor, boolean considerReservedCapacity) { + public boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean checkFromReservedCapacity, float cpuOvercommitRatio,float memoryOvercommitRatio, boolean considerReservedCapacity) { boolean hasCapacity = false; if (s_logger.isDebugEnabled()) { s_logger.debug("Checking if host: " + hostId + " has enough capacity for requested CPU: " + cpu + " and requested RAM: " + ram - + " , cpuOverprovisioningFactor: " + cpuOverprovisioningFactor); + + " , cpuOverprovisioningFactor: " + cpuOvercommitRatio); } CapacityVO 
capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); @@ -358,13 +370,13 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, long reservedCpu = capacityCpu.getReservedCapacity(); long reservedMem = capacityMem.getReservedCapacity(); long actualTotalCpu = capacityCpu.getTotalCapacity(); - long totalCpu = (long) (actualTotalCpu * cpuOverprovisioningFactor); + long actualTotalMem = capacityMem.getTotalCapacity(); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio ); + long totalMem = (long) (actualTotalMem *memoryOvercommitRatio ); if (s_logger.isDebugEnabled()) { s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } - long totalMem = capacityMem.getTotalCapacity(); - String failureReason = ""; if (checkFromReservedCapacity) { long freeCpu = reservedCpu; @@ -499,28 +511,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, } // Add the size for the templateForVmCreation if its not already present - if ((templateForVmCreation != null) && !tmpinstalled) { - // If the template that was passed into this allocator is not installed in the storage pool, - // add 3 * (template size on secondary storage) to the running total - VMTemplateHostVO templateHostVO = _storageMgr.findVmTemplateHost(templateForVmCreation.getId(), pool); - - if (templateHostVO == null) { - VMTemplateSwiftVO templateSwiftVO = _swiftMgr.findByTmpltId(templateForVmCreation.getId()); - if (templateSwiftVO != null) { - long templateSize = templateSwiftVO.getPhysicalSize(); - if (templateSize == 0) { - templateSize = templateSwiftVO.getSize(); - } - totalAllocatedSize += (templateSize + _extraBytesPerVolume); - } - } else { - long templateSize = templateHostVO.getPhysicalSize(); - if ( templateSize == 0 ){ - templateSize = templateHostVO.getSize(); - } - totalAllocatedSize += (templateSize + _extraBytesPerVolume); - } - } + /*if 
((templateForVmCreation != null) && !tmpinstalled) { + + }*/ return totalAllocatedSize; } @@ -716,10 +709,12 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU); List capacityVOCpus = _capacityDao.search(capacitySC, null); + Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(),"cpuOvercommitRatio").getValue()); + Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(),"memoryOvercommitRatio").getValue()); if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { CapacityVO CapacityVOCpu = capacityVOCpus.get(0); - long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor); + long newTotalCpu = (long) (server.getCpus().longValue() * server.getSpeed().longValue() * cpuovercommitratio); if ((CapacityVOCpu.getTotalCapacity() <= newTotalCpu) || ((CapacityVOCpu.getUsedCapacity() + CapacityVOCpu.getReservedCapacity()) <= newTotalCpu)) { CapacityVOCpu.setTotalCapacity(newTotalCpu); @@ -734,7 +729,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, _capacityDao.update(CapacityVOCpu.getId(), CapacityVOCpu); } else { CapacityVO capacity = new CapacityVO(server.getId(), server.getDataCenterId(), server.getPodId(), server.getClusterId(), 0L, - (long) (server.getCpus().longValue() * server.getSpeed().longValue() * _cpuOverProvisioningFactor), + (long) (server.getCpus().longValue() * server.getSpeed().longValue()), CapacityVO.CAPACITY_TYPE_CPU); _capacityDao.persist(capacity); } @@ -748,7 +743,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, if (capacityVOMems != null && !capacityVOMems.isEmpty()) { CapacityVO CapacityVOMem = capacityVOMems.get(0); - long newTotalMem = 
server.getTotalMemory(); + long newTotalMem = (long)((server.getTotalMemory())* memoryOvercommitRatio); if (CapacityVOMem.getTotalCapacity() <= newTotalMem || (CapacityVOMem.getUsedCapacity() + CapacityVOMem.getReservedCapacity() <= newTotalMem)) { CapacityVOMem.setTotalCapacity(newTotalMem); diff --git a/server/src/com/cloud/capacity/ComputeCapacityListener.java b/server/src/com/cloud/capacity/ComputeCapacityListener.java index 8ea695ad072..16e154a80a6 100755 --- a/server/src/com/cloud/capacity/ComputeCapacityListener.java +++ b/server/src/com/cloud/capacity/ComputeCapacityListener.java @@ -42,12 +42,11 @@ public class ComputeCapacityListener implements Listener { public ComputeCapacityListener(CapacityDao _capacityDao, - CapacityManager _capacityMgr, - float _overProvisioningFactor) { + CapacityManager _capacityMgr + ) { super(); this._capacityDao = _capacityDao; this._capacityMgr = _capacityMgr; - this._cpuOverProvisioningFactor = _overProvisioningFactor; } diff --git a/server/src/com/cloud/capacity/dao/CapacityDao.java b/server/src/com/cloud/capacity/dao/CapacityDao.java index 0c0723b837e..0132f69cd50 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDao.java +++ b/server/src/com/cloud/capacity/dao/CapacityDao.java @@ -26,20 +26,20 @@ import com.cloud.utils.db.GenericDao; public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); - List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor); - List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor); + List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone); + List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); boolean removeBy(Short 
capacityType, Long zoneId, Long podId, Long clusterId, Long hostId); List findByClusterPodZone(Long zoneId, Long podId, Long clusterId); List findNonSharedStorageForClusterPodZone(Long zoneId,Long podId, Long clusterId); - Pair, Map> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone, float cpuOverprovisioningFactor); + Pair, Map> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone); List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId); - List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor); - Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityType, float cpuOverprovisioningFactor); + List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType); + Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityType); List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, String resourceState); List listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit); void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState); - List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested, Float overProvFactor); -} + List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long computeRequested); +} diff --git a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java index baaf39164cd..c3d98173a5c 100755 --- a/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/server/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -27,14 +27,14 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityVO; import com.cloud.storage.Storage; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.utils.Pair; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; @@ -55,59 +55,64 @@ public class CapacityDaoImpl extends GenericDaoBase implements private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?"; private static final String SUBTRACT_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity - ? WHERE host_id = ? AND capacity_type = ?"; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) WHERE "; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " + - "AND cluster_id IN (SELECT distinct cluster_id FROM `cloud`.`op_host_capacity` WHERE "; - private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) 
"; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART1 = "SELECT DISTINCT capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster on (cluster.id = capacity.cluster_id AND cluster.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster_details ON (cluster.id = cluster_details.cluster_id ) WHERE "; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART2 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value ) - used_capacity + reserved_capacity) >= ? AND capacity.cluster_id IN (SELECT distinct capacity.cluster_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id ) WHERE "; + private static final String LIST_CLUSTERSINZONE_BY_HOST_CAPACITIES_PART3 = " AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value) - used_capacity + reserved_capacity) >= ?) "; private final SearchBuilder _hostIdTypeSearch; private final SearchBuilder _hostOrPoolIdSearch; - protected GenericSearchBuilder SummedCapacitySearch; private final SearchBuilder _allFieldsSearch; - @Inject protected StoragePoolDao _storagePoolDao; + @Inject protected PrimaryDataStoreDao _storagePoolDao; - private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = "SELECT a.host_id FROM (host JOIN op_host_capacity a ON host.id = a.host_id AND host.cluster_id = ? AND host.type = ? " + - "AND (a.total_capacity * ? - a.used_capacity) >= ? and a.capacity_type = 1) " + - "JOIN op_host_capacity b ON a.host_id = b.host_id AND b.total_capacity - b.used_capacity >= ? AND b.capacity_type = 0"; + private static final String LIST_HOSTS_IN_CLUSTER_WITH_ENOUGH_CAPACITY = " SELECT host_capacity.host_id FROM (`cloud`.`host` JOIN `cloud`.`op_host_capacity` host_capacity ON (host.id = host_capacity.host_id AND host.cluster_id = ?) 
JOIN `cloud`.`cluster_details` cluster_details ON (host_capacity.cluster_id = cluster_details.cluster_id) AND host.type = ? AND cluster_details.name='cpuOvercommitRatio' AND ((host_capacity.total_capacity *cluster_details.value ) - host_capacity.used_capacity) >= ? and host_capacity.capacity_type = '1' " + + " AND host_capacity.host_id IN (SELECT capacity.host_id FROM `cloud`.`op_host_capacity` capacity JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id= cluster_details.cluster_id) where capacity_type='0' AND cluster_details.name='memoryOvercommitRatio' AND ((total_capacity* cluster_details.value) - used_capacity ) >= ?)) "; - private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 = "SELECT cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE " ; - private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 = " AND capacity_type = ? GROUP BY cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC"; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity WHERE "; - private static final String LIST_PODSINZONE_BY_HOST_CAPACITIES = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + - " ON (pod.id = capacity.pod_id AND pod.removed is NULL) WHERE " + - " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ? " + - " AND pod_id IN (SELECT distinct pod_id FROM `cloud`.`op_host_capacity` WHERE " + - " capacity.data_center_id = ? AND capacity_type = ? AND ((total_capacity * ?) - used_capacity + reserved_capacity) >= ?) "; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? 
GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; - private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = "SELECT pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) FROM `cloud`.`op_host_capacity` WHERE data_center_id = ? " + - " AND capacity_type = ? GROUP BY pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * ?) ASC"; + private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1= "SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE "; + + private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; + + private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + + " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster ON (capacity.cluster_id = cluster.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? ((total_capacity * cluster.value ) - used_capacity + reserved_capacity) >= ? "; + + private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? 
GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC "; + + private static final String ORDER_PODS_BY_AGGREGATE_OVERCOMMIT_CAPACITY ="SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE data_center_id=? AND capacity_type = ? AND cluster_details.name = ? GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; private static final String LIST_CAPACITY_BY_RESOURCE_STATE = "SELECT capacity.data_center_id, sum(capacity.used_capacity), sum(capacity.reserved_quantity), sum(capacity.total_capacity), capacity_capacity_type "+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+ - "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+ - "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? 
AND capacity_type not in (3,4) "; - + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`data_center` dc ON (dc.id = capacity.data_center_id AND dc.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod ON (pod.id = capacity.pod_id AND pod.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster` cluster ON (cluster.id = capacity.cluster_id AND cluster.removed is NULL)"+ + "FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host` host ON (host.id = capacity.host_id AND host.removed is NULL)"+ + "WHERE dc.allocation_state = ? AND pod.allocation_state = ? AND cluster.allocation_state = ? AND host.resource_state = ? AND capacity_type not in (3,4) "; + private static final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - " capacity.capacity_type, capacity.data_center_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'"; + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ + " capacity.capacity_type, capacity.data_center_id "+ + "FROM `cloud`.`op_host_capacity` capacity "+ + "WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled'"; private static 
final String LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2 = " GROUP BY data_center_id, capacity_type order by percent desc limit "; - private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - " capacity.capacity_type, capacity.data_center_id, pod_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND pod_id is not null AND capacity_state='Enabled'"; + private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," + + " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," + + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," + + 
"capacity.capacity_type, capacity.data_center_id, pod_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' "; + private static final String LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2 = " GROUP BY pod_id, capacity_type order by percent desc limit "; - private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity)), (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end), " + - "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`configuration` where name like 'cpu.overprovisioning.factor')) else sum(total_capacity) end)) percent,"+ - "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id "+ - "FROM `cloud`.`op_host_capacity` capacity "+ - "WHERE total_capacity > 0 AND cluster_id is not null AND capacity_state='Enabled'"; + private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART1 = "SELECT (sum(capacity.used_capacity) + sum(capacity.reserved_capacity))," + + " (case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)," + + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id)) " + + "when '0' then 
(sum(total_capacity) * (select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id))else sum(total_capacity) end)) percent," + + "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' "; + + private static final String LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2 = " GROUP BY cluster_id, capacity_type order by percent desc limit "; private static final String UPDATE_CAPACITY_STATE = "UPDATE `cloud`.`op_host_capacity` SET capacity_state = ? WHERE "; private static final String LIST_CLUSTERS_CROSSING_THRESHOLD = "SELECT cluster_id " + @@ -139,34 +144,34 @@ public class CapacityDaoImpl extends GenericDaoBase implements _allFieldsSearch.done(); } - + @Override - public List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested, Float overProvFactor){ + public List listClustersCrossingThreshold(short capacityType, Long zoneId, Float disableThreshold, long compute_requested){ - Transaction txn = Transaction.currentTxn(); - PreparedStatement pstmt = null; - List result = new ArrayList(); - StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD); + Transaction txn = Transaction.currentTxn(); + PreparedStatement pstmt = null; + List result = new ArrayList(); + StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD); + + + try { + pstmt = txn.prepareAutoCloseStatement(sql.toString()); + pstmt.setLong(1,compute_requested); + pstmt.setShort(2,capacityType); + pstmt.setFloat(3,disableThreshold); + pstmt.setLong(4,zoneId); - - try { - pstmt = txn.prepareAutoCloseStatement(sql.toString()); - pstmt.setLong(1, compute_requested); - pstmt.setLong(2, zoneId); - pstmt.setShort(3, capacityType); - pstmt.setFloat(4, disableThreshold*overProvFactor); - - ResultSet rs = 
pstmt.executeQuery(); - while (rs.next()) { - result.add(rs.getLong(1)); - } - return result; - } catch (SQLException e) { - throw new CloudRuntimeException("DB Exception on: " + sql, e); - } catch (Throwable e) { - throw new CloudRuntimeException("Caught: " + sql, e); - } - } + ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + result.add(rs.getLong(1)); + } + return result; + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + } /*public static String preparePlaceHolders(int length) { StringBuilder builder = new StringBuilder(); @@ -243,6 +248,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements PreparedStatement pstmt = null; List result = new ArrayList(); + List resourceIdList = new ArrayList(); + switch(level){ case 1: // List all the capacities grouped by zone, capacity Type finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART1); @@ -258,17 +265,21 @@ public class CapacityDaoImpl extends GenericDaoBase implements } if (zoneId != null){ - finalQuery.append(" AND data_center_id="+zoneId); + finalQuery.append(" AND data_center_id = ?" ); + resourceIdList.add(zoneId); } if (podId != null){ - finalQuery.append(" AND pod_id="+podId); + finalQuery.append(" AND pod_id = ?" ); + resourceIdList.add(podId); } if (clusterId != null){ - finalQuery.append(" AND cluster_id="+clusterId); + finalQuery.append(" AND cluster_id = ?" 
); + resourceIdList.add(clusterId ); } if (capacityType != null){ - finalQuery.append(" AND capacity_type="+capacityType); - } + finalQuery.append(" AND capacity_type = ?"); + resourceIdList.add(capacityType.longValue() ); + } switch(level){ case 1: // List all the capacities grouped by zone, capacity Type @@ -284,16 +295,27 @@ public class CapacityDaoImpl extends GenericDaoBase implements break; } - finalQuery.append(limit.toString()); - + finalQuery.append("?"); + resourceIdList.add((long) limit); + try { - pstmt = txn.prepareAutoCloseStatement(finalQuery.toString()); + pstmt = txn.prepareAutoCloseStatement(finalQuery.toString()); + for (int i = 0; i < resourceIdList.size(); i++){ + pstmt.setLong(1+i, resourceIdList.get(i)); + } ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { + while (rs.next()) { + Long capacityPodId = null; + Long capacityClusterId = null; + + if(level != 1 && rs.getLong(6) != 0) + capacityPodId = rs.getLong(6); + if(level == 3 && rs.getLong(7) != 0) + capacityClusterId = rs.getLong(7); + SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getFloat(3), (short)rs.getLong(4), rs.getLong(5), - level != 1 ? rs.getLong(6): null, - level == 3 ? 
rs.getLong(7): null); + capacityPodId, capacityClusterId); result.add(summedCapacity); } @@ -309,7 +331,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("dcId", Func.NATIVE, SummedCapacitySearch.entity().getDataCenterId()); SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumReserved", Func.SUM, SummedCapacitySearch.entity().getReservedCapacity()); @@ -391,8 +413,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ - Transaction txn = Transaction.currentTxn(); + public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){ + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -415,11 +437,11 @@ public class CapacityDaoImpl extends GenericDaoBase implements pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, id); pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU); - pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setString(3,"cpuOvercommitRatio"); pstmt.setLong(4, requiredCpu); pstmt.setLong(5, id); pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY); - pstmt.setFloat(7, 1); + pstmt.setString(7,"memoryOvercommitRatio"); pstmt.setLong(8, requiredRam); ResultSet rs = pstmt.executeQuery(); @@ -436,8 +458,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override - public List listHostsWithEnoughCapacity(int 
requiredCpu, long requiredRam, Long clusterId, String hostType, float cpuOverprovisioningFactor){ - Transaction txn = Transaction.currentTxn(); + public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){ + Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -446,9 +468,8 @@ public class CapacityDaoImpl extends GenericDaoBase implements pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, clusterId); pstmt.setString(2, hostType); - pstmt.setFloat(3, cpuOverprovisioningFactor); - pstmt.setLong(4, requiredCpu); - pstmt.setLong(5, requiredRam); + pstmt.setLong(3, requiredCpu); + pstmt.setLong(4, requiredRam); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { @@ -528,7 +549,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findByClusterPodZone(Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity()); SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType()); @@ -563,7 +584,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId){ - SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); + GenericSearchBuilder SummedCapacitySearch = createSearchBuilder(SummedCapacity.class); SummedCapacitySearch.select("sumUsed", Func.SUM, SummedCapacitySearch.entity().getUsedCapacity()); SummedCapacitySearch.select("sumTotal", Func.SUM, SummedCapacitySearch.entity().getTotalCapacity()); 
SummedCapacitySearch.select("capacityType", Func.NATIVE, SummedCapacitySearch.entity().getCapacityType()); @@ -630,31 +651,44 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone, float cpuOverprovisioningFactor){ + public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone){ Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map clusterCapacityMap = new HashMap(); + StringBuilder sql = new StringBuilder(); + if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY) { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1); + } + else { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1); + } - StringBuilder sql = new StringBuilder(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1); if(isZone){ - sql.append("data_center_id = ?"); + sql.append(" data_center_id = ?"); }else{ - sql.append("pod_id = ?"); + sql.append(" pod_id = ?"); } - sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2); + if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY){ + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2); + } + else { + sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2); + } + try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); - if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ - pstmt.setFloat(1, cpuOverprovisioningFactor); - pstmt.setFloat(4, cpuOverprovisioningFactor); - }else{ - pstmt.setFloat(1, 1); - pstmt.setFloat(4, 1); + pstmt.setLong(1, id); + pstmt.setShort(2,capacityTypeForOrdering); + + if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_CPU){ + pstmt.setString(3,"cpuOvercommitRatio"); } - pstmt.setLong(2, id); - pstmt.setShort(3, 
capacityTypeForOrdering); + else if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_MEMORY){ + pstmt.setString(3,"memoryOvercommitRatio"); + } + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long clusterId = rs.getLong(1); @@ -670,22 +704,25 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType, float cpuOverprovisioningFactor) { + public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) { Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); - StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITIES); + StringBuilder sql = new StringBuilder(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE); + sql.append("AND capacity.pod_id IN ("); + sql.append(LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE); + sql.append(")"); try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(1, zoneId); pstmt.setShort(2, CapacityVO.CAPACITY_TYPE_CPU); - pstmt.setFloat(3, cpuOverprovisioningFactor); + pstmt.setString(3, "cpuOvercommitRatio"); pstmt.setLong(4, requiredCpu); pstmt.setLong(5, zoneId); pstmt.setShort(6, CapacityVO.CAPACITY_TYPE_MEMORY); - pstmt.setFloat(7, 1); + pstmt.setString(7,"memoryOvercommitRatio" ); pstmt.setLong(8, requiredRam); ResultSet rs = pstmt.executeQuery(); @@ -701,26 +738,22 @@ public class CapacityDaoImpl extends GenericDaoBase implements } @Override - public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering, float cpuOverprovisioningFactor) { + public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering) { Transaction txn = Transaction.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map podCapacityMap = new HashMap(); - + StringBuilder sql = new StringBuilder(ORDER_PODS_BY_AGGREGATE_CAPACITY); try { pstmt = 
txn.prepareAutoCloseStatement(sql.toString()); pstmt.setLong(2, zoneId); pstmt.setShort(3, capacityTypeForOrdering); - + if(capacityTypeForOrdering == CapacityVO.CAPACITY_TYPE_CPU){ - pstmt.setFloat(1, cpuOverprovisioningFactor); - pstmt.setFloat(4, cpuOverprovisioningFactor); - }else{ - pstmt.setFloat(1, 1); - pstmt.setFloat(4, 1); + pstmt.setString(3, "cpuOvercommitRatio"); } - + ResultSet rs = pstmt.executeQuery(); while (rs.next()) { Long podId = rs.getLong(1); @@ -738,7 +771,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) { Transaction txn = Transaction.currentTxn(); - StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE); + StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE); List resourceIdList = new ArrayList(); if (dcId != null){ diff --git a/server/src/com/cloud/cluster/ClusterManagerImpl.java b/server/src/com/cloud/cluster/ClusterManagerImpl.java index 45d9dca8f91..27e0e0381d2 100755 --- a/server/src/com/cloud/cluster/ClusterManagerImpl.java +++ b/server/src/com/cloud/cluster/ClusterManagerImpl.java @@ -74,6 +74,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Profiler; import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ComponentLifecycle; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -364,11 +365,11 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager { try { // schedule a scan task immediately - if (_agentMgr instanceof ClusteredAgentManagerImpl) { + if (ComponentContext.getTargetObject(_agentMgr) instanceof ClusteredAgentManagerImpl) { if (s_logger.isDebugEnabled()) { s_logger.debug("Received notification as part of addHost command to start a host scan task"); } - ClusteredAgentManagerImpl 
clusteredAgentMgr = (ClusteredAgentManagerImpl)_agentMgr; + ClusteredAgentManagerImpl clusteredAgentMgr = (ClusteredAgentManagerImpl)ComponentContext.getTargetObject(_agentMgr); clusteredAgentMgr.scheduleHostScanTask(); } } catch (Exception e) { diff --git a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java index bf629896907..cff4cfc1b95 100644 --- a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java +++ b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java @@ -19,6 +19,7 @@ package com.cloud.cluster.agentlb.dao; import java.util.Date; import java.util.List; +import javax.annotation.PostConstruct; import javax.ejb.Local; import org.apache.log4j.Logger; @@ -37,30 +38,35 @@ import com.cloud.utils.db.SearchCriteria; public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao { private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class); - protected final SearchBuilder AllFieldsSearch; - protected final SearchBuilder IntermediateStateSearch; - protected final SearchBuilder ActiveSearch; + protected SearchBuilder AllFieldsSearch; + protected SearchBuilder IntermediateStateSearch; + protected SearchBuilder ActiveSearch; public HostTransferMapDaoImpl() { - AllFieldsSearch = createSearchBuilder(); - AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ); - AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ); - AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ); - AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ); - AllFieldsSearch.done(); - - IntermediateStateSearch = createSearchBuilder(); - IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ); - 
IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ); - IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN); - IntermediateStateSearch.done(); - - ActiveSearch = createSearchBuilder(); - ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT); - ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ); - ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ); - ActiveSearch.done(); - + super(); + } + + @PostConstruct + public void init() { + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); + + IntermediateStateSearch = createSearchBuilder(); + IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ); + IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ); + IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN); + IntermediateStateSearch.done(); + + ActiveSearch = createSearchBuilder(); + ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT); + ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ); + ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ); + ActiveSearch.done(); + } @Override diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index b22bf4b76f0..64465a2034c 
100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -20,6 +20,8 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; + import com.cloud.agent.AgentManager; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.ha.HighAvailabilityManager; @@ -28,7 +30,6 @@ import com.cloud.network.NetworkManager; import com.cloud.network.router.VpcVirtualNetworkApplianceManager; import com.cloud.server.ManagementServer; import com.cloud.storage.StorageManager; -import com.cloud.storage.allocator.StoragePoolAllocator; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; @@ -137,7 +138,8 @@ public enum Config { SnapshotMonthlyMax("Snapshots", SnapshotManager.class, Integer.class, "snapshot.max.monthly", "8", "Maximum monthly snapshots for a volume", null), SnapshotPollInterval("Snapshots", SnapshotManager.class, Integer.class, "snapshot.poll.interval", "300", "The time interval in seconds when the management server polls for snapshots to be scheduled.", null), SnapshotDeltaMax("Snapshots", SnapshotManager.class, Integer.class, "snapshot.delta.max", "16", "max delta snapshots between two full snapshots.", null), - + BackupSnapshotAferTakingSnapshot("Snapshots", SnapshotManager.class, Boolean.class, "snapshot.backup.rightafter", "true", "backup snapshot right after snapshot is taken", null), + // Advanced JobExpireMinutes("Advanced", ManagementServer.class, String.class, "job.expire.minutes", "1440", "Time (in minutes) for async-jobs to be kept in system", null), JobCancelThresholdMinutes("Advanced", ManagementServer.class, String.class, "job.cancel.threshold.minutes", "60", "Time (in minutes) for async-jobs to be forcely cancelled if it has been in process for long", null), @@ -202,9 +204,10 @@ public 
enum Config { SecStorageSessionMax("Advanced", AgentManager.class, Integer.class, "secstorage.session.max", "50", "The max number of command execution sessions that a SSVM can handle", null), SecStorageCmdExecutionTimeMax("Advanced", AgentManager.class, Integer.class, "secstorage.cmd.execution.time.max", "30", "The max command execution time in minute", null), SecStorageProxy("Advanced", AgentManager.class, String.class, "secstorage.proxy", null, "http proxy used by ssvm, in http://username:password@proxyserver:port format", null), + AlertPurgeInterval("Advanced", ManagementServer.class, Integer.class, "alert.purge.interval", "86400", "The interval (in seconds) to wait before running the alert purge thread", null), + AlertPurgeDelay("Advanced", ManagementServer.class, Integer.class, "alert.purge.delay", "0", "Alerts older than specified number days will be purged. Set this value to 0 to never delete alerts", null), - - DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), + DirectAttachNetworkEnabled("Advanced", ManagementServer.class, Boolean.class, "direct.attach.network.externalIpAllocator.enabled", "false", "Direct-attach VMs using external DHCP server", "true,false"), DirectAttachNetworkExternalAPIURL("Advanced", ManagementServer.class, String.class, "direct.attach.network.externalIpAllocator.url", null, "Direct-attach VMs using external DHCP server (API url)", null), CheckPodCIDRs("Advanced", ManagementServer.class, String.class, "check.pod.cidrs", "true", "If true, different pods must belong to different CIDR subnets.", "true,false"), NetworkGcWait("Advanced", ManagementServer.class, Integer.class, "network.gc.wait", "600", "Time (in seconds) to wait before shutting down a network that's not in used", null), @@ -231,6 +234,7 @@ public enum Config { EnableEC2API("Advanced", ManagementServer.class, Boolean.class, 
"enable.ec2.api", "false", "enable EC2 API on CloudStack", null), EnableS3API("Advanced", ManagementServer.class, Boolean.class, "enable.s3.api", "false", "enable Amazon S3 API on CloudStack", null), RecreateSystemVmEnabled("Advanced", ManagementServer.class, Boolean.class, "recreate.systemvm.enabled", "false", "If true, will recreate system vm root disk whenever starting system vm", "true,false"), + SetVmInternalNameUsingDisplayName("Advanced", ManagementServer.class, Boolean.class, "vm.instancename.flag", "false", "If true, will append guest VM's display Name (if set) to its internal instance name", "true,false"), IncorrectLoginAttemptsAllowed("Advanced", ManagementServer.class, Integer.class, "incorrect.login.attempts.allowed", "5", "Incorrect login attempts allowed before the user is disabled", null), // Ovm OvmPublicNetwork("Hidden", ManagementServer.class, String.class, "ovm.public.network.device", null, "Specify the public bridge on host for public network", null), @@ -247,18 +251,17 @@ public enum Config { XenBondStorageNic("Advanced", ManagementServer.class, String.class, "xen.bond.storage.nics", null, "Attempt to bond the two networks if found", null), XenHeartBeatInterval("Advanced", ManagementServer.class, Integer.class, "xen.heartbeat.interval", "60", "heartbeat to use when implementing XenServer Self Fencing", null), XenGuestNetwork("Hidden", ManagementServer.class, String.class, "xen.guest.network.device", null, "Specify for guest network name label", null), - + XenMaxNics("Advanced", AgentManager.class, Integer.class, "xen.nics.max", "7", "Maximum allowed nics for Vms created on Xen", null), // VMware - VmwarePrivateNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.private.vswitch", null, "Specify the vSwitch on host for private network", null), - VmwarePublicNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.public.vswitch", null, "Specify the vSwitch on host for public network", null), - 
VmwareGuestNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.guest.vswitch", null, "Specify the vSwitch on host for guest network", null), VmwareUseNexusVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.nexus.vswitch", "false", "Enable/Disable Cisco Nexus 1000v vSwitch in VMware environment", null), + VmwareUseDVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.dvswitch", "false", "Enable/Disable Nexus/Vmware dvSwitch in VMware environment", null), + VmwarePortsPerDVPortGroup("Network", ManagementServer.class, Integer.class, "vmware.ports.per.dvportgroup", "256", "Default number of ports per Vmware dvPortGroup in VMware environment", null), + VmwareCreateFullClone("Advanced", ManagementServer.class, Boolean.class, "vmware.create.full.clone", "false", "If set to true, creates guest VMs as full clones on ESX", null), VmwareServiceConsole("Advanced", ManagementServer.class, String.class, "vmware.service.console", "Service Console", "Specify the service console network name(for ESX hosts)", null), VmwareManagementPortGroup("Advanced", ManagementServer.class, String.class, "vmware.management.portgroup", "Management Network", "Specify the management network name(for ESXi hosts)", null), VmwareAdditionalVncPortRangeStart("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.start", "50000", "Start port number of additional VNC port range", null), VmwareAdditionalVncPortRangeSize("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.size", "1000", "Start port number of additional VNC port range", null), //VmwareGuestNicDeviceType("Advanced", ManagementServer.class, String.class, "vmware.guest.nic.device.type", "E1000", "Ethernet card type used in guest VM, valid values are E1000, PCNet32, Vmxnet2, Vmxnet3", null), - VmwarePerClusterHostMax("Advanced", ManagementServer.class, Integer.class, "vmware.percluster.host.max", "8", "maxmium hosts per vCenter 
cluster(do not let it grow over 8)", "1-8"), VmwareReserveCpu("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.cpu", "false", "Specify whether or not to reserve CPU based on CPU overprovisioning factor", null), VmwareReserveMem("Advanced", ManagementServer.class, Boolean.class, "vmware.reserve.mem", "false", "Specify whether or not to reserve memory based on memory overprovisioning factor", null), VmwareRootDiskControllerType("Advanced", ManagementServer.class, String.class, "vmware.root.disk.controller", "ide", "Specify the default disk controller for root volumes, valid values are scsi, ide", null), @@ -319,7 +322,7 @@ public enum Config { //disabling lb as cluster sync does not work with distributed cluster AgentLbEnable("Advanced", ManagementServer.class, Boolean.class, "agent.lb.enabled", "false", "If agent load balancing enabled in cluster setup", null), SubDomainNetworkAccess("Advanced", NetworkManager.class, Boolean.class, "allow.subdomain.network.access", "true", "Allow subdomains to use networks dedicated to their parent domain(s)", null), - UseExternalDnsServers("Advanced", NetworkManager.class, Boolean.class, "use.external.dns", "false", "Bypass internal dns, use exetrnal dns1 and dns2", null), + UseExternalDnsServers("Advanced", NetworkManager.class, Boolean.class, "use.external.dns", "false", "Bypass internal dns, use external dns1 and dns2", null), EncodeApiResponse("Advanced", ManagementServer.class, Boolean.class, "encode.api.response", "false", "Do URL encoding for the api response, false by default", null), DnsBasicZoneUpdates("Advanced", NetworkManager.class, String.class, "network.dns.basiczone.updates", "all", "This parameter can take 2 values: all (default) and pod. 
It defines if DHCP/DNS requests have to be send to all dhcp servers in cloudstack, or only to the one in the same pod", "all,pod"), @@ -361,7 +364,7 @@ public enum Config { VpcMaxNetworks("Advanced", ManagementServer.class, Integer.class, "vpc.max.networks", "3", "Maximum number of networks per vpc", null), DetailBatchQuerySize("Advanced", ManagementServer.class, Integer.class, "detail.batch.query.size", "2000", "Default entity detail batch query size for listing", null), ConcurrentSnapshotsThresholdPerHost("Advanced", ManagementServer.class, Long.class, "concurrent.snapshots.threshold.perhost", - null, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", null), + null, "Limits number of snapshots that can be handled by the host concurrently; default is NULL - unlimited", null), NetworkIPv6SearchRetryMax("Network", ManagementServer.class, Integer.class, "network.ipv6.search.retry.max", "10000", "The maximum number of retrying times to search for an available IPv6 address in the table", null), ExternalBaremetalSystemUrl("Advanced", ManagementServer.class, String.class, "external.baremetal.system.url", null, "url of external baremetal system that CloudStack will talk to", null), @@ -370,7 +373,8 @@ public enum Config { IntervalToEchoBaremetalSecurityGroupAgent("Advanced", ManagementServer.class, Integer.class, "interval.baremetal.securitygroup.agent.echo", "10", "Interval to echo baremetal security group agent, in seconds", null), TimeoutToEchoBaremetalSecurityGroupAgent("Advanced", ManagementServer.class, Integer.class, "timeout.baremetal.securitygroup.agent.echo", "3600", "Timeout to echo baremetal security group agent, in seconds, the provisioning process will be treated as a failure", null), - ApiLimitInterval("Advanced", ManagementServer.class, Integer.class, "api.throttling.interval", "1", "Time interval (in seconds) to reset API count", null), + ApiLimitEnabled("Advanced", ManagementServer.class, 
Boolean.class, "api.throttling.enabled", "true", "Enable/disable Api rate limit", null), + ApiLimitInterval("Advanced", ManagementServer.class, Integer.class, "api.throttling.interval", "1", "Time interval (in seconds) to reset API count", null), ApiLimitMax("Advanced", ManagementServer.class, Integer.class, "api.throttling.max", "25", "Max allowed number of APIs within fixed interval", null), ApiLimitCacheSize("Advanced", ManagementServer.class, Integer.class, "api.throttling.cachesize", "50000", "Account based API count cache size", null), diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java index 5c1b0d58c6f..20e98845ac0 100644 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -72,6 +72,7 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * @param localStorageRequired * @param offerHA * @param domainId + * @param volatileVm * @param hostTag * @param networkRate * TODO @@ -80,7 +81,7 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * @return ID */ ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, - boolean offerHA, boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate); + boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer networkRate); /** * Creates a new disk offering @@ -128,12 +129,14 @@ public interface ConfigurationManager extends ConfigurationService, Manager { * TODO * @param isSecurityGroupEnabled * TODO + * @param ip6Dns1 TODO + * @param ip6Dns2 TODO * @return * @throws * @throws */ DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String 
internalDns1, String internalDns2, String guestCidr, String domain, Long domainId, NetworkType zoneType, String allocationState, - String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled); + String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, String ip6Dns2); /** * Deletes a VLAN from the database, along with all of its IP addresses. Will not delete VLANs that have allocated diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 074675cdd33..b1ad6b7fb9d 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -86,6 +86,7 @@ import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterIpAddressDao; +import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; @@ -249,19 +250,21 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati VpcManager _vpcMgr; // FIXME - why don't we have interface for DataCenterLinkLocalIpAddressDao? 
- @Inject protected DataCenterLinkLocalIpAddressDaoImpl _LinkLocalIpAllocDao; + @Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao; - private int _maxVolumeSizeInGb; - private long _defaultPageSize; + private int _maxVolumeSizeInGb = Integer.parseInt(Config.MaxVolumeSize.getDefaultValue()); + private long _defaultPageSize = Long.parseLong(Config.DefaultPageSize.getDefaultValue()); protected Set configValuesForValidation; @Override public boolean configure(final String name, final Map params) throws ConfigurationException { - String maxVolumeSizeInGbString = _configDao.getValue("storage.max.volume.size"); - _maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString, 2000); + String maxVolumeSizeInGbString = _configDao.getValue(Config.MaxVolumeSize.key()); + _maxVolumeSizeInGb = NumbersUtil.parseInt(maxVolumeSizeInGbString, + Integer.parseInt(Config.MaxVolumeSize.getDefaultValue())); - String defaultPageSizeString = _configDao.getValue("default.page.size"); - _defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString, 500L); + String defaultPageSizeString = _configDao.getValue(Config.DefaultPageSize.key()); + _defaultPageSize = NumbersUtil.parseLong(defaultPageSizeString, + Long.parseLong(Config.DefaultPageSize.getDefaultValue())); populateConfigValuesForValidationSet(); return true; @@ -1092,7 +1095,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } - private void checkZoneParameters(String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, boolean checkForDuplicates, Long domainId, String allocationStateStr) { + private void checkZoneParameters(String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, boolean checkForDuplicates, Long domainId, String allocationStateStr, + String ip6Dns1, String ip6Dns2) { if (checkForDuplicates) { // Check if a zone with the specified name already exists if (validZone(zoneName)) { @@ -1127,6 +1131,14 @@ public 
class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Please enter a valid IP address for internal DNS2"); } + if (ip6Dns1 != null && ip6Dns1.length() > 0 && !NetUtils.isValidIpv6(ip6Dns1)) { + throw new InvalidParameterValueException("Please enter a valid IPv6 address for IP6 DNS1"); + } + + if (ip6Dns2 != null && ip6Dns2.length() > 0 && !NetUtils.isValidIpv6(ip6Dns2)) { + throw new InvalidParameterValueException("Please enter a valid IPv6 address for IP6 DNS2"); + } + Grouping.AllocationState allocationState = null; if (allocationStateStr != null && !allocationStateStr.isEmpty()) { try { @@ -1246,6 +1258,27 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return true; } + + @Override + @DB + public LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd) { + String hostname = _configDao.getValue(LDAPParams.hostname.toString()); + cmd.setHostname(hostname == null ? "" : hostname); + String port = _configDao.getValue(LDAPParams.port.toString()); + cmd.setPort(port == null ? 0 : Integer.valueOf(port)); + String queryFilter = _configDao.getValue(LDAPParams.queryfilter.toString()); + cmd.setQueryFilter(queryFilter == null ? "" : queryFilter); + String searchBase = _configDao.getValue(LDAPParams.searchbase.toString()); + cmd.setSearchBase(searchBase == null ? "" : searchBase); + String useSSL = _configDao.getValue(LDAPParams.usessl.toString()); + cmd.setUseSSL(useSSL == null ? Boolean.FALSE : Boolean.valueOf(useSSL)); + String binddn = _configDao.getValue(LDAPParams.dn.toString()); + cmd.setBindDN(binddn == null ? "" : binddn); + String truststore = _configDao.getValue(LDAPParams.truststore.toString()); + cmd.setTrustStore(truststore == null ? 
"" : truststore); + return cmd; + } + @Override @DB public boolean updateLDAP(LDAPConfigCmd cmd) { @@ -1265,11 +1298,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("If you specify a bind name then you need to provide bind password too."); } + // check query filter if it contains valid substitution + if (!queryFilter.contains("%u") && !queryFilter.contains("%n") && !queryFilter.contains("%e")){ + throw new InvalidParameterValueException("QueryFilter should contain at least one of the substitutions: %u, %n or %e: " + queryFilter); + } + // check if the info is correct Hashtable env = new Hashtable(11); env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory"); String protocol = "ldap://"; - if (new Boolean(useSSL)) { + if (useSSL) { env.put(Context.SECURITY_PROTOCOL, "ssl"); protocol = "ldaps://"; if (trustStore == null || trustStorePassword == null) { @@ -1288,7 +1326,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati DirContext ctx = new InitialDirContext(env); ctx.close(); - // store the result in DB COnfiguration + // store the result in DB Configuration ConfigurationVO cvo = _configDao.findByName(LDAPParams.hostname.toString()); if (cvo == null) { cvo = new ConfigurationVO("Hidden", "DEFAULT", "management-server", LDAPParams.hostname.toString(), null, "Hostname or ip address of the ldap server eg: my.ldap.com"); @@ -1356,8 +1394,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati s_logger.debug("The ldap server is configured: " + hostname); } catch (NamingException ne) { - ne.printStackTrace(); - throw new InvalidParameterValueException("Naming Exception, check you ldap data ! " + ne.getMessage() + (ne.getCause() != null ? ("Caused by:" + ne.getCause().getMessage()) : "")); + throw new InvalidParameterValueException("Naming Exception, check you ldap data ! 
" + ne.getMessage() + (ne.getCause() != null ? ("; Caused by:" + ne.getCause().getMessage()) : "")); } return true; } @@ -1371,6 +1408,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String zoneName = cmd.getZoneName(); String dns1 = cmd.getDns1(); String dns2 = cmd.getDns2(); + String ip6Dns1 = cmd.getIp6Dns1(); + String ip6Dns2 = cmd.getIp6Dns2(); String internalDns1 = cmd.getInternalDns1(); String internalDns2 = cmd.getInternalDns2(); String guestCidr = cmd.getGuestCidrAddress(); @@ -1448,6 +1487,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati dns2 = zone.getDns2(); } + if (ip6Dns1 == null) { + ip6Dns1 = zone.getIp6Dns1(); + } + + if (ip6Dns2 == null) { + ip6Dns2 = zone.getIp6Dns2(); + } + if (internalDns1 == null) { internalDns1 = zone.getInternalDns1(); } @@ -1470,20 +1517,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } boolean checkForDuplicates = !zoneName.equals(oldZoneName); - checkZoneParameters(zoneName, dns1, dns2, internalDns1, internalDns2, checkForDuplicates, null, allocationStateStr);// not - // allowing - // updating - // domain - // associated - // with - // a - // zone, - // once - // created + checkZoneParameters(zoneName, dns1, dns2, internalDns1, internalDns2, checkForDuplicates, null, allocationStateStr, ip6Dns1, ip6Dns2);// not allowing updating domain associated with a zone, once created zone.setName(zoneName); zone.setDns1(dns1); zone.setDns2(dns2); + zone.setIp6Dns1(ip6Dns1); + zone.setIp6Dns2(ip6Dns2); zone.setInternalDns1(internalDns1); zone.setInternalDns2(internalDns2); zone.setGuestNetworkCidr(guestCidr); @@ -1563,7 +1603,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, Long domainId, - NetworkType zoneType, 
String allocationStateStr, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled) { + NetworkType zoneType, String allocationStateStr, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, String ip6Dns2) { // checking the following params outside checkzoneparams method as we do // not use these params for updatezone @@ -1581,7 +1621,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - checkZoneParameters(zoneName, dns1, dns2, internalDns1, internalDns2, true, domainId, allocationStateStr); + checkZoneParameters(zoneName, dns1, dns2, internalDns1, internalDns2, true, domainId, allocationStateStr, ip6Dns1, ip6Dns2); byte[] bytes = (zoneName + System.currentTimeMillis()).getBytes(); String zoneToken = UUID.nameUUIDFromBytes(bytes).toString(); @@ -1589,7 +1629,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati try { txn.start(); // Create the new zone in the database - DataCenterVO zone = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, domain, domainId, zoneType, zoneToken, networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled); + DataCenterVO zone = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, domain, domainId, zoneType, zoneToken, networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2); if (allocationStateStr != null && !allocationStateStr.isEmpty()) { Grouping.AllocationState allocationState = Grouping.AllocationState.valueOf(allocationStateStr); zone.setAllocationState(allocationState); @@ -1661,6 +1701,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String zoneName = cmd.getZoneName(); String dns1 = cmd.getDns1(); String dns2 = cmd.getDns2(); + String ip6Dns1 = cmd.getIp6Dns1(); + String ip6Dns2 = cmd.getIp6Dns2(); String internalDns1 = cmd.getInternalDns1(); String 
internalDns2 = cmd.getInternalDns2(); String guestCidr = cmd.getGuestCidrAddress(); @@ -1704,7 +1746,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } return createZone(userId, zoneName, dns1, dns2, internalDns1, internalDns2, guestCidr, domainVO != null ? domainVO.getName() : null, domainId, zoneType, allocationState, networkDomain, - isSecurityGroupEnabled, isLocalStorageEnabled); + isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2); } @Override @@ -1752,14 +1794,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } Boolean offerHA = cmd.getOfferHa(); - if (offerHA == null) { - offerHA = false; - } - Boolean limitCpuUse = cmd.GetLimitCpuUse(); - if (limitCpuUse == null) { - limitCpuUse = false; - } + Boolean volatileVm = cmd.getVolatileVm(); String vmTypeString = cmd.getSystemVmType(); VirtualMachine.Type vmType = null; @@ -1786,15 +1822,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber.intValue(), memory.intValue(), cpuSpeed.intValue(), cmd.getDisplayText(), - localStorageRequired, offerHA, limitCpuUse, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); + localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate()); } @Override @ActionEvent(eventType = EventTypes.EVENT_SERVICE_OFFERING_CREATE, eventDescription = "creating service offering") public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vm_type, String name, int cpu, int ramSize, int speed, String displayText, - boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, 
String tags, Long domainId, String hostTag, Integer networkRate) { tags = cleanupTags(tags); - ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, displayText, localStorageRequired, false, tags, isSystem, vm_type, + ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired, false, tags, isSystem, vm_type, domainId, hostTag); if ((offering = _serviceOfferingDao.persist(offering)) != null) { @@ -2296,10 +2332,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Vlan vlan = createVlanAndPublicIpRange(zoneId, networkId, physicalNetworkId, forVirtualNetwork, podId, startIP, endIP, vlanGateway, vlanNetmask, vlanId, vlanOwner, startIPv6, endIPv6, ip6Gateway, ip6Cidr); + txn.commit(); if (associateIpRangeToAccount) { _networkMgr.associateIpAddressListToAccount(userId, vlanOwner.getId(), zoneId, vlan.getId(), null); } - txn.commit(); // Associate ips to the network if (associateIpRangeToAccount) { diff --git a/server/src/com/cloud/configuration/dao/ConfigurationDaoImpl.java b/server/src/com/cloud/configuration/dao/ConfigurationDaoImpl.java index 68106f7dd6a..fe5f5aee810 100644 --- a/server/src/com/cloud/configuration/dao/ConfigurationDaoImpl.java +++ b/server/src/com/cloud/configuration/dao/ConfigurationDaoImpl.java @@ -65,15 +65,6 @@ public class ConfigurationDaoImpl extends GenericDaoBase private final SearchBuilder AccountSearch; private final SearchBuilder DomainSearch; - //protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class); - //protected final AccountDaoImpl _accountDao = ComponentLocator.inject(AccountDaoImpl.class); - - @Inject protected DomainDaoImpl _domainDao; - @Inject protected AccountDaoImpl _accountDao; + @Inject protected DomainDao _domainDao; + @Inject protected AccountDao _accountDao; public 
ResourceCountDaoImpl() { TypeSearch = createSearchBuilder(); diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 168ac0e43cb..544a803b13f 100755 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -29,14 +29,11 @@ import java.util.UUID; import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import javax.persistence.Table; import org.apache.cloudstack.api.ServerApiException; -import com.cloud.offering.DiskOffering; -import com.cloud.storage.dao.DiskOfferingDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; -import org.springframework.context.annotation.Primary; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.AgentControlAnswer; @@ -102,6 +99,7 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.rules.RulesManager; +import com.cloud.offering.DiskOffering; import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; import com.cloud.offerings.dao.NetworkOfferingDao; @@ -114,13 +112,13 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.servlet.ConsoleProxyServlet; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.StoragePoolDao; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.dao.DiskOfferingDao; import 
com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.User; @@ -223,7 +221,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Inject NetworkOfferingDao _networkOfferingDao; @Inject - StoragePoolDao _storagePoolDao; + PrimaryDataStoreDao _storagePoolDao; @Inject UserVmDetailsDao _vmDetailsDao; @Inject @@ -233,6 +231,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy @Inject RulesManager _rulesMgr; @Inject + TemplateManager templateMgr; + @Inject IPAddressDao _ipAddressDao; private ConsoleProxyListener _listener; @@ -1175,7 +1175,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); if (zoneHostInfo != null && isZoneHostReady(zoneHostInfo)) { VMTemplateVO template = _templateDao.findSystemVMTemplate(dataCenterId); - HostVO secondaryStorageHost = _storageMgr.getSecondaryStorageHost(dataCenterId); + HostVO secondaryStorageHost = this.templateMgr.getSecondaryStorageHost(dataCenterId); boolean templateReady = false; if (template != null && secondaryStorageHost != null) { diff --git a/server/src/com/cloud/dc/DataCenterVO.java b/server/src/com/cloud/dc/DataCenterVO.java index 28fb11f8cc0..6da13e77e1d 100644 --- a/server/src/com/cloud/dc/DataCenterVO.java +++ b/server/src/com/cloud/dc/DataCenterVO.java @@ -59,6 +59,12 @@ public class DataCenterVO implements DataCenter { @Column(name="dns2") private String dns2 = null; + @Column(name="ip6_dns1") + private String ip6Dns1 = null; + + @Column(name="ip6_dns2") + private String ip6Dns2 = null; + @Column(name="internal_dns1") private String internalDns1 = null; @@ -177,17 +183,19 @@ public class DataCenterVO implements DataCenter { } public DataCenterVO(long id, String name, String description, String 
dns1, String dns2, String dns3, String dns4, String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix) { - this(name, description, dns1, dns2, dns3, dns4, guestCidr, domain, domainId, zoneType, zoneToken, domainSuffix, false, false); + this(name, description, dns1, dns2, dns3, dns4, guestCidr, domain, domainId, zoneType, zoneToken, domainSuffix, false, false, null, null); this.id = id; this.allocationState = Grouping.AllocationState.Enabled; this.uuid = UUID.randomUUID().toString(); } - public DataCenterVO(String name, String description, String dns1, String dns2, String dns3, String dns4, String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix, boolean securityGroupEnabled, boolean localStorageEnabled) { + public DataCenterVO(String name, String description, String dns1, String dns2, String dns3, String dns4, String guestCidr, String domain, Long domainId, NetworkType zoneType, String zoneToken, String domainSuffix, boolean securityGroupEnabled, boolean localStorageEnabled, String ip6Dns1, String ip6Dns2) { this.name = name; this.description = description; this.dns1 = dns1; this.dns2 = dns2; + this.ip6Dns1 = ip6Dns1; + this.ip6Dns2 = ip6Dns2; this.internalDns1 = dns3; this.internalDns2 = dns4; this.guestNetworkCidr = guestCidr; @@ -431,4 +439,22 @@ public class DataCenterVO implements DataCenter { public void setMacAddress(long macAddress) { this.macAddress = macAddress; } + + @Override + public String getIp6Dns1() { + return ip6Dns1; + } + + public void setIp6Dns1(String ip6Dns1) { + this.ip6Dns1 = ip6Dns1; + } + + @Override + public String getIp6Dns2() { + return ip6Dns2; + } + + public void setIp6Dns2(String ip6Dns2) { + this.ip6Dns2 = ip6Dns2; + } } diff --git a/server/src/com/cloud/dc/dao/DataCenterDaoImpl.java b/server/src/com/cloud/dc/dao/DataCenterDaoImpl.java index a63bbd3c068..2a6c2ecb252 100755 --- a/server/src/com/cloud/dc/dao/DataCenterDaoImpl.java 
+++ b/server/src/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -63,11 +63,11 @@ public class DataCenterDaoImpl extends GenericDaoBase implem protected SearchBuilder DisabledZonesSearch; protected SearchBuilder TokenSearch; - @Inject protected DataCenterIpAddressDaoImpl _ipAllocDao = null; - @Inject protected DataCenterLinkLocalIpAddressDaoImpl _LinkLocalIpAllocDao = null; - @Inject protected DataCenterVnetDaoImpl _vnetAllocDao = null; - @Inject protected PodVlanDaoImpl _podVlanAllocDao = null; - @Inject protected DcDetailsDaoImpl _detailsDao = null; + @Inject protected DataCenterIpAddressDao _ipAllocDao = null; + @Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao = null; + @Inject protected DataCenterVnetDao _vnetAllocDao = null; + @Inject protected PodVlanDao _podVlanAllocDao = null; + @Inject protected DcDetailsDao _detailsDao = null; protected long _prefix; protected Random _rand = new Random(System.currentTimeMillis()); diff --git a/server/src/com/cloud/dc/dao/DataCenterIpAddressDao.java b/server/src/com/cloud/dc/dao/DataCenterIpAddressDao.java index bf7884466d0..7a19b245d2c 100644 --- a/server/src/com/cloud/dc/dao/DataCenterIpAddressDao.java +++ b/server/src/com/cloud/dc/dao/DataCenterIpAddressDao.java @@ -23,6 +23,12 @@ import com.cloud.utils.db.GenericDao; public interface DataCenterIpAddressDao extends GenericDao { + public DataCenterIpAddressVO takeIpAddress(long dcId, long podId, long instanceId, String reservationId); + public DataCenterIpAddressVO takeDataCenterIpAddress(long dcId, String reservationId); + public void addIpRange(long dcId, long podId, String start, String end); + public void releaseIpAddress(String ipAddress, long dcId, Long instanceId); + public void releaseIpAddress(long nicId, String reservationId); + boolean mark(long dcId, long podId, String ip); List listByPodIdDcIdIpAddress(long podId, long dcId, String ipAddress); List listByPodIdDcId(long podId, long dcId); diff --git 
a/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDao.java b/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDao.java new file mode 100644 index 00000000000..7fe946762c6 --- /dev/null +++ b/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDao.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import java.util.List; + +import com.cloud.dc.DataCenterLinkLocalIpAddressVO; +import com.cloud.utils.db.GenericDao; + +public interface DataCenterLinkLocalIpAddressDao extends GenericDao{ + public DataCenterLinkLocalIpAddressVO takeIpAddress(long dcId, long podId, long instanceId, String reservationId); + public boolean deleteIpAddressByPod(long podId); + public void addIpRange(long dcId, long podId, String start, String end); + public void releaseIpAddress(String ipAddress, long dcId, long instanceId); + public void releaseIpAddress(long nicId, String reservationId); + public List listByPodIdDcId(long podId, long dcId); + public int countIPs(long podId, long dcId, boolean onlyCountAllocated); +} diff --git a/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java b/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java index 6456ebfd5c5..b52e3733a3f 100644 --- a/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java +++ b/server/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java @@ -42,7 +42,7 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value={DataCenterLinkLocalIpAddressDaoImpl.class}) @DB(txn=false) -public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase implements GenericDao { +public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase implements DataCenterLinkLocalIpAddressDao { private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class); private final SearchBuilder AllFieldsSearch; diff --git a/server/src/com/cloud/dc/dao/DataCenterVnetDao.java b/server/src/com/cloud/dc/dao/DataCenterVnetDao.java new file mode 100644 index 00000000000..79e91c4bca8 --- /dev/null +++ b/server/src/com/cloud/dc/dao/DataCenterVnetDao.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.dc.dao; + +import java.util.List; + +import com.cloud.dc.DataCenterVnetVO; +import com.cloud.utils.db.GenericDao; + +public interface DataCenterVnetDao extends GenericDao { + public List listAllocatedVnets(long physicalNetworkId); + public List findVnet(long dcId, String vnet); + public int countZoneVlans(long dcId, boolean onlyCountAllocated); + public List findVnet(long dcId, long physicalNetworkId, String vnet); + + public void add(long dcId, long physicalNetworkId, int start, int end); + + public void delete(long physicalNetworkId); + + public DataCenterVnetVO take(long physicalNetworkId, long accountId, String reservationId); + + public void release(String vnet, long physicalNetworkId, long accountId, String reservationId); + +} diff --git a/server/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/server/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index af8bd25ebc9..5ded0f4ecf5 100755 --- a/server/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/server/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -41,7 +41,7 @@ import com.cloud.utils.exception.CloudRuntimeException; */ @Component @DB(txn=false) -public class DataCenterVnetDaoImpl extends GenericDaoBase implements GenericDao { +public class DataCenterVnetDaoImpl extends GenericDaoBase 
implements DataCenterVnetDao { private final SearchBuilder FreeVnetSearch; private final SearchBuilder VnetDcSearch; private final SearchBuilder VnetDcSearchAllocated; diff --git a/server/src/com/cloud/dc/dao/PodVlanDao.java b/server/src/com/cloud/dc/dao/PodVlanDao.java new file mode 100644 index 00000000000..6359dfea83d --- /dev/null +++ b/server/src/com/cloud/dc/dao/PodVlanDao.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import java.util.List; + +import com.cloud.dc.PodVlanVO; +import com.cloud.utils.db.GenericDao; + +public interface PodVlanDao extends GenericDao { + public List listAllocatedVnets(long podId); + public void add(long podId, int start, int end); + public void delete(long podId); + public PodVlanVO take(long podId, long accountId); + public void release(String vlan, long podId, long accountId); +} diff --git a/server/src/com/cloud/dc/dao/PodVlanDaoImpl.java b/server/src/com/cloud/dc/dao/PodVlanDaoImpl.java index 96cd42cf31f..413f9ed6c18 100755 --- a/server/src/com/cloud/dc/dao/PodVlanDaoImpl.java +++ b/server/src/com/cloud/dc/dao/PodVlanDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.exception.CloudRuntimeException; * PodVlanDaoImpl maintains the one-to-many relationship between */ @Component -public class PodVlanDaoImpl extends GenericDaoBase implements GenericDao { +public class PodVlanDaoImpl extends GenericDaoBase implements PodVlanDao { private final SearchBuilder FreeVlanSearch; private final SearchBuilder VlanPodSearch; private final SearchBuilder PodSearchAllocated; diff --git a/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java b/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java new file mode 100755 index 00000000000..62094eb5ea6 --- /dev/null +++ b/server/src/com/cloud/deploy/AbstractDeployPlannerSelector.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.deploy; + +import java.util.Map; + +import javax.naming.ConfigurationException; + +import com.cloud.vm.UserVmVO; + +public abstract class AbstractDeployPlannerSelector implements DeployPlannerSelector { + protected Map params; + protected String name; + protected int runLevel; + + @Override + public String getName() { + return name; + } + + @Override + public void setName(String name) { + this.name = name; + } + + @Override + public void setConfigParams(Map params) { + this.params = params; + } + + @Override + public Map getConfigParams() { + return params; + } + + @Override + public int getRunLevel() { + return runLevel; + } + + @Override + public void setRunLevel(int level) { + this.runLevel = level; + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + return true; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } +} diff --git a/core/src/com/cloud/resource/NetworkPreparer.java b/server/src/com/cloud/deploy/DeployPlannerSelector.java old mode 100644 new mode 100755 similarity index 81% rename from core/src/com/cloud/resource/NetworkPreparer.java rename to server/src/com/cloud/deploy/DeployPlannerSelector.java index d7034535e9f..40eabb1646f --- a/core/src/com/cloud/resource/NetworkPreparer.java +++ b/server/src/com/cloud/deploy/DeployPlannerSelector.java @@ -1,29 +1,24 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.resource; - -import com.cloud.utils.component.Adapter; - -/** - * Prepares the network for VM. - */ -public interface NetworkPreparer extends Adapter { - - String setup(String vnet); - - void cleanup(String vnet); -} +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.deploy; + +import com.cloud.utils.component.Adapter; +import com.cloud.vm.UserVmVO; + +public interface DeployPlannerSelector extends Adapter { + String selectPlanner(UserVmVO vm); +} diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java index 66a24ac0e43..c219cfccaf2 100755 --- a/server/src/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/com/cloud/deploy/FirstFitPlanner.java @@ -27,16 +27,20 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import com.cloud.agent.manager.allocator.HostAllocator; -import com.cloud.api.ApiDBUtils; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -59,14 +63,11 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; -import com.cloud.storage.allocator.StoragePoolAllocator; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import 
com.cloud.user.AccountManager; @@ -95,10 +96,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { @Inject protected VolumeDao _volsDao; @Inject protected CapacityManager _capacityMgr; @Inject protected ConfigurationDao _configDao; - @Inject protected StoragePoolDao _storagePoolDao; + @Inject protected PrimaryDataStoreDao _storagePoolDao; @Inject protected CapacityDao _capacityDao; @Inject protected AccountManager _accountMgr; @Inject protected StorageManager _storageMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject protected ClusterDetailsDao _clusterDetailsDao; //@com.cloud.utils.component.Inject(adapter=StoragePoolAllocator.class) @Inject protected List _storagePoolAllocators; @@ -127,9 +130,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - if (s_logger.isDebugEnabled()) { s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm); @@ -198,7 +198,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); }else{ if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { - if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor, true)){ + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); + Float cpuOvercommitRatio = 
Float.parseFloat(cluster_detail_cpu.getValue()); + Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){ s_logger.debug("The last host of this VM is UP and has enough capacity"); s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId()); //search for storage under the zone, pod, cluster of the last host. @@ -286,12 +291,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { ServiceOffering offering = vmProfile.getServiceOffering(); int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); - //list pods under this zone by cpu and ram capacity List prioritizedPodIds = new ArrayList(); - Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam, cpuOverprovisioningFactor); + Pair, Map> podCapacityInfo = listPodsByCapacity(plan.getDataCenterId(), requiredCpu, requiredRam); List podsWithCapacity = podCapacityInfo.first(); if(!podsWithCapacity.isEmpty()){ @@ -349,11 +351,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { DataCenter dc = _dcDao.findById(vm.getDataCenterId()); int requiredCpu = offering.getCpu() * offering.getSpeed(); long requiredRam = offering.getRamSize() * 1024L * 1024L; - String opFactor = _configDao.getValue(Config.CPUOverprovisioningFactor.key()); - float cpuOverprovisioningFactor = NumbersUtil.parseFloat(opFactor, 1); //list clusters under this zone by cpu and ram capacity - Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone, 
cpuOverprovisioningFactor); + Pair, Map> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone); List prioritizedClusterIds = clusterCapacityInfo.first(); if(!prioritizedClusterIds.isEmpty()){ if(avoid.getClustersToAvoid() != null){ @@ -467,30 +467,30 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { // For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation. for(short capacity : capacityList){ - - if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ - return; + + if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){ + return; + } + if (capacity == Capacity.CAPACITY_TYPE_CPU) { + clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), + capacityThresholdMap.get(capacity), cpu_requested); } - - if (capacity == Capacity.CAPACITY_TYPE_CPU){ - clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(Capacity.CAPACITY_TYPE_CPU, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), cpu_requested, ApiDBUtils.getCpuOverprovisioningFactor()); - }else{ + else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) { clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(), - capacityThresholdMap.get(capacity), ram_requested, 1.0f);//Mem overprov not supported yet - } - - - if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ - // addToAvoid Set - avoid.addClusterList(clustersCrossingThreshold); - // Remove clusters crossing disabled threshold - clusterListForVmAllocation.removeAll(clustersCrossingThreshold); - - s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + - " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " 
for capacity Type : " + capacity + ", skipping these clusters"); + capacityThresholdMap.get(capacity), ram_requested ); } + + if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){ + // addToAvoid Set + avoid.addClusterList(clustersCrossingThreshold); + // Remove clusters crossing disabled threshold + clusterListForVmAllocation.removeAll(clustersCrossingThreshold); + + s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" + + " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters"); + } + } } @@ -559,7 +559,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone, float cpuOverprovisioningFactor){ + protected Pair, Map> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){ //look at the aggregate available cpu and ram per cluster //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot @@ -573,14 +573,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { capacityType = CapacityVO.CAPACITY_TYPE_MEMORY; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor); - } - List clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone, cpuOverprovisioningFactor); + List clusterIdswithEnoughCapacity = _capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone); if (s_logger.isTraceEnabled()) { s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + 
clusterIdswithEnoughCapacity); } - Pair, Map> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone, cpuOverprovisioningFactor); + Pair, Map> result = _capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone); List clusterIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM if (s_logger.isTraceEnabled()) { @@ -596,7 +593,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { } - protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam, float cpuOverprovisioningFactor){ + + protected Pair, Map> listPodsByCapacity(long zoneId, int requiredCpu, long requiredRam){ //look at the aggregate available cpu and ram per pod //although an aggregate value may be false indicator that a pod can host a vm, it will at the least eliminate those pods which definitely cannot @@ -610,14 +608,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner { capacityType = CapacityVO.CAPACITY_TYPE_MEMORY; } - if (s_logger.isDebugEnabled()) { - s_logger.debug("CPUOverprovisioningFactor considered: " + cpuOverprovisioningFactor); - } - List podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType, cpuOverprovisioningFactor); + List podIdswithEnoughCapacity = _capacityDao.listPodsByHostCapacities(zoneId, requiredCpu, requiredRam, capacityType); if (s_logger.isTraceEnabled()) { s_logger.trace("PodId List having enough CPU and RAM capacity: " + podIdswithEnoughCapacity); } - Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType, cpuOverprovisioningFactor); + Pair, Map> result = _capacityDao.orderPodsByAggregateCapacity(zoneId, capacityType); List podIdsOrderedByAggregateCapacity = result.first(); //only keep the clusters that have enough capacity to host this VM if (s_logger.isTraceEnabled()) { @@ -736,11 +731,11 @@ public class 
FirstFitPlanner extends PlannerBase implements DeploymentPlanner { if(plan.getPoolId() != null){ s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId()); List suitablePools = new ArrayList(); - StoragePoolVO pool; + StoragePool pool = null; if(toBeCreated.getPoolId() != null){ - pool = _storagePoolDao.findById(toBeCreated.getPoolId()); + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); }else{ - pool = _storagePoolDao.findById(plan.getPoolId()); + pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); } if(!pool.isInMaintenance()){ diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDaoImpl.java b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java similarity index 63% rename from plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDaoImpl.java rename to server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java index 5a882f1ef14..034a9aafc92 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDaoImpl.java +++ b/server/src/com/cloud/deploy/HypervisorVmPlannerSelector.java @@ -1,32 +1,33 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. -// -// Automatically generated by addcopyright.py at 01/29/2013 -package com.cloud.baremetal.database; +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.deploy; import javax.ejb.Local; - -import org.springframework.stereotype.Component; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDaoBase; -@Component -@Local(value = {BaremetalCmdbDao.class}) -@DB(txn = false) -public class BaremetalCmdbDaoImpl extends GenericDaoBase implements BaremetalCmdbDao { +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.vm.UserVmVO; +@Local(value = {DeployPlannerSelector.class}) +public class HypervisorVmPlannerSelector extends AbstractDeployPlannerSelector { + @Override + public String selectPlanner(UserVmVO vm) { + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + return "FirstFitPlanner"; + } + return null; + } } diff --git a/server/src/com/cloud/event/ActionEventUtils.java b/server/src/com/cloud/event/ActionEventUtils.java index 22589f1a292..3f3ca685f73 100755 --- a/server/src/com/cloud/event/ActionEventUtils.java +++ b/server/src/com/cloud/event/ActionEventUtils.java @@ -26,22 +26,23 @@ import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.component.AnnotationInterceptor; +import com.cloud.utils.component.ComponentContext; import net.sf.cglib.proxy.Callback; import net.sf.cglib.proxy.MethodInterceptor; import net.sf.cglib.proxy.MethodProxy; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; +import javax.annotation.PostConstruct; +import javax.inject.Inject; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; -import javax.annotation.PostConstruct; -import javax.inject.Inject; - @Component public class ActionEventUtils { private static final Logger s_logger = 
Logger.getLogger(ActionEventUtils.class); @@ -49,14 +50,12 @@ public class ActionEventUtils { private static EventDao _eventDao; private static AccountDao _accountDao; protected static UserDao _userDao; - - // get the event bus provider if configured - protected static EventBus _eventBus; + protected static EventBus _eventBus = null; @Inject EventDao eventDao; @Inject AccountDao accountDao; @Inject UserDao userDao; - + public ActionEventUtils() { } @@ -65,8 +64,6 @@ public class ActionEventUtils { _eventDao = eventDao; _accountDao = accountDao; _userDao = userDao; - - // TODO we will do injection of event bus later } public static Long onActionEvent(Long userId, Long accountId, Long domainId, String type, String description) { @@ -156,7 +153,9 @@ public class ActionEventUtils { private static void publishOnEventBus(long userId, long accountId, String eventCategory, String eventType, Event.State state) { - if (_eventBus == null) { + try { + _eventBus = ComponentContext.getComponent(EventBus.class); + } catch(NoSuchBeanDefinitionException nbe) { return; // no provider is configured to provide events bus, so just return } diff --git a/server/src/com/cloud/event/AlertGenerator.java b/server/src/com/cloud/event/AlertGenerator.java index 2dc7f3eb9e1..c56f9177af2 100644 --- a/server/src/com/cloud/event/AlertGenerator.java +++ b/server/src/com/cloud/event/AlertGenerator.java @@ -22,16 +22,17 @@ import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.server.ManagementServer; -import org.apache.cloudstack.framework.events.*; +import com.cloud.utils.component.ComponentContext; +import org.apache.cloudstack.framework.events.EventBus; +import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; -import java.util.Enumeration; -import java.util.HashMap; 
-import java.util.Map; - import javax.annotation.PostConstruct; import javax.inject.Inject; +import java.util.HashMap; +import java.util.Map; @Component public class AlertGenerator { @@ -39,13 +40,11 @@ public class AlertGenerator { private static final Logger s_logger = Logger.getLogger(AlertGenerator.class); private static DataCenterDao _dcDao; private static HostPodDao _podDao; - - // get the event bus provider if configured protected static EventBus _eventBus = null; @Inject DataCenterDao dcDao; @Inject HostPodDao podDao; - + public AlertGenerator() { } @@ -56,8 +55,10 @@ public class AlertGenerator { } public static void publishAlertOnEventBus(String alertType, long dataCenterId, Long podId, String subject, String body) { - if (_eventBus == null) { - return; // no provider is configured to provider events bus, so just return + try { + _eventBus = ComponentContext.getComponent(EventBus.class); + } catch(NoSuchBeanDefinitionException nbe) { + return; // no provider is configured to provide events bus, so just return } org.apache.cloudstack.framework.events.Event event = diff --git a/server/src/com/cloud/event/UsageEventUtils.java b/server/src/com/cloud/event/UsageEventUtils.java index d59262af2ba..54012443848 100644 --- a/server/src/com/cloud/event/UsageEventUtils.java +++ b/server/src/com/cloud/event/UsageEventUtils.java @@ -23,17 +23,18 @@ import com.cloud.event.dao.UsageEventDao; import com.cloud.server.ManagementServer; import com.cloud.user.Account; import com.cloud.user.dao.AccountDao; -import org.apache.cloudstack.framework.events.EventBus; +import com.cloud.utils.component.ComponentContext; import org.apache.cloudstack.framework.events.Event; +import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.stereotype.Component; -import java.util.HashMap; -import 
java.util.Map; - import javax.annotation.PostConstruct; import javax.inject.Inject; +import java.util.HashMap; +import java.util.Map; @Component public class UsageEventUtils { @@ -42,14 +43,12 @@ public class UsageEventUtils { private static AccountDao _accountDao; private static DataCenterDao _dcDao; private static final Logger s_logger = Logger.getLogger(UsageEventUtils.class); - - // get the event bus provider if configured - protected static EventBus _eventBus; + protected static EventBus _eventBus = null; @Inject UsageEventDao usageEventDao; @Inject AccountDao accountDao; @Inject DataCenterDao dcDao; - + public UsageEventUtils() { } @@ -116,8 +115,10 @@ public class UsageEventUtils { private static void publishUsageEvent(String usageEventType, Long accountId, Long zoneId, String resourceType, String resourceUUID) { - if (_eventBus == null) { - return; // no provider is configured to provider events bus, so just return + try { + _eventBus = ComponentContext.getComponent(EventBus.class); + } catch(NoSuchBeanDefinitionException nbe) { + return; // no provider is configured to provide events bus, so just return } Account account = _accountDao.findById(accountId); diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java index eb27fda1fe8..bba8be5c649 100755 --- a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -31,7 +31,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.apache.log4j.NDC; -import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -60,6 +59,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.server.ManagementServer; import com.cloud.storage.StorageManager; +import com.cloud.storage.VolumeManager; import 
com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.user.AccountManager; @@ -140,6 +140,8 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai ManagementServer _msServer; @Inject ConfigurationDao _configDao; + @Inject + VolumeManager volumeMgr; String _instance; ScheduledExecutorService _executor; @@ -499,7 +501,7 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai return null; // VM doesn't require HA } - if (!_storageMgr.canVmRestartOnAnotherServer(vm.getId())) { + if (!this.volumeMgr.canVmRestartOnAnotherServer(vm.getId())) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM can not restart on another server."); } diff --git a/server/src/com/cloud/ha/RecreatableFencer.java b/server/src/com/cloud/ha/RecreatableFencer.java index 52ab34f05cd..50aa1b75762 100644 --- a/server/src/com/cloud/ha/RecreatableFencer.java +++ b/server/src/com/cloud/ha/RecreatableFencer.java @@ -21,12 +21,12 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.AdapterBase; import com.cloud.vm.VMInstanceVO; @@ -37,9 +37,9 @@ import com.cloud.vm.VirtualMachine; public class RecreatableFencer extends AdapterBase implements FenceBuilder { private static final Logger s_logger = Logger.getLogger(RecreatableFencer.class); @Inject VolumeDao _volsDao; - @Inject StoragePoolDao _poolDao; + @Inject PrimaryDataStoreDao _poolDao; - protected RecreatableFencer() { + public RecreatableFencer() { super(); } @@ -56,7 +56,7 @@ public class RecreatableFencer extends AdapterBase implements FenceBuilder { for (VolumeVO vol : vols) { if 
(!vol.isRecreatable()) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to fence off volumes that are not recreatable: " + vol); + s_logger.debug("Unable to f ence off volumes that are not recreatable: " + vol); } return null; } diff --git a/server/src/com/cloud/host/dao/HostDaoImpl.java b/server/src/com/cloud/host/dao/HostDaoImpl.java index c03611d41e9..07a42322ce3 100755 --- a/server/src/com/cloud/host/dao/HostDaoImpl.java +++ b/server/src/com/cloud/host/dao/HostDaoImpl.java @@ -128,6 +128,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Inject protected ClusterDao _clusterDao; public HostDaoImpl() { + super(); } @PostConstruct @@ -261,7 +262,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao * UnmanagedDirectConnectSearch.and("lastPinged", UnmanagedDirectConnectSearch.entity().getLastPinged(), * SearchCriteria.Op.LTEQ); UnmanagedDirectConnectSearch.cp(); UnmanagedDirectConnectSearch.cp(); */ + try { HostTransferSearch = _hostTransferDao.createSearchBuilder(); + } catch (Throwable e) { + s_logger.debug("error", e); + } HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL); UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), JoinType.LEFTOUTER); ClusterManagedSearch = _clusterDao.createSearchBuilder(); @@ -491,7 +496,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao txn.start(); SearchCriteria sc = UnmanagedApplianceSearch.create(); sc.setParameters("lastPinged", lastPingSecondsAfter); - sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.PxeServer, Type.TrafficMonitor, Type.L2Networking); + sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.BaremetalDhcp, Type.BaremetalPxe, Type.TrafficMonitor, Type.L2Networking); List hosts = lockRows(sc, null, 
true); for (HostVO host : hosts) { diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java index e158962aa11..d77796da300 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java @@ -34,6 +34,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.VMInstanceDao; public abstract class HypervisorGuruBase extends AdapterBase implements HypervisorGuru { @@ -41,7 +42,8 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis @Inject VMTemplateDetailsDao _templateDetailsDao; @Inject NicDao _nicDao; @Inject VMInstanceDao _virtualMachineDao; - + @Inject NicSecondaryIpDao _nicSecIpDao; + protected HypervisorGuruBase() { super(); } @@ -68,6 +70,14 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis // Workaround to make sure the TO has the UUID we need for Niciri integration NicVO nicVO = _nicDao.findById(profile.getId()); to.setUuid(nicVO.getUuid()); + //check whether the this nic has secondary ip addresses set + //set nic secondary ip address in NicTO which are used for security group + // configuration. 
Use full when vm stop/start + List secIps = null; + if (nicVO.getSecondaryIp()) { + secIps = _nicSecIpDao.getSecondaryIpAddressesForNic(nicVO.getId()); + } + to.setNicSecIps(secIps); return to; } @@ -76,9 +86,11 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis ServiceOffering offering = vmProfile.getServiceOffering(); VirtualMachine vm = vmProfile.getVirtualMachine(); - - VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), offering.getSpeed(), - offering.getRamSize() * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword()); + Long minMemory = (long) (offering.getRamSize()/vmProfile.getCpuOvercommitRatio()); + int minspeed= (int)(offering.getSpeed()/vmProfile.getMemoryOvercommitRatio()); + int maxspeed = (offering.getSpeed()); + VirtualMachineTO to = new VirtualMachineTO(vm.getId(), vm.getInstanceName(), vm.getType(), offering.getCpu(), minspeed, maxspeed, + minMemory * 1024l * 1024l, offering.getRamSize() * 1024l * 1024l, null, null, vm.isHaEnabled(), vm.limitCpuUse(), vm.getVncPassword()); to.setBootArgs(vmProfile.getBootArgs()); List nicProfiles = vmProfile.getNics(); diff --git a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java index 8f4a47584b3..0fe0b535f78 100644 --- a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java +++ b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java @@ -31,4 +31,6 @@ public interface HypervisorCapabilitiesDao extends GenericDao HypervisorTypeSearch; protected final SearchBuilder HypervisorTypeAndVersionSearch; - protected final GenericSearchBuilder MaxGuestLimitByHypervisorSearch; - protected final GenericSearchBuilder MaxDataVolumesLimitByHypervisorSearch; private static final String DEFAULT_VERSION = "default"; @@ -52,18 +50,14 @@ public class 
HypervisorCapabilitiesDaoImpl extends GenericDaoBase sc = MaxGuestLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", hypervisorVersion); - List limitList = customSearch(sc, null); - if(!limitList.isEmpty()){ - result = limitList.get(0); - }else{ - useDefault = true; - } - }else{ - useDefault = true; - } - if(useDefault){ - SearchCriteria sc = MaxGuestLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", DEFAULT_VERSION); - List limitList = customSearch(sc, null); - if(!limitList.isEmpty()){ - result = limitList.get(0); - } - } - if(result == null){ + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + Long limit = result.getMaxGuestsLimit(); + if (limit == null) return defaultLimit; - } - return result; + return limit; } @Override public Integer getMaxDataVolumesLimit(HypervisorType hypervisorType, String hypervisorVersion) { - Integer result = null; - boolean useDefault = false; - if (hypervisorVersion != null) { - SearchCriteria sc = MaxDataVolumesLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", hypervisorVersion); - List limitList = customSearch(sc, null); - if (!limitList.isEmpty()) { - result = limitList.get(0); - } else { - useDefault = true; - } - } else { - useDefault = true; - } - // If data is not available for a specific hypervisor version then use 'default' as the version - if (useDefault) { - SearchCriteria sc = MaxDataVolumesLimitByHypervisorSearch.create(); - sc.setParameters("hypervisorType", hypervisorType); - sc.setParameters("hypervisorVersion", DEFAULT_VERSION); - List limitList = customSearch(sc, null); - if (!limitList.isEmpty()) { - result = limitList.get(0); - } - } - return result; + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + return 
result.getMaxDataVolumesLimit(); } -} \ No newline at end of file + + @Override + public Integer getMaxHostsPerCluster(HypervisorType hypervisorType, String hypervisorVersion) { + HypervisorCapabilitiesVO result = getCapabilities(hypervisorType, hypervisorVersion); + return result.getMaxHostsPerCluster(); + } +} diff --git a/server/src/com/cloud/migration/ServiceOffering21VO.java b/server/src/com/cloud/migration/ServiceOffering21VO.java index fdec30e3b8a..d07be6462f1 100644 --- a/server/src/com/cloud/migration/ServiceOffering21VO.java +++ b/server/src/com/cloud/migration/ServiceOffering21VO.java @@ -169,5 +169,10 @@ public class ServiceOffering21VO extends DiskOffering21VO implements ServiceOffe return null; } + @Override + public boolean getVolatileVm() { + return false; + } + } diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index bcefccc3a04..d7b6d78c9bb 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -109,6 +109,7 @@ import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserStatisticsDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; @@ -818,7 +819,8 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } } else { s_logger.debug("Revoking a rule for an inline load balancer that has not been programmed yet."); - return null; + nic.setNic(null); + return nic; } } @@ -876,9 +878,9 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase MappingNic nic = getLoadBalancingIpNic(zone, network, rule.getSourceIpAddressId(), revoked, null); mappingStates.add(nic.getState()); NicVO 
loadBalancingIpNic = nic.getNic(); - if (loadBalancingIpNic == null) { - continue; - } + if (loadBalancingIpNic == null) { + continue; + } // Change the source IP address for the load balancing rule to be the load balancing IP address srcIp = loadBalancingIpNic.getIp4Address(); @@ -1102,7 +1104,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } NetworkElement element = _networkModel.getElementImplementingProvider(providers.get(0).getName()); - if (!(element instanceof IpDeployer)) { + if (!(ComponentContext.getTargetObject(element) instanceof IpDeployer)) { s_logger.error("The firewall provider for network " + network.getName() + " don't have ability to deploy IP address!"); return null; } diff --git a/server/src/com/cloud/network/ExternalLoadBalancerUsageManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerUsageManagerImpl.java index 90045014370..d405382f89c 100644 --- a/server/src/com/cloud/network/ExternalLoadBalancerUsageManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerUsageManagerImpl.java @@ -16,22 +16,6 @@ // under the License. 
package com.cloud.network; -import java.net.URI; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.agent.AgentManager; import com.cloud.agent.api.ExternalNetworkResourceUsageAnswer; import com.cloud.agent.api.ExternalNetworkResourceUsageCommand; @@ -41,27 +25,30 @@ import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; -import com.cloud.host.DetailVO; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.network.dao.ExternalFirewallDeviceDao; +import com.cloud.network.dao.ExternalFirewallDeviceVO; import com.cloud.network.dao.ExternalLoadBalancerDeviceDao; import com.cloud.network.dao.ExternalLoadBalancerDeviceVO; import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.InlineLoadBalancerNicMapDao; import com.cloud.network.dao.InlineLoadBalancerNicMapVO; import com.cloud.network.dao.LoadBalancerDao; import com.cloud.network.dao.LoadBalancerVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkExternalFirewallDao; +import com.cloud.network.dao.NetworkExternalFirewallVO; import com.cloud.network.dao.NetworkExternalLoadBalancerDao; import com.cloud.network.dao.NetworkExternalLoadBalancerVO; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; +import 
com.cloud.network.rules.PortForwardingRuleVO; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.resource.ResourceManager; @@ -81,6 +68,20 @@ import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicVO; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; @Component @Local(value = { ExternalLoadBalancerUsageManager.class }) @@ -146,7 +147,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements ScheduledExecutorService _executor; private int _externalNetworkStatsInterval; private static final org.apache.log4j.Logger s_logger = Logger.getLogger(ExternalLoadBalancerUsageManagerImpl.class); - + @Override public boolean configure(String name, Map params) throws ConfigurationException { _externalNetworkStatsInterval = NumbersUtil.parseInt(_configDao.getValue(Config.ExternalNetworkStatsInterval.key()), 300); @@ -160,7 +161,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements @Override public boolean start() { if (_externalNetworkStatsInterval > 0) { - _executor.scheduleAtFixedRate(new ExternalLoadBalancerDeviceNetworkUsageTask(), _externalNetworkStatsInterval, _externalNetworkStatsInterval, TimeUnit.SECONDS); + _executor.scheduleAtFixedRate(new ExternalDeviceNetworkUsageTask(), _externalNetworkStatsInterval, _externalNetworkStatsInterval, TimeUnit.SECONDS); } return true; } @@ -170,6 +171,11 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements 
return true; } + @Override + public String getName() { + return _name; + } + private ExternalLoadBalancerDeviceVO getExternalLoadBalancerForNetwork(Network network) { NetworkExternalLoadBalancerVO lbDeviceForNetwork = _networkExternalLBDao.findByNetworkId(network.getId()); if (lbDeviceForNetwork != null) { @@ -180,15 +186,21 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } return null; } - - private boolean externalLoadBalancerIsInline(HostVO externalLoadBalancer) { - DetailVO detail = _hostDetailDao.findDetail(externalLoadBalancer.getId(), "inline"); - return (detail != null && detail.getValue().equals("true")); + + private ExternalFirewallDeviceVO getExternalFirewallForNetwork(Network network) { + NetworkExternalFirewallVO fwDeviceForNetwork = _networkExternalFirewallDao.findByNetworkId(network.getId()); + if (fwDeviceForNetwork != null) { + long fwDeviceId = fwDeviceForNetwork.getExternalFirewallDeviceId(); + ExternalFirewallDeviceVO fwDevice = _externalFirewallDeviceDao.findById(fwDeviceId); + assert(fwDevice != null); + return fwDevice; + } + return null; } - + @Override public void updateExternalLoadBalancerNetworkUsageStats(long loadBalancerRuleId){ - + LoadBalancerVO lb = _loadBalancerDao.findById(loadBalancerRuleId); if(lb == null){ if(s_logger.isDebugEnabled()){ @@ -204,7 +216,8 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } return; } - ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); + + ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); if (lbDeviceVO == null) { if(s_logger.isDebugEnabled()){ s_logger.debug("Cannot update usage stats, No external LB device found"); @@ -235,24 +248,24 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements String publicIp = _networkMgr.getIp(lb.getSourceIpAddressId()).getAddress().addr(); DataCenterVO zone = 
_dcDao.findById(network.getDataCenterId()); - String statsEntryIdentifier = "account " + account.getAccountName() + ", zone " + zone.getName() + ", network ID " + networkId + ", host ID " + externalLoadBalancer.getName(); - + String statsEntryIdentifier = "account " + account.getAccountName() + ", zone " + zone.getName() + ", network ID " + networkId + ", host ID " + externalLoadBalancer.getName(); + long newCurrentBytesSent = 0; long newCurrentBytesReceived = 0; - + if (publicIp != null) { long[] bytesSentAndReceived = null; statsEntryIdentifier += ", public IP: " + publicIp; - - if (externalLoadBalancer.getType().equals(Host.Type.ExternalLoadBalancer) && externalLoadBalancerIsInline(externalLoadBalancer)) { + boolean inline = _networkMgr.isNetworkInlineMode(network); + if (externalLoadBalancer.getType().equals(Host.Type.ExternalLoadBalancer) && inline) { // Look up stats for the guest IP address that's mapped to the public IP address InlineLoadBalancerNicMapVO mapping = _inlineLoadBalancerNicMapDao.findByPublicIpAddress(publicIp); - + if (mapping != null) { NicVO nic = _nicDao.findById(mapping.getNicId()); String loadBalancingIpAddress = nic.getIp4Address(); bytesSentAndReceived = lbAnswer.ipBytes.get(loadBalancingIpAddress); - + if (bytesSentAndReceived != null) { bytesSentAndReceived[0] = 0; } @@ -260,39 +273,39 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } else { bytesSentAndReceived = lbAnswer.ipBytes.get(publicIp); } - + if (bytesSentAndReceived == null) { s_logger.debug("Didn't get an external network usage answer for public IP " + publicIp); } else { newCurrentBytesSent += bytesSentAndReceived[0]; newCurrentBytesReceived += bytesSentAndReceived[1]; } - + UserStatisticsVO userStats; final Transaction txn = Transaction.currentTxn(); try { - txn.start(); + txn.start(); userStats = _userStatsDao.lock(accountId, zone.getId(), networkId, publicIp, externalLoadBalancer.getId(), externalLoadBalancer.getType().toString()); - 
+ if(userStats != null){ long oldNetBytesSent = userStats.getNetBytesSent(); long oldNetBytesReceived = userStats.getNetBytesReceived(); long oldCurrentBytesSent = userStats.getCurrentBytesSent(); long oldCurrentBytesReceived = userStats.getCurrentBytesReceived(); String warning = "Received an external network stats byte count that was less than the stored value. Zone ID: " + userStats.getDataCenterId() + ", account ID: " + userStats.getAccountId() + "."; - + userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { - s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); + s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); - } - + } + userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { - s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); + s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); - } - + } + if (_userStatsDao.update(userStats.getId(), userStats)) { s_logger.debug("Successfully updated stats for " + statsEntryIdentifier); } else { @@ -301,7 +314,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements }else { s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); } - + txn.commit(); }catch (final Exception e) { txn.rollback(); @@ -310,32 +323,32 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } } - protected class ExternalLoadBalancerDeviceNetworkUsageTask implements Runnable { + protected class 
ExternalDeviceNetworkUsageTask implements Runnable { - public ExternalLoadBalancerDeviceNetworkUsageTask() { + public ExternalDeviceNetworkUsageTask() { } @Override public void run() { - GlobalLock scanLock = GlobalLock.getInternLock("ExternalLoadBalancerUsageManagerImpl"); + GlobalLock scanLock = GlobalLock.getInternLock("ExternalDeviceNetworkUsageManagerImpl"); try { if (scanLock.lock(20)) { try { - runExternalLoadBalancerNetworkUsageTask(); + runExternalDeviceNetworkUsageTask(); } finally { scanLock.unlock(); } } } catch (Exception e) { - s_logger.warn("Problems while getting external load balancer device usage", e); + s_logger.warn("Problems while getting external device usage", e); } finally { scanLock.releaseRef(); } } - private void runExternalLoadBalancerNetworkUsageTask() { - s_logger.debug("External load balancer devices stats collector is running..."); + private void runExternalDeviceNetworkUsageTask() { + s_logger.debug("External devices stats collector is running..."); for (DataCenterVO zone : _dcDao.listAll()) { List domainRoutersInZone = _routerDao.listByDataCenter(zone.getId()); @@ -343,6 +356,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements continue; } Map lbDeviceUsageAnswerMap = new HashMap(); + Map fwDeviceUsageAnswerMap = new HashMap(); List accountsProcessed = new ArrayList(); for (DomainRouterVO domainRouter : domainRoutersInZone) { @@ -368,45 +382,88 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements continue; } + ExternalFirewallDeviceVO fwDeviceVO = getExternalFirewallForNetwork(network); ExternalLoadBalancerDeviceVO lbDeviceVO = getExternalLoadBalancerForNetwork(network); - if (lbDeviceVO == null) { + if (lbDeviceVO == null && fwDeviceVO == null) { continue; } + // Get network stats from the external firewall + ExternalNetworkResourceUsageAnswer firewallAnswer = null; + HostVO externalFirewall = null; + if(fwDeviceVO != null){ + externalFirewall = 
_hostDao.findById(fwDeviceVO.getHostId()); + if (externalFirewall != null) { + Long fwDeviceId = new Long(externalFirewall.getId()); + if(!fwDeviceUsageAnswerMap.containsKey(fwDeviceId)){ + try{ + ExternalNetworkResourceUsageCommand cmd = new ExternalNetworkResourceUsageCommand(); + firewallAnswer = (ExternalNetworkResourceUsageAnswer) _agentMgr.easySend(externalFirewall.getId(), cmd); + if (firewallAnswer == null || !firewallAnswer.getResult()) { + String details = (firewallAnswer != null) ? firewallAnswer.getDetails() : "details unavailable"; + String msg = "Unable to get external firewall stats for network" + zone.getName() + " due to: " + details + "."; + s_logger.error(msg); + } else { + fwDeviceUsageAnswerMap.put(fwDeviceId, firewallAnswer); + } + } catch (Exception e){ + String msg = "Unable to get external firewall stats for network" + zone.getName(); + s_logger.error(msg, e); + } + } else { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Reusing usage Answer for device id " + fwDeviceId + "for Network " + network.getId()); + } + firewallAnswer = fwDeviceUsageAnswerMap.get(fwDeviceId); + } + }} + // Get network stats from the external load balancer ExternalNetworkResourceUsageAnswer lbAnswer = null; - HostVO externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); - if (externalLoadBalancer != null) { - Long lbDeviceId = new Long(externalLoadBalancer.getId()); - if (!lbDeviceUsageAnswerMap.containsKey(lbDeviceId)) { - ExternalNetworkResourceUsageCommand cmd = new ExternalNetworkResourceUsageCommand(); - lbAnswer = (ExternalNetworkResourceUsageAnswer) _agentMgr.easySend(externalLoadBalancer.getId(), cmd); - if (lbAnswer == null || !lbAnswer.getResult()) { - String details = (lbAnswer != null) ? 
lbAnswer.getDetails() : "details unavailable"; - String msg = "Unable to get external load balancer stats for " + zone.getName() + " due to: " + details + "."; - s_logger.error(msg); - continue; + HostVO externalLoadBalancer = null; + if(lbDeviceVO !=null){ + externalLoadBalancer = _hostDao.findById(lbDeviceVO.getHostId()); + if (externalLoadBalancer != null) { + Long lbDeviceId = new Long(externalLoadBalancer.getId()); + if (!lbDeviceUsageAnswerMap.containsKey(lbDeviceId)) { + try { + ExternalNetworkResourceUsageCommand cmd = new ExternalNetworkResourceUsageCommand(); + lbAnswer = (ExternalNetworkResourceUsageAnswer) _agentMgr.easySend(externalLoadBalancer.getId(), cmd); + if (lbAnswer == null || !lbAnswer.getResult()) { + String details = (lbAnswer != null) ? lbAnswer.getDetails() : "details unavailable"; + String msg = "Unable to get external load balancer stats for " + zone.getName() + " due to: " + details + "."; + s_logger.error(msg); + } else { + lbDeviceUsageAnswerMap.put(lbDeviceId, lbAnswer); + } + } catch (Exception e){ + String msg = "Unable to get external load balancer stats for " + zone.getName(); + s_logger.error(msg, e); + } + } else { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); + } + lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } - lbDeviceUsageAnswerMap.put(lbDeviceId, lbAnswer); - } else { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Reusing usage Answer for device id " + lbDeviceId + "for Network " + network.getId()); - } - lbAnswer = lbDeviceUsageAnswerMap.get(lbDeviceId); } } + if(firewallAnswer == null && lbAnswer == null){ + continue; + } + AccountVO account = _accountDao.findById(accountId); if (account == null) { s_logger.debug("Skipping stats update for account with ID " + accountId); continue; } - if (!manageStatsEntries(true, accountId, zoneId, network, externalLoadBalancer, lbAnswer)) { + if (!manageStatsEntries(true, 
accountId, zoneId, network, externalFirewall, firewallAnswer, externalLoadBalancer, lbAnswer)) { continue; } - manageStatsEntries(false, accountId, zoneId, network, externalLoadBalancer, lbAnswer); + manageStatsEntries(false, accountId, zoneId, network, externalFirewall, firewallAnswer, externalLoadBalancer, lbAnswer); } accountsProcessed.add(new Long(accountId)); @@ -448,7 +505,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } // Updates an existing stats entry with new data from the specified usage answer. - private boolean updateStatsEntry(long accountId, long zoneId, long networkId, String publicIp, long hostId, ExternalNetworkResourceUsageAnswer answer) { + private boolean updateStatsEntry(long accountId, long zoneId, long networkId, String publicIp, long hostId, ExternalNetworkResourceUsageAnswer answer, boolean inline) { AccountVO account = _accountDao.findById(accountId); DataCenterVO zone = _dcDao.findById(zoneId); NetworkVO network = _networkDao.findById(networkId); @@ -462,7 +519,7 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements long[] bytesSentAndReceived = null; statsEntryIdentifier += ", public IP: " + publicIp; - if (host.getType().equals(Host.Type.ExternalLoadBalancer) && externalLoadBalancerIsInline(host)) { + if (host.getType().equals(Host.Type.ExternalLoadBalancer) && inline) { // Look up stats for the guest IP address that's mapped to the public IP address InlineLoadBalancerNicMapVO mapping = _inlineLoadBalancerNicMapDao.findByPublicIpAddress(publicIp); @@ -520,11 +577,11 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } } - private boolean createOrUpdateStatsEntry(boolean create, long accountId, long zoneId, long networkId, String publicIp, long hostId, ExternalNetworkResourceUsageAnswer answer) { + private boolean createOrUpdateStatsEntry(boolean create, long accountId, long zoneId, long networkId, String publicIp, long hostId, 
ExternalNetworkResourceUsageAnswer answer, boolean inline) { if (create) { return createStatsEntry(accountId, zoneId, networkId, publicIp, hostId); } else { - return updateStatsEntry(accountId, zoneId, networkId, publicIp, hostId, answer); + return updateStatsEntry(accountId, zoneId, networkId, publicIp, hostId, answer, inline); } } @@ -534,19 +591,66 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements * balancing rules */ private boolean manageStatsEntries(boolean create, long accountId, long zoneId, Network network, - HostVO externalLoadBalancer, ExternalNetworkResourceUsageAnswer lbAnswer) { + HostVO externalFirewall, ExternalNetworkResourceUsageAnswer firewallAnswer, + HostVO externalLoadBalancer, ExternalNetworkResourceUsageAnswer lbAnswer) { String accountErrorMsg = "Failed to update external network stats entry. Details: account ID = " + accountId; Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { txn.start(); String networkErrorMsg = accountErrorMsg + ", network ID = " + network.getId(); + boolean sharedSourceNat = false; + Map sourceNatCapabilities = _networkMgr.getNetworkServiceCapabilities(network.getId(), Network.Service.SourceNat); + if (sourceNatCapabilities != null) { + String supportedSourceNatTypes = sourceNatCapabilities.get(Network.Capability.SupportedSourceNatTypes).toLowerCase(); + if (supportedSourceNatTypes.contains("zone")) { + sharedSourceNat = true; + } + } + + if(externalFirewall != null && firewallAnswer != null){ + if (!sharedSourceNat) { + // Manage the entry for this network's source NAT IP address + List sourceNatIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); + if (sourceNatIps.size() == 1) { + String publicIp = sourceNatIps.get(0).getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new ExecutionException(networkErrorMsg + ", source NAT IP = " + 
publicIp); + } + } + + // Manage one entry for each static NAT rule in this network + List staticNatIps = _ipAddressDao.listStaticNatPublicIps(network.getId()); + for (IPAddressVO staticNatIp : staticNatIps) { + String publicIp = staticNatIp.getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new ExecutionException(networkErrorMsg + ", static NAT rule public IP = " + publicIp); + } + } + + // Manage one entry for each port forwarding rule in this network + List portForwardingRules = _portForwardingRulesDao.listByNetwork(network.getId()); + for (PortForwardingRuleVO portForwardingRule : portForwardingRules) { + String publicIp = _networkMgr.getIp(portForwardingRule.getSourceIpAddressId()).getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new ExecutionException(networkErrorMsg + ", port forwarding rule public IP = " + publicIp); + } + } + } else { + // Manage the account-wide entry for the external firewall + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), null, externalFirewall.getId(), firewallAnswer, false)) { + throw new ExecutionException(networkErrorMsg); + } + } + } + // If an external load balancer is added, manage one entry for each load balancing rule in this network if (externalLoadBalancer != null && lbAnswer != null) { + boolean inline = _networkMgr.isNetworkInlineMode(network); List loadBalancers = _loadBalancerDao.listByNetworkId(network.getId()); for (LoadBalancerVO loadBalancer : loadBalancers) { String publicIp = _networkMgr.getIp(loadBalancer.getSourceIpAddressId()).getAddress().addr(); - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalLoadBalancer.getId(), lbAnswer)) { + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), 
publicIp, externalLoadBalancer.getId(), lbAnswer, inline)) { throw new ExecutionException(networkErrorMsg + ", load balancing rule public IP = " + publicIp); } } @@ -561,4 +665,5 @@ public class ExternalLoadBalancerUsageManagerImpl extends ManagerBase implements } } } -} \ No newline at end of file + +} diff --git a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java index e2382f875c7..014db59447d 100755 --- a/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalNetworkDeviceManagerImpl.java @@ -25,7 +25,6 @@ import java.util.concurrent.ScheduledExecutorService; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.network.AddNetworkDeviceCmd; @@ -37,14 +36,7 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; -import com.cloud.api.ApiDBUtils; -import com.cloud.baremetal.ExternalDhcpManager; -import com.cloud.baremetal.PxeServerManager; -import com.cloud.baremetal.PxeServerManager.PxeServerType; -import com.cloud.baremetal.PxeServerProfile; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.DataCenter; -import com.cloud.dc.Pod; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; import com.cloud.host.Host; @@ -63,8 +55,6 @@ import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VpnUserDao; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.offerings.dao.NetworkOfferingDao; -import com.cloud.server.api.response.NwDeviceDhcpResponse; -import com.cloud.server.api.response.PxePingResponse; import com.cloud.user.AccountManager; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserStatisticsDao; @@ -77,8 +67,6 
@@ import com.cloud.vm.dao.NicDao; @Local(value = {ExternalNetworkDeviceManager.class}) public class ExternalNetworkDeviceManagerImpl extends ManagerBase implements ExternalNetworkDeviceManager { - @Inject ExternalDhcpManager _dhcpMgr; - @Inject PxeServerManager _pxeMgr; @Inject AgentManager _agentMgr; @Inject NetworkModel _networkMgr; @Inject HostDao _hostDao; @@ -121,80 +109,12 @@ public class ExternalNetworkDeviceManagerImpl extends ManagerBase implements Ext Collection paramsCollection = paramList.values(); HashMap params = (HashMap) (paramsCollection.toArray())[0]; - if (cmd.getDeviceType().equalsIgnoreCase(NetworkDevice.ExternalDhcp.getName())) { - //Long zoneId = _identityService.getIdentityId("data_center", (String) params.get(ApiConstants.ZONE_ID)); - //Long podId = _identityService.getIdentityId("host_pod_ref", (String)params.get(ApiConstants.POD_ID)); - Long zoneId = Long.valueOf((String) params.get(ApiConstants.ZONE_ID)); - Long podId = Long.valueOf((String)params.get(ApiConstants.POD_ID)); - String type = (String) params.get(ApiConstants.DHCP_SERVER_TYPE); - String url = (String) params.get(ApiConstants.URL); - String username = (String) params.get(ApiConstants.USERNAME); - String password = (String) params.get(ApiConstants.PASSWORD); - - return _dhcpMgr.addDhcpServer(zoneId, podId, type, url, username, password); - } else if (cmd.getDeviceType().equalsIgnoreCase(NetworkDevice.PxeServer.getName())) { - Long zoneId = Long.parseLong((String) params.get(ApiConstants.ZONE_ID)); - Long podId = Long.parseLong((String)params.get(ApiConstants.POD_ID)); - //Long zoneId = _identityService.getIdentityId("data_center", (String) params.get(ApiConstants.ZONE_ID)); - //Long podId = _identityService.getIdentityId("host_pod_ref", (String)params.get(ApiConstants.POD_ID)); - String type = (String) params.get(ApiConstants.PXE_SERVER_TYPE); - String url = (String) params.get(ApiConstants.URL); - String username = (String) params.get(ApiConstants.USERNAME); - String 
password = (String) params.get(ApiConstants.PASSWORD); - String pingStorageServerIp = (String) params.get(ApiConstants.PING_STORAGE_SERVER_IP); - String pingDir = (String) params.get(ApiConstants.PING_DIR); - String tftpDir = (String) params.get(ApiConstants.TFTP_DIR); - String pingCifsUsername = (String) params.get(ApiConstants.PING_CIFS_USERNAME); - String pingCifsPassword = (String) params.get(ApiConstants.PING_CIFS_PASSWORD); - PxeServerProfile profile = new PxeServerProfile(zoneId, podId, url, username, password, type, pingStorageServerIp, pingDir, tftpDir, - pingCifsUsername, pingCifsPassword); - return _pxeMgr.addPxeServer(profile); - } else { - throw new CloudRuntimeException("Unsupported network device type:" + cmd.getDeviceType()); - } + return null; } @Override public NetworkDeviceResponse getApiResponse(Host device) { - NetworkDeviceResponse response; - HostVO host = (HostVO)device; - _hostDao.loadDetails(host); - if (host.getType() == Host.Type.ExternalDhcp) { - NwDeviceDhcpResponse r = new NwDeviceDhcpResponse(); - r.setZoneId(host.getDataCenterId()); - r.setPodId(host.getPodId()); - r.setUrl(host.getPrivateIpAddress()); - r.setType(host.getDetail("type")); - response = r; - } else if (host.getType() == Host.Type.PxeServer) { - String pxeType = host.getDetail("type"); - if (pxeType.equalsIgnoreCase(PxeServerType.PING.getName())) { - PxePingResponse r = new PxePingResponse(); - DataCenter zone = ApiDBUtils.findZoneById(host.getDataCenterId()); - if (zone != null) { - r.setZoneId(zone.getUuid()); - } - if (host.getPodId() != null) { - Pod pod = ApiDBUtils.findPodById(host.getPodId()); - if (pod != null) { - r.setPodId(pod.getUuid()); - } - } - r.setUrl(host.getPrivateIpAddress()); - r.setType(pxeType); - r.setStorageServerIp(host.getDetail("storageServer")); - r.setPingDir(host.getDetail("pingDir")); - r.setTftpDir(host.getDetail("tftpDir")); - response = r; - } else { - throw new CloudRuntimeException("Unsupported PXE server type:" + pxeType); - } - } 
else { - throw new CloudRuntimeException("Unsupported network device type:" + host.getType()); - } - - response.setId(device.getUuid()); - return response; + return null; } private List listNetworkDevice(Long zoneId, Long physicalNetworkId, Long podId, Host.Type type) { diff --git a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java index ecef5a225e9..a401f9ae396 100644 --- a/server/src/com/cloud/network/Ipv6AddressManagerImpl.java +++ b/server/src/com/cloud/network/Ipv6AddressManagerImpl.java @@ -80,7 +80,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa } List vlans = _vlanDao.listVlansByNetworkId(networkId); if (vlans == null) { - s_logger.debug("Cannot find related vlan or too many vlan attached to network " + networkId); + s_logger.debug("Cannot find related vlan attached to network " + networkId); return null; } String ip = null; @@ -109,7 +109,7 @@ public class Ipv6AddressManagerImpl extends ManagerBase implements Ipv6AddressMa } } if (ip == null) { - throw new InsufficientAddressCapacityException("Cannot find a usable IP in the network " + network.getName() + " after network.ipv6.search.retry.max = " + _ipv6RetryMax + " times retry!", + throw new InsufficientAddressCapacityException("Cannot find a usable IP in the network " + network.getName() + " after " + _ipv6RetryMax + "(network.ipv6.search.retry.max) times retry!", DataCenter.class, network.getDataCenterId()); } } else { diff --git a/server/src/com/cloud/network/NetworkManager.java b/server/src/com/cloud/network/NetworkManager.java index 2904183911e..48e017edabd 100755 --- a/server/src/com/cloud/network/NetworkManager.java +++ b/server/src/com/cloud/network/NetworkManager.java @@ -21,6 +21,8 @@ import java.util.Map; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.Pod; import com.cloud.dc.Vlan.VlanType; 
import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; @@ -49,6 +51,7 @@ import com.cloud.user.User; import com.cloud.utils.Pair; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -331,4 +334,16 @@ public interface NetworkManager { int getRuleCountForIp(Long addressId, FirewallRule.Purpose purpose, FirewallRule.State state); LoadBalancingServiceProvider getLoadBalancingProviderForNetwork(Network network); + + + boolean isSecondaryIpSetForNic(long nicId); + + public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) + throws InsufficientAddressCapacityException; + + + List listVmNics(Long vmId, Long nicId); + String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, Pod pod, Account caller, String requestedIp) throws InsufficientAddressCapacityException; + boolean removeVmSecondaryIpsOfNic(long nicId); + } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index f5868658751..3220c9174eb 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -129,6 +129,7 @@ import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.*; @@ -140,8 +141,19 @@ import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; import com.cloud.vm.*; +import com.cloud.vm.Nic; +import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; +import com.cloud.vm.NicVO; +import 
com.cloud.vm.ReservationContext; +import com.cloud.vm.ReservationContextImpl; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.NicSecondaryIpVO; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @@ -240,6 +252,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L UsageEventDao _usageEventDao; @Inject NetworkModel _networkModel; + @Inject + NicSecondaryIpDao _nicSecondaryIpDao; @Inject UserIpv6AddressDao _ipv6Dao; @Inject @@ -355,9 +369,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L _firewallMgr.addSystemFirewallRules(addr, owner); } - long macAddress = NetUtils.createSequenceBasedMacAddress(addr.getMacAddress()); - - return new PublicIp(addr, _vlanDao.findById(addr.getVlanId()), macAddress); + return PublicIp.createFromAddrAndVlan(addr, _vlanDao.findById(addr.getVlanId())); } @DB @@ -404,8 +416,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L PublicIp ipToReturn = null; if (sourceNatIp != null) { - ipToReturn = new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress())); + ipToReturn = PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId())); } else { ipToReturn = assignDedicateIpAddress(owner, guestNetwork.getId(), null, dcId, true); } @@ -490,8 +501,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List publicIps = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = 
PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); publicIps.add(publicIp); } } @@ -537,10 +547,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } IpDeployer deployer = null; NetworkElement element = _networkModel.getElementImplementingProvider(provider.getName()); - if (!(element instanceof IpDeployingRequester)) { + if (!(ComponentContext.getTargetObject(element) instanceof IpDeployingRequester)) { throw new CloudRuntimeException("Element " + element + " is not a IpDeployingRequester!"); } - deployer = ((IpDeployingRequester)element).getIpDeployer(network); + deployer = ((IpDeployingRequester)ComponentContext.getTargetObject(element)).getIpDeployer(network); if (deployer == null) { throw new CloudRuntimeException("Fail to get ip deployer for element: " + element); } @@ -1517,16 +1527,16 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L DeployDestination dest, ReservationContext context) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { element.prepare(network, profile, vmProfile, dest, context); - if (vmProfile.getType() == Type.User && vmProfile.getHypervisorType() != HypervisorType.BareMetal && element.getProvider() != null) { + if (vmProfile.getType() == Type.User && element.getProvider() != null) { if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp) && _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.Dhcp, element.getProvider()) && - (element instanceof DhcpServiceProvider)) { + (ComponentContext.getTargetObject(element) instanceof DhcpServiceProvider)) { DhcpServiceProvider sp = (DhcpServiceProvider) element; sp.addDhcpEntry(network, profile, vmProfile, dest, context); } if (_networkModel.areServicesSupportedInNetwork(network.getId(), Service.UserData) && _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.UserData, element.getProvider()) && 
- (element instanceof UserDataServiceProvider)) { + (ComponentContext.getTargetObject(element) instanceof UserDataServiceProvider)) { UserDataServiceProvider sp = (UserDataServiceProvider) element; sp.addPasswordAndUserdata(network, profile, vmProfile, dest, context); } @@ -1754,6 +1764,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L guru.deallocate(network, profile, vm); _nicDao.remove(nic.getId()); s_logger.debug("Removed nic id=" + nic.getId()); + //remove the secondary ip addresses corresponding to to this nic + if (!removeVmSecondaryIpsOfNic(nic.getId())) { + s_logger.debug("Removing nic " + nic.getId() + " secondary ip addreses failed"); + } } @Override @@ -2311,7 +2325,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List userIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), null); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); publicIps.add(publicIp); } } @@ -2796,6 +2810,32 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Random _rand = new Random(System.currentTimeMillis()); + public List listVmNics(Long vmId, Long nicId) { + List result = null; + if (nicId == null) { + result = _nicDao.listByVmId(vmId); + } else { + result = _nicDao.listByVmIdAndNicId(vmId, nicId); + } + return result; + } + + public String allocateGuestIP(Account ipOwner, boolean isSystem, long zoneId, Long networkId, String requestedIp) + throws InsufficientAddressCapacityException { + String ipaddr = null; + Account caller = UserContext.current().getCaller(); + long callerUserId = UserContext.current().getCallerUserId(); + // check permissions + DataCenter zone = 
_configMgr.getZone(zoneId); + Network network = _networksDao.findById(networkId); + + _accountMgr.checkAccess(caller, null, false, network); + + ipaddr = acquireGuestIpAddress(network, requestedIp); + return ipaddr; + } + + @Override @DB public String acquireGuestIpAddress(Network network, String requestedIp) { @@ -2806,7 +2846,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L Set availableIps = _networkModel.getAvailableIps(network, requestedIp); - if (availableIps.isEmpty()) { + if (availableIps == null || availableIps.isEmpty()) { return null; } @@ -2847,7 +2887,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L List publicIps = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); publicIps.add(publicIp); } } @@ -2877,7 +2917,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L IPAddressVO ip = _ipAddressDao.findByIdIncludingRemoved(staticNat.getSourceIpAddressId()); // ip can't be null, otherwise something wrong happened ip.setAssociatedWithVmId(null); - publicIp = new PublicIp(ip, _vlanDao.findById(ip.getVlanId()), NetUtils.createSequenceBasedMacAddress(ip.getMacAddress())); + publicIp = PublicIp.createFromAddrAndVlan(ip, _vlanDao.findById(ip.getVlanId())); publicIps.add(publicIp); break; } @@ -3036,9 +3076,9 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L throw new InvalidParameterValueException("Source ip address of the rule id=" + firewallStaticNatRule.getId() + " is not static nat enabled"); } - String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId()); + //String dstIp = 
_networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), firewallStaticNatRule.getNetworkId()); ruleVO.setState(FirewallRule.State.Revoke); - staticNatRules.add(new StaticNatRuleImpl(ruleVO, dstIp)); + staticNatRules.add(new StaticNatRuleImpl(ruleVO, ip.getVmIp())); } try { @@ -3070,7 +3110,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } try { - if (!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbs)) { + if (!_lbMgr.applyRules(network, Purpose.LoadBalancing, lbRules)) { s_logger.warn("Failed to cleanup lb rules as a part of shutdownNetworkRules"); success = false; } @@ -3154,7 +3194,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { userIp.setState(State.Releasing); - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); publicIpsToRelease.add(publicIp); } } @@ -3405,6 +3445,8 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L nic.setReservationId(String.valueOf(ip.getVlanTag())); nic.setMacAddress(ip.getMacAddress()); } + nic.setDns1(dc.getDns1()); + nic.setDns2(dc.getDns2()); } if (network.getIp6Gateway() != null) { @@ -3426,10 +3468,10 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L nic.setMacAddress(ip.getMacAddress()); } } + nic.setIp6Dns1(dc.getIp6Dns1()); + nic.setIp6Dns2(dc.getIp6Dns2()); } - nic.setDns1(dc.getDns1()); - nic.setDns2(dc.getDns2()); } @Override @@ -3576,15 +3618,15 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L @Override public StaticNatServiceProvider getStaticNatProviderForNetwork(Network network) { NetworkElement element = getElementForServiceInNetwork(network, Service.StaticNat); - 
assert element instanceof StaticNatServiceProvider; - return (StaticNatServiceProvider)element; + assert ComponentContext.getTargetObject(element) instanceof StaticNatServiceProvider; + return (StaticNatServiceProvider)ComponentContext.getTargetObject(element); } @Override public LoadBalancingServiceProvider getLoadBalancingProviderForNetwork(Network network) { NetworkElement element = getElementForServiceInNetwork(network, Service.Lb); - assert element instanceof LoadBalancingServiceProvider; - return ( LoadBalancingServiceProvider)element; + assert ComponentContext.getTargetObject(element) instanceof LoadBalancingServiceProvider; + return ( LoadBalancingServiceProvider)ComponentContext.getTargetObject(element); } @Override public boolean isNetworkInlineMode(Network network) { @@ -3600,4 +3642,38 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L } return rules.size(); } -} + + @Override + public boolean isSecondaryIpSetForNic(long nicId) { + NicVO nic = _nicDao.findById(nicId); + return nic.getSecondaryIp(); + } + + @Override + public boolean removeVmSecondaryIpsOfNic(long nicId) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + List ipList = _nicSecondaryIpDao.listByNicId(nicId); + if (ipList != null) { + for (NicSecondaryIpVO ip: ipList) { + _nicSecondaryIpDao.remove(ip.getId()); + } + s_logger.debug("Revoving nic secondary ip entry ..."); + } + txn.commit(); + return true; + } + + @Override + public String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, Pod pod,Account owner, + String requestedIp) throws InsufficientAddressCapacityException { + PublicIp ip = assignPublicIpAddress(dc.getId(), null, owner, VlanType.DirectAttached, networkId, requestedIp, false); + if (ip == null) { + s_logger.debug("There is no free public ip address"); + return null; + } + Ip ipAddr = ip.getAddress(); + return ipAddr.addr(); + } + + } diff --git a/server/src/com/cloud/network/NetworkModelImpl.java 
b/server/src/com/cloud/network/NetworkModelImpl.java index ca7a900e39c..46790b3ebdc 100644 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -92,6 +92,7 @@ import com.cloud.user.Account; import com.cloud.user.DomainManager; import com.cloud.user.dao.AccountDao; import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; @@ -109,6 +110,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.VMInstanceDao; @Component @@ -170,6 +172,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { PrivateIpDao _privateIpDao; @Inject UserIpv6AddressDao _ipv6Dao; + @Inject + NicSecondaryIpDao _nicSecondaryIpDao;; private final HashMap _systemNetworks = new HashMap(5); @@ -395,9 +399,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { Network network = _networksDao.findById(networkId); NetworkElement oldElement = getElementImplementingProvider(oldProvider.getName()); NetworkElement newElement = getElementImplementingProvider(newProvider.getName()); - if (oldElement instanceof IpDeployingRequester && newElement instanceof IpDeployingRequester) { - IpDeployer oldIpDeployer = ((IpDeployingRequester)oldElement).getIpDeployer(network); - IpDeployer newIpDeployer = ((IpDeployingRequester)newElement).getIpDeployer(network); + if (ComponentContext.getTargetObject(oldElement) instanceof IpDeployingRequester && ComponentContext.getTargetObject(newElement) instanceof IpDeployingRequester) { + IpDeployer oldIpDeployer = ((IpDeployingRequester)ComponentContext.getTargetObject(oldElement)).getIpDeployer(network); + IpDeployer newIpDeployer = 
((IpDeployingRequester)ComponentContext.getTargetObject(newElement)).getIpDeployer(network); if (!oldIpDeployer.getProvider().getName().equals(newIpDeployer.getProvider().getName())) { throw new InvalidParameterException("There would be multiple providers for IP " + publicIp.getAddress() + "!"); } @@ -568,6 +572,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public boolean isIP6AddressAvailableInVlan(long vlanId) { VlanVO vlan = _vlanDao.findById(vlanId); + if (vlan.getIp6Range() == null) { + return false; + } long existedCount = _ipv6Dao.countExistedIpsInVlan(vlanId); BigInteger existedInt = BigInteger.valueOf(existedCount); BigInteger rangeInt = NetUtils.countIp6InRange(vlan.getIp6Range()); @@ -706,7 +713,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return null; } - return new PublicIp(addr, _vlanDao.findById(addr.getVlanId()), NetUtils.createSequenceBasedMacAddress(addr.getMacAddress())); + return PublicIp.createFromAddrAndVlan(addr, _vlanDao.findById(addr.getVlanId())); } @Override @@ -735,7 +742,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public Nic getNicInNetwork(long vmId, long networkId) { - return _nicDao.findByInstanceIdAndNetworkId(networkId, vmId); + return _nicDao.findByInstanceIdAndNetworkIdIncludingRemoved(networkId, vmId); } @Override @@ -1402,7 +1409,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { return true; } IPAddressVO ipVO = _ipAddressDao.findById(userIp.getId()); - PublicIp publicIp = new PublicIp(ipVO, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(ipVO.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(ipVO, _vlanDao.findById(userIp.getVlanId())); if (!canIpUsedForService(publicIp, service, networkId)) { return false; } @@ -1621,6 +1628,8 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { public Set 
getAvailableIps(Network network, String requestedIp) { String[] cidr = network.getCidr().split("/"); List ips = _nicDao.listIpAddressInNetwork(network.getId()); + List secondaryIps = _nicSecondaryIpDao.listSecondaryIpAddressInNetwork(network.getId()); + ips.addAll(secondaryIps); Set allPossibleIps = NetUtils.getAllIpsFromCidr(cidr[0], Integer.parseInt(cidr[1])); Set usedIps = new TreeSet(); @@ -1762,17 +1771,26 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { @Override public boolean networkIsConfiguredForExternalNetworking(long zoneId, long networkId) { - boolean netscalerInNetwork = isProviderForNetwork(Network.Provider.Netscaler, networkId); - boolean juniperInNetwork = isProviderForNetwork(Network.Provider.JuniperSRX, networkId); - boolean f5InNetwork = isProviderForNetwork(Network.Provider.F5BigIp, networkId); - - if (netscalerInNetwork || juniperInNetwork || f5InNetwork) { - return true; - } else { - return false; + List networkProviders = getNetworkProviders(networkId); + for(Provider provider : networkProviders){ + if(provider.isExternal()){ + return true; + } } + return false; } + private List getNetworkProviders(long networkId) { + List providerNames = _ntwkSrvcDao.getDistinctProviders(networkId); + Map providers = new HashMap(); + for (String providerName : providerNames) { + if(!providers.containsKey(providerName)){ + providers.put(providerName, Network.Provider.getProvider(providerName)); + } + } + + return new ArrayList(providers.values()); + } @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -1872,8 +1890,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { for (IpAddress addr : addrs) { if (addr.isSourceNat()) { sourceNatIp = _ipAddressDao.findById(addr.getId()); - return new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress())); + return 
PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId())); } } @@ -1918,9 +1935,41 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { } int cidrSize = NetUtils.getIp6CidrSize(ip6Cidr); - // Ipv6 cidr limit should be at least /64 - if (cidrSize < 64) { - throw new InvalidParameterValueException("The cidr size of IPv6 network must be no less than 64 bits!"); + // we only support cidr == 64 + if (cidrSize != 64) { + throw new InvalidParameterValueException("The cidr size of IPv6 network must be 64 bits!"); + } + } + + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) throws InvalidParameterValueException { + if (ip4 != null) { + if (!NetUtils.isValidIp(ip4)) { + throw new InvalidParameterValueException("Invalid specified IPv4 address " + ip4); + } + //Other checks for ipv4 are done in assignPublicIpAddress() + } + if (ip6 != null) { + if (!NetUtils.isValidIpv6(ip6)) { + throw new InvalidParameterValueException("Invalid specified IPv6 address " + ip6); + } + if (_ipv6Dao.findByNetworkIdAndIp(networkId, ip6) != null) { + throw new InvalidParameterValueException("The requested IP is already taken!"); + } + List vlans = _vlanDao.listVlansByNetworkId(networkId); + if (vlans == null) { + throw new CloudRuntimeException("Cannot find related vlan attached to network " + networkId); + } + Vlan ipVlan = null; + for (Vlan vlan : vlans) { + if (NetUtils.isIp6InRange(ip6, vlan.getIp6Range())) { + ipVlan = vlan; + break; + } + } + if (ipVlan == null) { + throw new InvalidParameterValueException("Requested IPv6 is not in the predefined range!"); + } } } } diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 70f0fa818bd..52e81e5c8c8 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -37,23 +37,28 @@ import javax.inject.Inject; import 
javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity.ACLType; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementorsCmd; import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; +import org.bouncycastle.util.IPAddress; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DeployDestination; import com.cloud.domain.Domain; @@ -65,7 +70,10 @@ import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.*; +import com.cloud.host.Host; +import com.cloud.host.dao.HostDao; import com.cloud.network.IpAddress.State; +import com.cloud.vm.Nic; import com.cloud.network.Network.Capability; import com.cloud.network.Network.GuestType; import com.cloud.network.Network.Provider; @@ -81,7 +89,9 @@ import com.cloud.network.element.VirtualRouterElement; import com.cloud.network.element.VpcVirtualRouterElement; import com.cloud.network.guru.NetworkGuru; import com.cloud.network.rules.FirewallRule.Purpose; +import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.rules.FirewallRuleVO; +import 
com.cloud.network.rules.PortForwardingRuleVO; import com.cloud.network.rules.RulesManager; import com.cloud.network.vpc.PrivateIpVO; import com.cloud.network.vpc.VpcManager; @@ -103,6 +113,7 @@ import com.cloud.utils.AnnotationHelper; import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.*; @@ -111,6 +122,8 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.*; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.NicSecondaryIpVO; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import java.util.*; @@ -202,6 +215,16 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Inject NetworkModel _networkModel; + @Inject + NicSecondaryIpDao _nicSecondaryIpDao; + + @Inject + PortForwardingRulesDao _portForwardingDao; + @Inject + HostDao _hostDao; + @Inject + HostPodDao _hostPodDao; + int _cidrLimit; boolean _allowSubdomainNetworkAccess; @@ -448,6 +471,196 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } + public String allocateSecondaryGuestIP (Account ipOwner, long zoneId, Long nicId, Long networkId, String requestedIp) throws InsufficientAddressCapacityException { + + Long accountId = null; + Long domainId = null; + Long vmId = null; + String ipaddr = null; + + if (networkId == null) { + throw new InvalidParameterValueException("Invalid network id is given"); + } + + Network network = _networksDao.findById(networkId); + if (network == null) { + throw new InvalidParameterValueException("Invalid network id is given"); + } + accountId = network.getAccountId(); + domainId = network.getDomainId(); + + // verify permissions + _accountMgr.checkAccess(ipOwner, null, true, 
network); + + //check whether the nic belongs to user vm. + NicVO nicVO = _nicDao.findById(nicId); + if (nicVO == null) { + throw new InvalidParameterValueException("There is no nic for the " + nicId); + } + + if (nicVO.getVmType() != VirtualMachine.Type.User) { + throw new InvalidParameterValueException("The nic is not belongs to user vm"); + } + + DataCenter dc = _dcDao.findById(network.getDataCenterId()); + Long id = nicVO.getInstanceId(); + + DataCenter zone = _configMgr.getZone(zoneId); + if (zone == null) { + throw new InvalidParameterValueException("Invalid zone Id is given"); + } + + s_logger.debug("Calling the ip allocation ..."); + if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { + try { + ipaddr = _networkMgr.allocateGuestIP(ipOwner, false, zoneId, networkId, requestedIp); + } catch (InsufficientAddressCapacityException e) { + throw new InvalidParameterValueException("Allocating guest ip for nic failed"); + } + } else if (dc.getNetworkType() == NetworkType.Basic) { + Account caller = UserContext.current().getCaller(); + long callerUserId = UserContext.current().getCallerUserId(); + _accountMgr.checkAccess(caller, AccessType.UseNetwork, false, network); + //handle the basic networks here + VirtualMachine vm = _userVmDao.findById(nicVO.getInstanceId()); + if (vm == null) { + throw new InvalidParameterValueException("There is no vm with the nic"); + } + VMInstanceVO vmi = (VMInstanceVO)vm; + Long podId = vmi.getPodIdToDeployIn(); + if (podId == null) { + throw new InvalidParameterValueException("vm pod id is null"); + } + Pod pod = _hostPodDao.findById(podId); + if (pod == null) { + throw new InvalidParameterValueException("vm pod is null"); + } + + try { + ipaddr = _networkMgr.allocatePublicIpForGuestNic(networkId, dc, pod, caller, requestedIp); + if (ipaddr == null) { + throw new InvalidParameterValueException("Allocating ip to guest nic " + nicId + " failed"); + } + } catch 
(InsufficientAddressCapacityException e) { + s_logger.error("Allocating ip to guest nic " + nicId + " failed"); + return null; + } + } else if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId()) && zone.getNetworkType() == NetworkType.Advanced) { + // if shared network in the advanced zone, then check the caller against the network for 'AccessType.UseNetwork' + Account caller = UserContext.current().getCaller(); + long callerUserId = UserContext.current().getCallerUserId(); + _accountMgr.checkAccess(caller, AccessType.UseNetwork, false, network); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Associate IP address called by the user " + callerUserId + " account " + ipOwner.getId()); + } + } else { + s_logger.error("AddIpToVMNic is not supported in this network..."); + return null; + } + + if (ipaddr != null) { + // we got the ip addr so up the nics table and secodary ip + Transaction txn = Transaction.currentTxn(); + txn.start(); + + boolean nicSecondaryIpSet = nicVO.getSecondaryIp(); + if (!nicSecondaryIpSet) { + nicVO.setSecondaryIp(true); + // commit when previously set ?? 
+ s_logger.debug("Setting nics table ..."); + _nicDao.update(nicId, nicVO); + } + + s_logger.debug("Setting nic_secondary_ip table ..."); + vmId = nicVO.getInstanceId(); + NicSecondaryIpVO secondaryIpVO = new NicSecondaryIpVO(nicId, ipaddr, vmId, accountId, domainId, networkId); + _nicSecondaryIpDao.persist(secondaryIpVO); + txn.commit(); + } + return ipaddr; + } + + @DB + public boolean releaseSecondaryIpFromNic (long ipAddressId) { + Account caller = UserContext.current().getCaller(); + boolean success = false; + + // Verify input parameters + NicSecondaryIpVO secIpVO= _nicSecondaryIpDao.findById(ipAddressId); + if (secIpVO == null) { + throw new InvalidParameterValueException("Unable to find ip address by id"); + } + + Network network = _networksDao.findById(secIpVO.getNetworkId()); + + // verify permissions + _accountMgr.checkAccess(caller, null, true, network); + + Long nicId = secIpVO.getNicId(); + s_logger.debug("ip id = " + ipAddressId + " nic id = " + nicId); + //check is this the last secondary ip for NIC + List ipList = _nicSecondaryIpDao.listByNicId(nicId); + boolean lastIp = false; + if (ipList.size() == 1) { + // this is the last secondary ip to nic + lastIp = true; + } + + DataCenter dc = _dcDao.findById(network.getDataCenterId()); + if (dc == null) { + throw new InvalidParameterValueException("Invalid zone Id is given"); + } + + s_logger.debug("Calling the ip allocation ..."); + if (dc.getNetworkType() == NetworkType.Advanced && network.getGuestType() == Network.GuestType.Isolated) { + //check PF or static NAT is configured on this ip address + String secondaryIp = secIpVO.getIp4Address(); + List pfRuleList = _portForwardingDao.listByDestIpAddr(secondaryIp); + if (pfRuleList.size() != 0) { + s_logger.debug("VM nic IP " + secondaryIp + " is associated with the port forwarding rule"); + throw new InvalidParameterValueException("Can't remove the secondary ip " + secondaryIp + " is associate with the port forwarding rule"); + } + //check if the 
secondary ip associated with any static nat rule + IPAddressVO publicIpVO = _ipAddressDao.findByVmIp(secondaryIp); + if (publicIpVO != null) { + s_logger.debug("VM nic IP " + secondaryIp + " is associated with the static NAT rule public IP address id " + publicIpVO.getId()); + throw new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); + } + } else if (dc.getNetworkType() == NetworkType.Basic) { + IPAddressVO ip = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secIpVO.getIp4Address()); + if (ip != null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + _networkMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + txn.commit(); + } + } else if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId()) && dc.getNetworkType() == NetworkType.Advanced) { + throw new InvalidParameterValueException("Not supported for this network now"); + } + + success = removeNicSecondaryIP(secIpVO, lastIp); + return success; + } + + boolean removeNicSecondaryIP(NicSecondaryIpVO ipVO, boolean lastIp) { + Transaction txn = Transaction.currentTxn(); + long nicId = ipVO.getNicId(); + NicVO nic = _nicDao.findById(nicId); + + txn.start(); + + if (lastIp) { + nic.setSecondaryIp(false); + s_logger.debug("Setting nics secondary ip to false ..."); + _nicDao.update(nicId, nic); + } + + s_logger.debug("Revoving nic secondary ip entry ..."); + _nicSecondaryIpDao.remove(ipVO.getId()); + txn.commit(); + return true; + } @Override @DB @@ -1491,7 +1704,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @DB @ActionEvent(eventType = EventTypes.EVENT_NETWORK_UPDATE, eventDescription = "updating network", async = true) public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr) { 
+ User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr, String guestVmCidr) { boolean restartNetwork = false; // verify input parameters @@ -1552,8 +1765,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { ex.addProxyObject(networkOffering, networkOfferingId, "networkOfferingId"); throw ex; } - - //can't update from vpc to non-vpc network offering boolean forVpcNew = _configMgr.isOfferingForVpc(networkOffering); boolean vorVpcOriginal = _configMgr.isOfferingForVpc(_configMgr.getNetworkOffering(oldNetworkOfferingId)); @@ -1585,6 +1796,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { networkOfferingChanged = true; } } + Map newSvcProviders = new HashMap(); if (networkOfferingChanged) { newSvcProviders = _networkMgr.finalizeServicesAndProvidersForNetwork(_configMgr.getNetworkOffering(networkOfferingId), network.getPhysicalNetworkId()); @@ -1616,6 +1828,81 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { restartNetwork = true; } + //IP reservation checks + // allow reservation only to Isolated Guest networks + DataCenter dc = _dcDao.findById(network.getDataCenterId()); + String networkCidr = network.getNetworkCidr(); + + if (guestVmCidr!= null ) { + if(dc.getNetworkType() == NetworkType.Basic) { + throw new InvalidParameterValueException("Guest VM CIDR can't be specified for zone with " + NetworkType.Basic + " networking"); + } + if (network.getGuestType() != GuestType.Isolated) { + throw new InvalidParameterValueException("Can only allow IP Reservation in networks with guest type " + GuestType.Isolated); + } + if (networkOfferingChanged == true) { + throw new InvalidParameterValueException("Cannot specify this nework offering change and guestVmCidr at same time. 
Specify only one."); + } + if (!(network.getState() == Network.State.Implemented)) { + throw new InvalidParameterValueException ("The network must be in " + Network.State.Implemented + " state. IP Reservation cannot be applied in " + network.getState() + " state"); + } + if (!NetUtils.isValidCIDR(guestVmCidr)) { + throw new InvalidParameterValueException ("Invalid format of Guest VM CIDR."); + } + if (!NetUtils.validateGuestCidr(guestVmCidr)) { + throw new InvalidParameterValueException ("Invalid format of Guest VM CIDR. Make sure it is RFC1918 compliant. "); + } + + // If networkCidr is null it implies that there was no prior IP reservation, so the network cidr is network.getCidr() + // But in case networkCidr is a non null value (IP reservation already exists), it implies network cidr is networkCidr + if (networkCidr != null && ! NetUtils.isNetworkAWithinNetworkB(guestVmCidr, networkCidr)) { + throw new InvalidParameterValueException ("Invalid value of Guest VM CIDR. For IP Reservation, Guest VM CIDR should be a subset of network CIDR : " + networkCidr); + } else { + if (! NetUtils.isNetworkAWithinNetworkB(guestVmCidr, network.getCidr())) { + throw new InvalidParameterValueException ("Invalid value of Guest VM CIDR. 
For IP Reservation, Guest VM CIDR should be a subset of network CIDR : " + network.getCidr()); + } + } + + // This check makes sure there are no active IPs existing outside the guestVmCidr in the network + String[] guestVmCidrPair = guestVmCidr.split("\\/"); + Long size = Long.valueOf(guestVmCidrPair[1]); + List nicsPresent = _nicDao.listByNetworkId(networkId); + + String cidrIpRange[] = NetUtils.getIpRangeFromCidr(guestVmCidrPair[0], size); + s_logger.info("The start IP of the specified guest vm cidr is: " + cidrIpRange[0] +" and end IP is: " + cidrIpRange[1]); + long startIp = NetUtils.ip2Long(cidrIpRange[0]); + long endIp = NetUtils.ip2Long(cidrIpRange[1]); + long range = endIp - startIp + 1; + s_logger.info("The specified guest vm cidr has " + range + " IPs"); + + for (NicVO nic : nicsPresent) { + long nicIp = NetUtils.ip2Long(nic.getIp4Address()); + //check if nic IP is outside the guest vm cidr + if (nicIp < startIp || nicIp > endIp) { + if(!(nic.getState() == Nic.State.Deallocating)) { + throw new InvalidParameterValueException("Active IPs like " + nic.getIp4Address() + " exist outside the Guest VM CIDR. Cannot apply reservation "); + } + } + } + + // When reservation is applied for the first time, network_cidr will be null + // Populate it with the actual network cidr + if (network.getNetworkCidr() == null) { + network.setNetworkCidr(network.getCidr()); + } + + // Condition for IP Reservation reset : guestVmCidr and network CIDR are same + if (network.getNetworkCidr().equals(guestVmCidr)) { + s_logger.warn("Guest VM CIDR and Network CIDR both are same, reservation will reset."); + network.setNetworkCidr(null); + } + // Finally update "cidr" with the guestVmCidr + // which becomes the effective address space for CloudStack guest VMs + network.setCidr(guestVmCidr); + _networksDao.update(networkId, network); + s_logger.info("IP Reservation has been applied. 
The new CIDR for Guests Vms is " + guestVmCidr); + } + ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); // 1) Shutdown all the elements and cleanup all the rules. Don't allow to shutdown network in intermediate // states - Shutdown and Implementing @@ -1635,6 +1922,15 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // We need to shutdown the network, since we want to re-implement the network. s_logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + //check if network has reservation + if(NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { + s_logger.warn ("Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); + //set cidr to the newtork cidr + network.setCidr(network.getNetworkCidr()); + //set networkCidr to null to bring network back to no IP reservation state + network.setNetworkCidr(null); + } + if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); @@ -1655,7 +1951,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { boolean validStateToImplement = (networkState == Network.State.Implemented || networkState == Network.State.Setup || networkState == Network.State.Allocated); if (restartNetwork && !validStateToImplement) { CloudRuntimeException ex = new CloudRuntimeException("Failed to implement the network elements and resources as a part of update to network with specified id; network is in wrong state: " + networkState); - ex.addProxyObject(network, networkId, "networkId"); + ex.addProxyObject(network, networkId, "networkId"); throw ex; } @@ 
-1681,11 +1977,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { UsageEventUtils.saveUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), vm.getHostName(), networkOfferingId, null, isDefault); } txn.commit(); - } else { + } else { network.setNetworkOfferingId(networkOfferingId); _networksDao.update(networkId, network, _networkMgr.finalizeServicesAndProvidersForNetwork(_configMgr.getNetworkOffering(networkOfferingId), network.getPhysicalNetworkId())); } - } else { + } else { _networksDao.update(networkId, network); } @@ -1711,7 +2007,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // 4) if network has been upgraded from a non persistent ntwk offering to a persistent ntwk offering, // implement the network if its not already - if ( !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { + if ( networkOfferingChanged && !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { if( network.getState() == Network.State.Allocated) { try { DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); @@ -1731,7 +2027,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } - + protected Set getAvailableIps(Network network, String requestedIp) { String[] cidr = network.getCidr().split("/"); @@ -1811,7 +2107,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { List publicIps = new ArrayList(); if (userIps != null && !userIps.isEmpty()) { for (IPAddressVO userIp : userIps) { - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); publicIps.add(publicIp); } } @@ -2738,10 +3034,12 @@ public class NetworkServiceImpl extends 
ManagerBase implements NetworkService { PhysicalNetworkServiceProvider nsp = addProviderToPhysicalNetwork(physicalNetworkId, Network.Provider.VirtualRouter.getName(), null, null); // add instance of the provider - VirtualRouterElement element = (VirtualRouterElement) _networkModel.getElementImplementingProvider(Network.Provider.VirtualRouter.getName()); - if (element == null) { + NetworkElement networkElement = _networkModel.getElementImplementingProvider(Network.Provider.VirtualRouter.getName()); + if (networkElement == null) { throw new CloudRuntimeException("Unable to find the Network Element implementing the VirtualRouter Provider"); } + + VirtualRouterElement element = (VirtualRouterElement)networkElement; element.addElement(nsp.getId(), VirtualRouterProviderType.VirtualRouter); return nsp; @@ -2751,11 +3049,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkServiceProvider nsp = addProviderToPhysicalNetwork(physicalNetworkId, Network.Provider.VPCVirtualRouter.getName(), null, null); - // add instance of the provider - VpcVirtualRouterElement element = (VpcVirtualRouterElement) _networkModel.getElementImplementingProvider(Network.Provider.VPCVirtualRouter.getName()); - if (element == null) { + + NetworkElement networkElement = _networkModel.getElementImplementingProvider(Network.Provider.VPCVirtualRouter.getName()); + if (networkElement == null) { throw new CloudRuntimeException("Unable to find the Network Element implementing the VPCVirtualRouter Provider"); } + + VpcVirtualRouterElement element = (VpcVirtualRouterElement)networkElement; element.addElement(nsp.getId(), VirtualRouterProviderType.VPCVirtualRouter); return nsp; @@ -2926,4 +3226,21 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { public Network getNetwork(String networkUuid) { return _networksDao.findByUuid(networkUuid); } + + @Override + public List listNics(ListNicsCmd cmd) { + Account caller = 
UserContext.current().getCaller(); + Long nicId = cmd.getNicId(); + Long vmId = cmd.getVmId(); + + UserVmVO userVm = _userVmDao.findById(vmId); + + if (userVm == null) { + InvalidParameterValueException ex = new InvalidParameterValueException("Virtual mahine id does not exist"); + ex.addProxyObject(userVm, vmId, "vmId"); + throw ex; + } + _accountMgr.checkAccess(caller, null, true, userVm); + return _networkMgr.listVmNics(vmId, nicId); + } } diff --git a/server/src/com/cloud/network/NetworkStateListener.java b/server/src/com/cloud/network/NetworkStateListener.java index bafe6d2d1f9..038e76988bf 100644 --- a/server/src/com/cloud/network/NetworkStateListener.java +++ b/server/src/com/cloud/network/NetworkStateListener.java @@ -23,24 +23,23 @@ import com.cloud.network.Network.Event; import com.cloud.network.Network.State; import com.cloud.network.dao.NetworkDao; import com.cloud.server.ManagementServer; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.fsm.StateListener; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; - -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Map; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import javax.inject.Inject; +import java.util.HashMap; +import java.util.Map; public class NetworkStateListener implements StateListener { @Inject protected UsageEventDao _usageEventDao; @Inject protected NetworkDao _networkDao; - // get the event bus provider if configured - @Inject protected EventBus _eventBus; + protected static EventBus _eventBus = null; private static final Logger s_logger = Logger.getLogger(NetworkStateListener.class); @@ -63,7 +62,9 @@ public class NetworkStateListener implements StateListener i protected final SearchBuilder SystemRuleSearch; protected final GenericSearchBuilder RulesByIpCount; - @Inject protected FirewallRulesCidrsDaoImpl 
_firewallRulesCidrsDao; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject protected FirewallRulesCidrsDao _firewallRulesCidrsDao; + @Inject ResourceTagDao _tagsDao; @Inject IPAddressDao _ipDao; protected FirewallRulesDaoImpl() { diff --git a/server/src/com/cloud/network/dao/IPAddressDao.java b/server/src/com/cloud/network/dao/IPAddressDao.java index 9cdb975d208..3d588fa9307 100755 --- a/server/src/com/cloud/network/dao/IPAddressDao.java +++ b/server/src/com/cloud/network/dao/IPAddressDao.java @@ -62,5 +62,10 @@ public interface IPAddressDao extends GenericDao { long countFreePublicIPs(); - long countFreeIPsInNetwork(long networkId); -} + long countFreeIPsInNetwork(long networkId); + IPAddressVO findByVmIp(String vmIp); + + IPAddressVO findByAssociatedVmIdAndVmIp(long vmId, String vmIp); + + IPAddressVO findByIpAndNetworkId(long networkId, String ipAddress); +} diff --git a/server/src/com/cloud/network/dao/IPAddressDaoImpl.java b/server/src/com/cloud/network/dao/IPAddressDaoImpl.java index e7067d98156..73f310fd628 100755 --- a/server/src/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/server/src/com/cloud/network/dao/IPAddressDaoImpl.java @@ -30,9 +30,11 @@ import org.springframework.stereotype.Component; import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.VlanVO; +import com.cloud.dc.dao.VlanDao; import com.cloud.dc.dao.VlanDaoImpl; import com.cloud.network.IpAddress.State; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -57,9 +59,9 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen protected GenericSearchBuilder AllocatedIpCount; protected GenericSearchBuilder AllIpCountForDashboard; protected GenericSearchBuilder AllocatedIpCountForAccount; - @Inject protected VlanDaoImpl _vlanDao; + @Inject protected VlanDao _vlanDao; protected GenericSearchBuilder CountFreePublicIps; - 
@Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; // make it public for JUnit test public IPAddressDaoImpl() { @@ -80,6 +82,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen AllFieldsSearch.and("sourcenetwork", AllFieldsSearch.entity().getSourceNetworkId(), Op.EQ); AllFieldsSearch.and("physicalNetworkId", AllFieldsSearch.entity().getPhysicalNetworkId(), Op.EQ); AllFieldsSearch.and("vpcId", AllFieldsSearch.entity().getVpcId(), Op.EQ); + AllFieldsSearch.and("associatedVmIp", AllFieldsSearch.entity().getVmIp(), Op.EQ); AllFieldsSearch.done(); VlanDbIdSearchUnallocated = createSearchBuilder(); @@ -181,6 +184,14 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen return findOneBy(sc); } + @Override + public IPAddressVO findByIpAndNetworkId(long networkId, String ipAddress) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("network", networkId); + sc.setParameters("ipAddress", ipAddress); + return findOneBy(sc); + } + @Override public IPAddressVO findByIpAndDcId(long dcId, String ipAddress) { SearchCriteria sc = AllFieldsSearch.create(); @@ -232,6 +243,12 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen return findOneBy(sc); } + @Override + public IPAddressVO findByVmIp(String vmIp) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("associatedVmIp", vmIp); + return findOneBy(sc); + } @Override public int countIPs(long dcId, long vlanId, boolean onlyCountAllocated) { SearchCriteria sc = onlyCountAllocated ? 
AllocatedIpCount.create() : AllIpCount.create(); @@ -347,5 +364,13 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen boolean result = super.remove(id); txn.commit(); return result; - } + } + + @Override + public IPAddressVO findByAssociatedVmIdAndVmIp(long vmId, String vmIp) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("associatedWithVmId", vmId); + sc.setParameters("associatedVmIp", vmIp); + return findOneBy(sc); + } } diff --git a/server/src/com/cloud/network/dao/IPAddressVO.java b/server/src/com/cloud/network/dao/IPAddressVO.java index 00da5eb9a39..8ce8d3382b2 100644 --- a/server/src/com/cloud/network/dao/IPAddressVO.java +++ b/server/src/com/cloud/network/dao/IPAddressVO.java @@ -112,6 +112,10 @@ public class IPAddressVO implements IpAddress { @Column(name="vpc_id") private Long vpcId; + @Column(name="dnat_vmip") + private String vmIp; + + protected IPAddressVO() { this.uuid = UUID.randomUUID().toString(); } @@ -288,8 +292,18 @@ public class IPAddressVO implements IpAddress { return vpcId; } - @Override + @Override public void setVpcId(Long vpcId) { this.vpcId = vpcId; } + + @Override + public String getVmIp() { + return vmIp; + } + + @Override + public void setVmIp(String vmIp) { + this.vmIp = vmIp; + } } diff --git a/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java b/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java index 547dc608e73..f211a7f1a79 100644 --- a/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java +++ b/server/src/com/cloud/network/dao/LoadBalancerDaoImpl.java @@ -51,7 +51,7 @@ public class LoadBalancerDaoImpl extends GenericDaoBase im private final SearchBuilder AccountAndNameSearch; protected final SearchBuilder TransitionStateSearch; - @Inject protected FirewallRulesCidrsDaoImpl _portForwardingRulesCidrsDao; + @Inject protected FirewallRulesCidrsDao _portForwardingRulesCidrsDao; protected LoadBalancerDaoImpl() { ListByIp = createSearchBuilder(); diff --git 
a/server/src/com/cloud/baremetal/BareMetalVmManager.java b/server/src/com/cloud/network/dao/NetworkAccountDao.java similarity index 84% rename from server/src/com/cloud/baremetal/BareMetalVmManager.java rename to server/src/com/cloud/network/dao/NetworkAccountDao.java index 900c57fdf80..c4435c8074e 100644 --- a/server/src/com/cloud/baremetal/BareMetalVmManager.java +++ b/server/src/com/cloud/network/dao/NetworkAccountDao.java @@ -14,9 +14,9 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.baremetal; +package com.cloud.network.dao; -import com.cloud.vm.UserVmManager; +import com.cloud.utils.db.GenericDao; -public interface BareMetalVmManager extends UserVmManager { +public interface NetworkAccountDao extends GenericDao { } diff --git a/server/src/com/cloud/network/dao/NetworkAccountDaoImpl.java b/server/src/com/cloud/network/dao/NetworkAccountDaoImpl.java index f0b71562f62..09479056f14 100644 --- a/server/src/com/cloud/network/dao/NetworkAccountDaoImpl.java +++ b/server/src/com/cloud/network/dao/NetworkAccountDaoImpl.java @@ -22,7 +22,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.GenericDaoBase; @Component -public class NetworkAccountDaoImpl extends GenericDaoBase implements GenericDao { +public class NetworkAccountDaoImpl extends GenericDaoBase implements NetworkAccountDao { public NetworkAccountDaoImpl() { super(); } diff --git a/server/src/com/cloud/network/dao/NetworkDaoImpl.java b/server/src/com/cloud/network/dao/NetworkDaoImpl.java index 43c581f83b9..1bc8973bc50 100644 --- a/server/src/com/cloud/network/dao/NetworkDaoImpl.java +++ b/server/src/com/cloud/network/dao/NetworkDaoImpl.java @@ -39,13 +39,11 @@ import com.cloud.network.Networks.Mode; import com.cloud.network.Networks.TrafficType; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; +import 
com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDaoImpl; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.*; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Func; @@ -76,13 +74,13 @@ public class NetworkDaoImpl extends GenericDaoBase implements N - @Inject ResourceTagsDaoImpl _tagsDao; - @Inject NetworkAccountDaoImpl _accountsDao; - @Inject NetworkDomainDaoImpl _domainsDao; - @Inject NetworkOpDaoImpl _opDao; - @Inject NetworkServiceMapDaoImpl _ntwkSvcMap; - @Inject NetworkOfferingDaoImpl _ntwkOffDao; - @Inject NetworkOpDaoImpl _ntwkOpDao; + @Inject ResourceTagDao _tagsDao; + @Inject NetworkAccountDao _accountsDao; + @Inject NetworkDomainDao _domainsDao; + @Inject NetworkOpDao _opDao; + @Inject NetworkServiceMapDao _ntwkSvcMap; + @Inject NetworkOfferingDao _ntwkOffDao; + @Inject NetworkOpDao _ntwkOpDao; TableGenerator _tgMacAddress; diff --git a/server/src/com/cloud/network/dao/NetworkOpDao.java b/server/src/com/cloud/network/dao/NetworkOpDao.java new file mode 100644 index 00000000000..f492dbd254f --- /dev/null +++ b/server/src/com/cloud/network/dao/NetworkOpDao.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.dao; + +import com.cloud.utils.db.GenericDao; + +public interface NetworkOpDao extends GenericDao { + public int getActiveNics(long networkId); + public void changeActiveNicsBy(long networkId, int count); + public void setCheckForGc(long networkId); + public void clearCheckForGc(long networkId); +} diff --git a/server/src/com/cloud/network/dao/NetworkOpDaoImpl.java b/server/src/com/cloud/network/dao/NetworkOpDaoImpl.java index bdc9f50398c..a3f54b78850 100644 --- a/server/src/com/cloud/network/dao/NetworkOpDaoImpl.java +++ b/server/src/com/cloud/network/dao/NetworkOpDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.UpdateBuilder; @Component -public class NetworkOpDaoImpl extends GenericDaoBase implements GenericDao { +public class NetworkOpDaoImpl extends GenericDaoBase implements NetworkOpDao { protected final SearchBuilder AllFieldsSearch; protected final GenericSearchBuilder ActiveNicsSearch; protected final Attribute _activeNicsAttribute; diff --git a/server/src/com/cloud/network/dao/NetworkVO.java b/server/src/com/cloud/network/dao/NetworkVO.java index d51a065ff83..77b40c8a5c9 100644 --- a/server/src/com/cloud/network/dao/NetworkVO.java +++ b/server/src/com/cloud/network/dao/NetworkVO.java @@ -81,6 +81,9 @@ public class NetworkVO implements Network { @Column(name="cidr") String cidr; + @Column(name="network_cidr") + String networkCidr; + @Column(name="network_offering_id") long networkOfferingId; @@ -198,6 +201,7 @@ public class NetworkVO implements Network { 
related, name, displayText, networkDomain, guestType, dcId, physicalNetworkId, aclType, specifyIpRanges, vpcId); this.gateway = that.getGateway(); this.cidr = that.getCidr(); + this.networkCidr = that.getNetworkCidr(); this.broadcastUri = that.getBroadcastUri(); this.broadcastDomainType = that.getBroadcastDomainType(); this.guruName = guruName; @@ -353,7 +357,10 @@ public class NetworkVO implements Network { public void setGateway(String gateway) { this.gateway = gateway; } - + // "cidr" is the Cloudstack managed address space, all CloudStack managed vms get IP address from "cidr" + // In general "cidr" also serves as the network cidr + // But in case IP reservation feature is configured for a Guest network, "network_cidr" is the Effective network cidr for the network, + // "cidr" will still continue to be the effective address space for CloudStack managed vms in that Guest network @Override public String getCidr() { return cidr; @@ -363,6 +370,18 @@ public class NetworkVO implements Network { this.cidr = cidr; } + // "networkcidr" is the network CIDR of the guest network which is configured with IP reservation feature + // It is the summation of "cidr" and the reservedIPrange(the address space used for non cloudstack purposes.) 
+ // For networks not using IP reservation "networkcidr" is always null + @Override + public String getNetworkCidr() { + return networkCidr; + } + + public void setNetworkCidr(String networkCidr) { + this.networkCidr = networkCidr; + } + @Override public URI getBroadcastUri() { return broadcastUri; diff --git a/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java b/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java index 8e67d8bb5e8..1e26a51cead 100644 --- a/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java +++ b/server/src/com/cloud/network/dao/PhysicalNetworkDaoImpl.java @@ -36,7 +36,7 @@ import com.cloud.utils.db.SearchCriteria.Op; public class PhysicalNetworkDaoImpl extends GenericDaoBase implements PhysicalNetworkDao { final SearchBuilder ZoneSearch; - @Inject protected PhysicalNetworkTrafficTypeDaoImpl _trafficTypeDao; + @Inject protected PhysicalNetworkTrafficTypeDao _trafficTypeDao; protected PhysicalNetworkDaoImpl() { super(); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDao.java b/server/src/com/cloud/network/dao/RouterNetworkDao.java old mode 100755 new mode 100644 similarity index 75% rename from plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDao.java rename to server/src/com/cloud/network/dao/RouterNetworkDao.java index 0f20c677f5d..bd275bdd5fc --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalCmdbDao.java +++ b/server/src/com/cloud/network/dao/RouterNetworkDao.java @@ -5,21 +5,22 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -// -// Automatically generated by addcopyright.py at 01/29/2013 -package com.cloud.baremetal.database; - -import com.cloud.utils.db.GenericDao; - -public interface BaremetalCmdbDao extends GenericDao { - -} +package com.cloud.network.dao; + +import java.util.List; + +import com.cloud.utils.db.GenericDao; + +public interface RouterNetworkDao extends GenericDao { + public List getRouterNetworks(long routerId); + public RouterNetworkVO findByRouterAndNetwork (long routerId, long networkId); +} diff --git a/server/src/com/cloud/network/dao/RouterNetworkDaoImpl.java b/server/src/com/cloud/network/dao/RouterNetworkDaoImpl.java index e560713aca5..b0b633c55e9 100644 --- a/server/src/com/cloud/network/dao/RouterNetworkDaoImpl.java +++ b/server/src/com/cloud/network/dao/RouterNetworkDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -public class RouterNetworkDaoImpl extends GenericDaoBase implements GenericDao{ +public class RouterNetworkDaoImpl extends GenericDaoBase implements RouterNetworkDao { protected final GenericSearchBuilder RouterNetworksSearch; protected final SearchBuilder AllFieldsSearch; diff --git a/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java b/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java index 2830abe699f..4745f9aad01 100644 --- a/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java +++ b/server/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java @@ -35,8 +35,8 @@ import 
com.cloud.utils.db.SearchCriteria; public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase implements Site2SiteVpnConnectionDao { private static final Logger s_logger = Logger.getLogger(Site2SiteVpnConnectionDaoImpl.class); - @Inject protected IPAddressDaoImpl _addrDao; - @Inject protected Site2SiteVpnGatewayDaoImpl _vpnGatewayDao; + @Inject protected IPAddressDao _addrDao; + @Inject protected Site2SiteVpnGatewayDao _vpnGatewayDao; private SearchBuilder AllFieldsSearch; private SearchBuilder VpcSearch; diff --git a/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java b/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java index 8305978f26c..fa4a9fa3df2 100644 --- a/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java +++ b/server/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java @@ -29,7 +29,7 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value={Site2SiteVpnGatewayDao.class}) public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase implements Site2SiteVpnGatewayDao { - @Inject protected IPAddressDaoImpl _addrDao; + @Inject protected IPAddressDao _addrDao; private static final Logger s_logger = Logger.getLogger(Site2SiteVpnGatewayDaoImpl.class); diff --git a/server/src/com/cloud/network/element/BareMetalElement.java b/server/src/com/cloud/network/element/BareMetalElement.java deleted file mode 100644 index 553fe1d63b2..00000000000 --- a/server/src/com/cloud/network/element/BareMetalElement.java +++ /dev/null @@ -1,128 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.network.element; - -import java.util.Map; -import java.util.Set; - -import javax.ejb.Local; -import javax.inject.Inject; - -import org.apache.log4j.Logger; - -import com.cloud.baremetal.ExternalDhcpManager; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.network.Network.Capability; -import com.cloud.network.Network.Provider; -import com.cloud.network.Network.Service; -import com.cloud.network.PhysicalNetworkServiceProvider; -import com.cloud.offering.NetworkOffering; -import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; -import com.cloud.vm.NicProfile; -import com.cloud.vm.NicVO; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.NicDao; - -@Local(value=NetworkElement.class) -public class BareMetalElement extends AdapterBase implements NetworkElement { - private static final Logger s_logger = Logger.getLogger(BareMetalElement.class); - @Inject NicDao _nicDao; - @Inject ExternalDhcpManager _dhcpMgr; - - @Override - public Map> getCapabilities() { - return null; - } - - @Override - public Provider getProvider() { - return null; - } - - @Override - public boolean 
implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - return true; - } - - @Override @DB - public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, - ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - Host host = dest.getHost(); - if (host == null || host.getHypervisorType() != HypervisorType.BareMetal) { - return true; - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - nic.setMacAddress(host.getPrivateMacAddress()); - NicVO vo = _nicDao.findById(nic.getId()); - assert vo != null : "Where ths nic " + nic.getId() + " going???"; - vo.setMacAddress(nic.getMacAddress()); - _nicDao.update(vo.getId(), vo); - txn.commit(); - s_logger.debug("Bare Metal changes mac address of nic " + nic.getId() + " to " + nic.getMacAddress()); - - return _dhcpMgr.addVirtualMachineIntoNetwork(network, nic, vm, dest, context); - } - - @Override - public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean isReady(PhysicalNetworkServiceProvider provider) { - return true; - } - - @Override - public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - return true; 
- } - - @Override - public boolean canEnableIndividualServices() { - return false; - } - - @Override - public boolean verifyServicesCombination(Set services) { - return true; - } -} diff --git a/server/src/com/cloud/network/element/ExternalDhcpElement.java b/server/src/com/cloud/network/element/ExternalDhcpElement.java deleted file mode 100755 index f7c465ddd35..00000000000 --- a/server/src/com/cloud/network/element/ExternalDhcpElement.java +++ /dev/null @@ -1,152 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.network.element; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -import javax.ejb.Local; -import javax.inject.Inject; - -import org.apache.log4j.Logger; - -import com.cloud.baremetal.ExternalDhcpManager; -import com.cloud.dc.DataCenter; -import com.cloud.dc.DataCenter.NetworkType; -import com.cloud.dc.Pod; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.network.Network.Capability; -import com.cloud.network.Network.GuestType; -import com.cloud.network.Network.Provider; -import com.cloud.network.Network.Service; -import com.cloud.network.Networks.TrafficType; -import com.cloud.network.PhysicalNetworkServiceProvider; -import com.cloud.offering.NetworkOffering; -import com.cloud.utils.component.AdapterBase; -import com.cloud.vm.NicProfile; -import com.cloud.vm.ReservationContext; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -@Local(value = NetworkElement.class) -public class ExternalDhcpElement extends AdapterBase implements NetworkElement, DhcpServiceProvider { - private static final Logger s_logger = Logger.getLogger(ExternalDhcpElement.class); - @Inject - ExternalDhcpManager _dhcpMgr; - private static final Map> capabilities = setCapabilities(); - - private boolean canHandle(DeployDestination dest, TrafficType trafficType, GuestType networkType) { - DataCenter dc = dest.getDataCenter(); - Pod pod = dest.getPod(); - - if ((pod != null && pod.getExternalDhcp()) && dc.getNetworkType() == NetworkType.Basic && trafficType == TrafficType.Guest - && networkType == Network.GuestType.Shared) { - s_logger.debug("External DHCP can handle"); - return true; - } - - return false; - } - - 
private static Map> setCapabilities() { - // No external dhcp support for Acton release - Map> capabilities = new HashMap>(); - //capabilities.put(Service.Dhcp, null); - return capabilities; - } - - @Override - public Map> getCapabilities() { - return capabilities; - } - - @Override - public Provider getProvider() { - return Provider.ExternalDhcpServer; - } - - @Override - public boolean implement(Network network, NetworkOffering offering, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - if (!canHandle(dest, offering.getTrafficType(), network.getGuestType())) { - return false; - } - return true; - } - - @Override - public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, - ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - return true; - } - - @Override - public boolean release(Network network, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) - throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean destroy(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - return true; - } - - @Override - public boolean isReady(PhysicalNetworkServiceProvider provider) { - // TODO Auto-generated method stub - return true; - } - - @Override - public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - // TODO Auto-generated method stub - return true; - } - - @Override - public boolean 
canEnableIndividualServices() { - return false; - } - - @Override - public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) - throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { - Host host = dest.getHost(); - if (host.getHypervisorType() == HypervisorType.BareMetal || !canHandle(dest, network.getTrafficType(), network.getGuestType())) { - // BareMetalElement or DhcpElement handle this - return false; - } - return _dhcpMgr.addVirtualMachineIntoNetwork(network, nic, vm, dest, context); - } - - @Override - public boolean verifyServicesCombination(Set services) { - return true; - } -} diff --git a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java index 27b1a2a7a9a..aa8f10d9c2a 100644 --- a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java @@ -446,7 +446,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc Long vpcId = ip.getVpcId(); Vpc vpc = _vpcMgr.getVpc(vpcId); - if (!_vpcMgr.vpcProviderEnabledInZone(vpc.getZoneId())) { + if (!_vpcMgr.vpcProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); } @@ -474,7 +474,7 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc Long vpcId = ip.getVpcId(); Vpc vpc = _vpcMgr.getVpc(vpcId); - if (!_vpcMgr.vpcProviderEnabledInZone(vpc.getZoneId())) { + if (!_vpcMgr.vpcProviderEnabledInZone(vpc.getZoneId(), Provider.VPCVirtualRouter.getName())) { throw new ResourceUnavailableException("VPC provider is not enabled in zone " + vpc.getZoneId(), DataCenter.class, vpc.getZoneId()); } diff --git 
a/server/src/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/com/cloud/network/guru/DirectNetworkGuru.java index 7ea988feb48..8707cfd418c 100755 --- a/server/src/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectNetworkGuru.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.network.guru; +import java.util.List; + import javax.ejb.Local; import javax.inject.Inject; @@ -54,7 +56,9 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.vm.Nic.ReservationStrategy; +import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -79,7 +83,9 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { UserIpv6AddressDao _ipv6Dao; @Inject Ipv6AddressManager _ipv6Mgr; - + @Inject + NicSecondaryIpDao _nicSecondaryIpDao; + private static final TrafficType[] _trafficTypes = {TrafficType.Guest}; @Override @@ -173,6 +179,8 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { if (profile != null) { profile.setDns1(dc.getDns1()); profile.setDns2(dc.getDns2()); + profile.setIp6Dns1(dc.getIp6Dns1()); + profile.setIp6Dns2(dc.getIp6Dns2()); } } @@ -228,6 +236,16 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { txn.start(); _networkMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); + //unassign nic secondary ip address + s_logger.debug("remove nic " + nic.getId() + " secondary ip "); + List nicSecIps = null; + nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); + for (String secIp: nicSecIps) { + IPAddressVO pubIp = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), secIp); + _networkMgr.markIpAsUnavailable(pubIp.getId()); + 
_ipAddressDao.unassignIpAddress(pubIp.getId()); + } + txn.commit(); } } diff --git a/server/src/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/com/cloud/network/guru/GuestNetworkGuru.java index ab8a06958da..9c0205a89b2 100755 --- a/server/src/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/GuestNetworkGuru.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import com.cloud.event.ActionEventUtils; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.configuration.Config; import com.cloud.configuration.dao.ConfigurationDao; @@ -77,7 +76,6 @@ import com.cloud.vm.dao.NicDao; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; -@Component @Local(value = NetworkGuru.class) public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGuru { private static final Logger s_logger = Logger.getLogger(GuestNetworkGuru.class); @@ -314,7 +312,12 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur long dcId = dest.getDataCenter().getId(); //get physical network id - long physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + Long physicalNetworkId = network.getPhysicalNetworkId(); + + // physical network id can be null in Guest Network in Basic zone, so locate the physical network + if (physicalNetworkId == null) { + physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + } NetworkVO implemented = new NetworkVO(network.getTrafficType(), network.getMode(), network.getBroadcastDomainType(), network.getNetworkOfferingId(), State.Allocated, diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 1abca51060e..3cbd51d4cc4 100755 --- 
a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -293,8 +293,6 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V UserVmDao _userVmDao; @Inject VMInstanceDao _vmDao; @Inject - UserStatisticsDao _statsDao = null; - @Inject NetworkOfferingDao _networkOfferingDao = null; @Inject GuestOSDao _guestOSDao = null; @@ -364,7 +362,9 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V private String _usageTimeZone = "GMT"; private final long mgmtSrvrId = MacAddress.getMacAddress().toLong(); private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds - + private static final int USAGE_AGGREGATION_RANGE_MIN = 10; // 10 minutes, same as com.cloud.usage.UsageManagerImpl.USAGE_AGGREGATION_RANGE_MIN + private boolean _dailyOrHourly = false; + ScheduledExecutorService _executor; ScheduledExecutorService _checkExecutor; ScheduledExecutorService _networkStatsUpdateExecutor; @@ -728,6 +728,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V cal.roll(Calendar.DAY_OF_YEAR, true); cal.add(Calendar.MILLISECOND, -1); endDate = cal.getTime().getTime(); + _dailyOrHourly = true; } else if (_usageAggregationRange == HOURLY_TIME) { cal.roll(Calendar.HOUR_OF_DAY, false); cal.set(Calendar.MINUTE, 0); @@ -736,8 +737,15 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V cal.roll(Calendar.HOUR_OF_DAY, true); cal.add(Calendar.MILLISECOND, -1); endDate = cal.getTime().getTime(); + _dailyOrHourly = true; } else { endDate = cal.getTime().getTime(); + _dailyOrHourly = false; + } + + if (_usageAggregationRange < USAGE_AGGREGATION_RANGE_MIN) { + s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + USAGE_AGGREGATION_RANGE_MIN); + _usageAggregationRange = USAGE_AGGREGATION_RANGE_MIN; } 
_networkStatsUpdateExecutor.scheduleAtFixedRate(new NetworkStatsUpdateTask(), (endDate - System.currentTimeMillis()), @@ -854,7 +862,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIp4Address()); String routerType = router.getType().toString(); - UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(), + UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); NetworkUsageAnswer answer = null; try { @@ -876,7 +884,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V continue; } txn.start(); - UserStatisticsVO stats = _statsDao.lock(router.getAccountId(), + UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); if (stats == null) { s_logger.warn("unable to find stats for account: " + router.getAccountId()); @@ -912,7 +920,12 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); } stats.setCurrentBytesSent(answer.getBytesSent()); - _statsDao.update(stats.getId(), stats); + if (! 
_dailyOrHourly) { + //update agg bytes + stats.setAggBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); + stats.setAggBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); + } + _userStatsDao.update(stats.getId(), stats); txn.commit(); } catch (Exception e) { txn.rollback(); @@ -954,7 +967,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V try { txn.start(); //get all stats with delta > 0 - List updatedStats = _statsDao.listUpdatedStats(); + List updatedStats = _userStatsDao.listUpdatedStats(); Date updatedTime = new Date(); for(UserStatisticsVO stat : updatedStats){ //update agg bytes @@ -1922,6 +1935,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V NicProfile controlNic = null; String defaultDns1 = null; String defaultDns2 = null; + String defaultIp6Dns1 = null; + String defaultIp6Dns2 = null; for (NicProfile nic : profile.getNics()) { int deviceId = nic.getDeviceId(); boolean ipv4 = false, ipv6 = false; @@ -1945,6 +1960,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } defaultDns1 = nic.getDns1(); defaultDns2 = nic.getDns2(); + defaultIp6Dns1 = nic.getIp6Dns1(); + defaultIp6Dns2 = nic.getIp6Dns2(); } if (nic.getTrafficType() == TrafficType.Management) { @@ -2040,6 +2057,12 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V if (defaultDns2 != null) { buf.append(" dns2=").append(defaultDns2); } + if (defaultIp6Dns1 != null) { + buf.append(" ip6dns1=").append(defaultIp6Dns1); + } + if (defaultIp6Dns2 != null) { + buf.append(" ip6dns2=").append(defaultIp6Dns2); + } boolean useExtDns = !dnsProvided; /* For backward compatibility */ @@ -2295,8 +2318,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V if (_networkModel.isProviderSupportServiceInNetwork(guestNetworkId, Service.StaticNat, provider)) { if (ip.isOneToOneNat()) { - String dstIp = 
_networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), guestNetworkId); - StaticNatImpl staticNat = new StaticNatImpl(ip.getAccountId(), ip.getDomainId(), guestNetworkId, ip.getId(), dstIp, false); + StaticNatImpl staticNat = new StaticNatImpl(ip.getAccountId(), ip.getDomainId(), guestNetworkId, ip.getId(), ip.getVmIp(), false); staticNats.add(staticNat); } } @@ -2391,8 +2413,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V if (addIp) { IPAddressVO ipVO = _ipAddressDao.findById(userIp.getId()); - PublicIp publicIp = new PublicIp(ipVO, _vlanDao.findById(userIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(ipVO.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(ipVO, _vlanDao.findById(userIp.getVlanId())); allPublicIps.add(publicIp); } } @@ -3095,9 +3116,9 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V if (guestOS.getDisplayName().startsWith(name)) { needGateway = true; break; + } } } - } if (!needGateway) { gatewayIp = "0.0.0.0"; } @@ -3105,6 +3126,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V dhcpCommand.setIp6Gateway(nic.getIp6Gateway()); dhcpCommand.setDefaultDns(findDefaultDnsIp(vm.getId())); dhcpCommand.setDuid(NetUtils.getDuidLL(nic.getMacAddress())); + dhcpCommand.setDefault(nic.isDefaultNic()); dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); dhcpCommand.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); @@ -3588,7 +3610,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V boolean forVpc = router.getVpcId() != null; final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIp4Address()); - UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(), + UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), 
router.getDataCenterId(), network.getId(), null, router.getId(), router.getType().toString()); NetworkUsageAnswer answer = null; try { @@ -3610,7 +3632,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V continue; } txn.start(); - UserStatisticsVO stats = _statsDao.lock(router.getAccountId(), + UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), network.getId(), null, router.getId(), router.getType().toString()); if (stats == null) { s_logger.warn("unable to find stats for account: " + router.getAccountId()); @@ -3646,7 +3668,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); } stats.setCurrentBytesSent(answer.getBytesSent()); - _statsDao.update(stats.getId(), stats); + _userStatsDao.update(stats.getId(), stats); txn.commit(); } catch (Exception e) { txn.rollback(); diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index d7fe3e05d97..22d823fb364 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -341,11 +341,11 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian _agentMgr.send(dest.getHost().getId(), cmds); PlugNicAnswer plugNicAnswer = cmds.getAnswer(PlugNicAnswer.class); if (!(plugNicAnswer != null && plugNicAnswer.getResult())) { - s_logger.warn("Unable to plug nic for vm " + vm.getHostName()); + s_logger.warn("Unable to plug nic for vm " + vm.getName()); result = false; } } catch (OperationTimedoutException e) { - throw new AgentUnavailableException("Unable to plug nic for router " + vm.getHostName() + " in network " + network, + throw new AgentUnavailableException("Unable to plug nic for router " 
+ vm.getName() + " in network " + network, dest.getHost().getId(), e); } } else { @@ -367,7 +367,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian if (router.getState() == State.Running) { try { - Commands cmds = new Commands(OnError.Stop); + Commands cmds = new Commands(OnError.Stop); UnPlugNicCommand unplugNicCmd = new UnPlugNicCommand(nic, vm.getName()); cmds.addCommand("unplugnic", unplugNicCmd); _agentMgr.send(dest.getHost().getId(), cmds); @@ -448,15 +448,20 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian SetupGuestNetworkCommand setupCmd = new SetupGuestNetworkCommand(dhcpRange, networkDomain, false, null, defaultDns1, defaultDns2, add, _itMgr.toNicTO(nicProfile, router.getHypervisorType())); - long guestVlanTag = Long.parseLong(network.getBroadcastUri().getHost()); + String brd = NetUtils.long2Ip(NetUtils.ip2Long(guestNic.getIp4Address()) | ~NetUtils.ip2Long(guestNic.getNetmask())); setupCmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); setupCmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, getRouterIpInNetwork(network.getId(), router.getId())); - setupCmd.setAccessDetail(NetworkElementCommand.GUEST_VLAN_TAG, String.valueOf(guestVlanTag)); + setupCmd.setAccessDetail(NetworkElementCommand.GUEST_NETWORK_GATEWAY, network.getGateway()); setupCmd.setAccessDetail(NetworkElementCommand.GUEST_BRIDGE, brd); setupCmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); + if (network.getBroadcastDomainType() == BroadcastDomainType.Vlan) { + long guestVlanTag = Long.parseLong(network.getBroadcastUri().getHost()); + setupCmd.setAccessDetail(NetworkElementCommand.GUEST_VLAN_TAG, String.valueOf(guestVlanTag)); + } + return setupCmd; } @@ -571,8 +576,8 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian } Commands netUsagecmds = new Commands(OnError.Continue); - VpcVO vpc = 
_vpcDao.findById(router.getVpcId()); - + VpcVO vpc = _vpcDao.findById(router.getVpcId()); + //2) Plug the nics for (String vlanTag : nicsToPlug.keySet()) { PublicIpAddress ip = nicsToPlug.get(vlanTag); @@ -609,9 +614,9 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian } //Create network usage commands. Send commands to router after IPAssoc NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(router.getPrivateIpAddress(), router.getInstanceName(), true, defaultNic.getIp4Address(), vpc.getCidr()); - netUsagecmds.addCommand(netUsageCmd); - UserStatisticsVO stats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), - publicNtwk.getId(), publicNic.getIp4Address(), router.getId(), router.getType().toString()); + netUsagecmds.addCommand(netUsageCmd); + UserStatisticsVO stats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), + publicNtwk.getId(), publicNic.getIp4Address(), router.getId(), router.getType().toString()); if (stats == null) { stats = new UserStatisticsVO(router.getAccountId(), router.getDataCenterId(), publicNic.getIp4Address(), router.getId(), router.getType().toString(), publicNtwk.getId()); @@ -654,8 +659,8 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian } }); if(result && netUsagecmds.size() > 0){ - //After successful ipassoc, send commands to router - sendCommandsToRouter(router, netUsagecmds); + //After successful ipassoc, send commands to router + sendCommandsToRouter(router, netUsagecmds); } return result; } @@ -803,8 +808,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian publicNic.getIp4Address()); if (userIp.isSourceNat()) { - PublicIp publicIp = new PublicIp(userIp, _vlanDao.findById(userIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(userIp.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); sourceNat.add(publicIp); if 
(router.getPublicIpAddress() == null) { @@ -821,7 +825,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian NetworkUsageCommand netUsageCmd = new NetworkUsageCommand(router.getPrivateIpAddress(), router.getInstanceName(), true, publicNic.getIp4Address(), vpc.getCidr()); usageCmds.add(netUsageCmd); UserStatisticsVO stats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), - publicNtwk.getId(), publicNic.getIp4Address(), router.getId(), router.getType().toString()); + publicNtwk.getId(), publicNic.getIp4Address(), router.getId(), router.getType().toString()); if (stats == null) { stats = new UserStatisticsVO(router.getAccountId(), router.getDataCenterId(), publicNic.getIp4Address(), router.getId(), router.getType().toString(), publicNtwk.getId()); @@ -1069,7 +1073,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian s_logger.warn("Unable to apply StaticRoute, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply StaticRoute on the backend," + - " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); } } return result; @@ -1227,8 +1231,7 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian //4) allocate nic for additional public network(s) List ips = _ipAddressDao.listByAssociatedVpc(vpcId, false); for (IPAddressVO ip : ips) { - PublicIp publicIp = new PublicIp(ip, _vlanDao.findById(ip.getVlanId()), - NetUtils.createSequenceBasedMacAddress(ip.getMacAddress())); + PublicIp publicIp = PublicIp.createFromAddrAndVlan(ip, _vlanDao.findById(ip.getVlanId())); if ((ip.getState() == IpAddress.State.Allocated || ip.getState() == IpAddress.State.Allocating) && _vpcMgr.ipUsedInVpc(ip)&& !publicVlans.contains(publicIp.getVlanTag())) { s_logger.debug("Allocating nic for router 
in vlan " + publicIp.getVlanTag()); diff --git a/server/src/com/cloud/network/rules/RulesManagerImpl.java b/server/src/com/cloud/network/rules/RulesManagerImpl.java index 614d30820b4..29ed5f36d5e 100755 --- a/server/src/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/com/cloud/network/rules/RulesManagerImpl.java @@ -78,10 +78,13 @@ import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.vm.Nic; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; +import com.cloud.vm.dao.NicSecondaryIpVO; import com.cloud.vm.dao.UserVmDao; @Component @@ -123,6 +126,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules ResourceTagDao _resourceTagDao; @Inject VpcManager _vpcMgr; + @Inject + NicSecondaryIpDao _nicSecondaryDao; @Override public void checkIpAndUserVm(IpAddress ipAddress, UserVm userVm, Account caller) { @@ -172,7 +177,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override @DB @ActionEvent(eventType = EventTypes.EVENT_NET_RULE_ADD, eventDescription = "creating forwarding rule", create = true) - public PortForwardingRule createPortForwardingRule(PortForwardingRule rule, Long vmId, boolean openFirewall) + public PortForwardingRule createPortForwardingRule(PortForwardingRule rule, Long vmId, Ip vmIp, boolean openFirewall) throws NetworkRuleConflictException { UserContext ctx = UserContext.current(); Account caller = ctx.getCaller(); @@ -192,6 +197,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules Network network = _networkModel.getNetwork(networkId); //associate ip address to network (if needed) boolean performedIpAssoc = false; + Nic guestNic; if (ipAddress.getAssociatedWithNetworkId() == null) { 
boolean assignToVpcNtwk = network.getVpcId() != null && ipAddress.getVpcId() != null && ipAddress.getVpcId().longValue() == network.getVpcId(); @@ -244,13 +250,26 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // Verify that vm has nic in the network Ip dstIp = rule.getDestinationIpAddress(); - Nic guestNic = _networkModel.getNicInNetwork(vmId, networkId); + guestNic = _networkModel.getNicInNetwork(vmId, networkId); if (guestNic == null || guestNic.getIp4Address() == null) { throw new InvalidParameterValueException("Vm doesn't belong to network associated with ipAddress"); } else { dstIp = new Ip(guestNic.getIp4Address()); } + if (vmIp != null) { + //vm ip is passed so it can be primary or secondary ip addreess. + if (!dstIp.equals(vmIp)) { + //the vm ip is secondary ip to the nic. + // is vmIp is secondary ip or not + NicSecondaryIp secondaryIp = _nicSecondaryDao.findByIp4AddressAndNicId(vmIp.toString(), guestNic.getId()); + if (secondaryIp == null) { + throw new InvalidParameterValueException("IP Address is not in the VM nic's network "); + } + dstIp = vmIp; + } + } + //if start port and end port are passed in, and they are not equal to each other, perform the validation boolean validatePortRange = false; if (rule.getSourcePortStart().intValue() != rule.getSourcePortEnd().intValue() @@ -350,8 +369,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw new InvalidParameterValueException("Can't create ip forwarding rules for the network where elasticIP service is enabled"); } - String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId); - + //String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId); + String dstIp = ipAddress.getVmIp(); Transaction txn = Transaction.currentTxn(); txn.start(); @@ -397,14 +416,13 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override @ActionEvent(eventType = 
EventTypes.EVENT_ENABLE_STATIC_NAT, eventDescription = "enabling static nat") - public boolean enableStaticNat(long ipId, long vmId, long networkId, boolean isSystemVm) + public boolean enableStaticNat(long ipId, long vmId, long networkId, boolean isSystemVm, String vmGuestIp) throws NetworkRuleConflictException, ResourceUnavailableException { UserContext ctx = UserContext.current(); Account caller = ctx.getCaller(); UserContext.current().setEventDetails("Ip Id: " + ipId); // Verify input parameters - IPAddressVO ipAddress = _ipAddressDao.findById(ipId); if (ipAddress == null) { throw new InvalidParameterValueException("Unable to find ip address by id " + ipId); @@ -414,6 +432,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules boolean performedIpAssoc = false; boolean isOneToOneNat = ipAddress.isOneToOneNat(); Long associatedWithVmId = ipAddress.getAssociatedWithVmId(); + Nic guestNic; + NicSecondaryIpVO nicSecIp = null; + String dstIp = null; + try { Network network = _networkModel.getNetwork(networkId); if (network == null) { @@ -421,11 +443,11 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } // Check that vm has a nic in the network - Nic guestNic = _networkModel.getNicInNetwork(vmId, networkId); + guestNic = _networkModel.getNicInNetwork(vmId, networkId); if (guestNic == null) { throw new InvalidParameterValueException("Vm doesn't belong to the network with specified id"); } - + dstIp = guestNic.getIp4Address(); if (!_networkModel.areServicesSupportedInNetwork(network.getId(), Service.StaticNat)) { throw new InvalidParameterValueException("Unable to create static nat rule; StaticNat service is not " + @@ -466,13 +488,36 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // Check permissions checkIpAndUserVm(ipAddress, vm, caller); + //is static nat is for vm secondary ip + //dstIp = guestNic.getIp4Address(); + if (vmGuestIp != null) { + //dstIp = 
guestNic.getIp4Address(); + + if (!dstIp.equals(vmGuestIp)) { + //check whether the secondary ip set to the vm or not + boolean secondaryIpSet = _networkMgr.isSecondaryIpSetForNic(guestNic.getId()); + if (!secondaryIpSet) { + throw new InvalidParameterValueException("VM ip " + vmGuestIp + " address not belongs to the vm"); + } + //check the ip belongs to the vm or not + nicSecIp = _nicSecondaryDao.findByIp4AddressAndNicId(vmGuestIp, guestNic.getId()); + if (nicSecIp == null) { + throw new InvalidParameterValueException("VM ip " + vmGuestIp + " address not belongs to the vm"); + } + dstIp = nicSecIp.getIp4Address(); + // Set public ip column with the vm ip + } + } + // Verify ip address parameter - isIpReadyForStaticNat(vmId, ipAddress, caller, ctx.getCallerUserId()); + // checking vm id is not sufficient, check for the vm ip + isIpReadyForStaticNat(vmId, ipAddress, dstIp, caller, ctx.getCallerUserId()); } ipAddress.setOneToOneNat(true); ipAddress.setAssociatedWithVmId(vmId); + ipAddress.setVmIp(dstIp); if (_ipAddressDao.update(ipAddress.getId(), ipAddress)) { // enable static nat on the backend s_logger.trace("Enabling static nat for ip address " + ipAddress + " and vm id=" + vmId + " on the backend"); @@ -483,6 +528,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules s_logger.warn("Failed to enable static nat rule for ip address " + ipId + " on the backend"); ipAddress.setOneToOneNat(isOneToOneNat); ipAddress.setAssociatedWithVmId(associatedWithVmId); + ipAddress.setVmIp(null); _ipAddressDao.update(ipAddress.getId(), ipAddress); } } else { @@ -490,16 +536,17 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } } finally { - if (performedIpAssoc) { - //if the rule is the last one for the ip address assigned to VPC, unassign it from the network - IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); - _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); + if (performedIpAssoc) { + //if the 
rule is the last one for the ip address assigned to VPC, unassign it from the network + IpAddress ip = _ipAddressDao.findById(ipAddress.getId()); + _vpcMgr.unassignIPFromVpcNetwork(ip.getId(), networkId); } } return false; } - protected void isIpReadyForStaticNat(long vmId, IPAddressVO ipAddress, Account caller, long callerUserId) + protected void isIpReadyForStaticNat(long vmId, IPAddressVO ipAddress, + String vmIp, Account caller, long callerUserId) throws NetworkRuleConflictException, ResourceUnavailableException { if (ipAddress.isSourceNat()) { throw new InvalidParameterValueException("Can't enable static, ip address " + ipAddress + " is a sourceNat ip address"); @@ -519,7 +566,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw new NetworkRuleConflictException("Failed to enable static for the ip address " + ipAddress + " and vm id=" + vmId + " as it's already assigned to antoher vm"); } - IPAddressVO oldIP = _ipAddressDao.findByAssociatedVmId(vmId); + //check wether the vm ip is alreday associated with any public ip address + IPAddressVO oldIP = _ipAddressDao.findByAssociatedVmIdAndVmIp(vmId, vmIp); if (oldIP != null) { // If elasticIP functionality is supported in the network, we always have to disable static nat on the old @@ -538,9 +586,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (!reassignStaticNat) { throw new InvalidParameterValueException("Failed to enable static nat for the ip address id=" + ipAddress.getId() + " as vm id=" + vmId + " is already associated with ip id=" + oldIP.getId()); } - // unassign old static nat rule - s_logger.debug("Disassociating static nat for ip " + oldIP); - if (!disableStaticNat(oldIP.getId(), caller, callerUserId, true)) { + // unassign old static nat rule + s_logger.debug("Disassociating static nat for ip " + oldIP); + if (!disableStaticNat(oldIP.getId(), caller, callerUserId, true)) { throw new CloudRuntimeException("Failed to disable 
old static nat rule for vm id=" + vmId + " and ip " + oldIP); } } @@ -890,8 +938,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules List staticNats = new ArrayList(); for (IPAddressVO ip : ips) { // Get nic IP4 address - String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), networkId); - StaticNatImpl staticNat = new StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), networkId, ip.getId(), dstIp, false); + //String dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), networkId); + StaticNatImpl staticNat = new StaticNatImpl(ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), networkId, ip.getId(), ip.getVmIp(), false); staticNats.add(staticNat); } @@ -1209,6 +1257,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules boolean isIpSystem = ipAddress.getSystem(); ipAddress.setOneToOneNat(false); ipAddress.setAssociatedWithVmId(null); + ipAddress.setVmIp(null); if (isIpSystem && !releaseIpIfElastic) { ipAddress.setSystem(false); } @@ -1248,11 +1297,11 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw ex; } - String dstIp; - if (forRevoke) { - dstIp = _networkModel.getIpInNetworkIncludingRemoved(ip.getAssociatedWithVmId(), rule.getNetworkId()); - } else { - dstIp = _networkModel.getIpInNetwork(ip.getAssociatedWithVmId(), rule.getNetworkId()); + String dstIp = ip.getVmIp(); + if (dstIp == null) { + InvalidParameterValueException ex = new InvalidParameterValueException("VM ip address of the specified public ip is not set "); + ex.addProxyObject(ruleVO, rule.getId(), "ruleId"); + throw ex; } return new StaticNatRuleImpl(ruleVO, dstIp); @@ -1333,12 +1382,16 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules // create new static nat rule // Get nic IP4 address + Nic guestNic = _networkModel.getNicInNetwork(vm.getId(), networkId); + if (guestNic == null) { + throw new 
InvalidParameterValueException("Vm doesn't belong to the network with specified id"); + } String dstIp; - if (forRevoke) { - dstIp = _networkModel.getIpInNetworkIncludingRemoved(sourceIp.getAssociatedWithVmId(), networkId); - } else { - dstIp = _networkModel.getIpInNetwork(sourceIp.getAssociatedWithVmId(), networkId); + + dstIp = sourceIp.getVmIp(); + if (dstIp == null) { + throw new InvalidParameterValueException("Vm ip is not set as dnat ip for this public ip"); } StaticNatImpl staticNat = new StaticNatImpl(sourceIp.getAllocatedToAccountId(), sourceIp.getAllocatedInDomainId(), @@ -1373,7 +1426,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules boolean isSystemVM = (vm.getType() == Type.ConsoleProxy || vm.getType() == Type.SecondaryStorageVm); try { - success = enableStaticNat(ip.getId(), vm.getId(), guestNetwork.getId(), isSystemVM); + success = enableStaticNat(ip.getId(), vm.getId(), guestNetwork.getId(), isSystemVM, null); } catch (NetworkRuleConflictException ex) { s_logger.warn("Failed to enable static nat as a part of enabling elasticIp and staticNat for vm " + vm + " in guest network " + guestNetwork + " due to exception ", ex); diff --git a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDao.java b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDao.java index 91f08e7a8ff..682a941856c 100644 --- a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDao.java +++ b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDao.java @@ -41,5 +41,7 @@ public interface PortForwardingRulesDao extends GenericDao listByNetwork(long networkId); List listByAccount(long accountId); + + List listByDestIpAddr(String ip4Address); } diff --git a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java index 5406ab624e0..cc780cbd446 100644 --- a/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java +++ 
b/server/src/com/cloud/network/rules/dao/PortForwardingRulesDaoImpl.java @@ -23,6 +23,7 @@ import javax.inject.Inject; import org.springframework.stereotype.Component; +import com.cloud.network.dao.FirewallRulesCidrsDao; import com.cloud.network.dao.FirewallRulesCidrsDaoImpl; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRule.State; @@ -32,6 +33,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.vm.dao.NicSecondaryIpVO; @Component @Local(value=PortForwardingRulesDao.class) @@ -43,7 +45,7 @@ public class PortForwardingRulesDaoImpl extends GenericDaoBase AllRulesSearchByVM; protected final SearchBuilder ActiveRulesSearchByAccount; - @Inject protected FirewallRulesCidrsDaoImpl _portForwardingRulesCidrsDao; + @Inject protected FirewallRulesCidrsDao _portForwardingRulesCidrsDao; protected PortForwardingRulesDaoImpl() { super(); @@ -55,6 +57,7 @@ public class PortForwardingRulesDaoImpl extends GenericDaoBase listByDestIpAddr(String ip4Address) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("dstIp", ip4Address); + return listBy(sc); + } } diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java index eafe88e36a4..1c189c44688 100755 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -46,8 +46,10 @@ import org.apache.cloudstack.api.command.user.securitygroup.RevokeSecurityGroupI import org.apache.commons.codec.digest.DigestUtils; import org.apache.log4j.Logger; +import com.amazonaws.services.identitymanagement.model.User; import com.cloud.agent.AgentManager; import com.cloud.agent.api.NetworkRulesSystemVmCommand; +import com.cloud.agent.api.NetworkRulesVmSecondaryIpCommand; 
import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.agent.api.SecurityGroupRulesCmd.IpPortAndProto; import com.cloud.agent.manager.Commands; @@ -67,12 +69,6 @@ import com.cloud.network.NetworkManager; import com.cloud.network.NetworkModel; import com.cloud.network.security.SecurityGroupWork.Step; import com.cloud.network.security.SecurityRule.SecurityRuleType; -import com.cloud.network.security.dao.SecurityGroupDao; -import com.cloud.network.security.dao.SecurityGroupRuleDao; -import com.cloud.network.security.dao.SecurityGroupRulesDao; -import com.cloud.network.security.dao.SecurityGroupVMMapDao; -import com.cloud.network.security.dao.SecurityGroupWorkDao; -import com.cloud.network.security.dao.VmRulesetLogDao; import com.cloud.network.security.dao.*; import com.cloud.projects.ProjectManager; import com.cloud.tags.dao.ResourceTagDao; @@ -97,6 +93,8 @@ import com.cloud.utils.net.NetUtils; import com.cloud.vm.*; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; import edu.emory.mathcs.backport.java.util.Collections; @@ -149,6 +147,10 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro ProjectManager _projectMgr; @Inject ResourceTagDao _resourceTagDao; + @Inject + NicDao _nicDao; + @Inject + NicSecondaryIpDao _nicSecIpDao; ScheduledExecutorService _executorPool; ScheduledExecutorService _cleanupExecutor; @@ -489,7 +491,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro return affectedVms; } - protected SecurityGroupRulesCmd generateRulesetCmd(String vmName, String guestIp, String guestMac, Long vmId, String signature, long seqnum, Map> ingressRules, Map> egressRules) { + protected SecurityGroupRulesCmd generateRulesetCmd(String vmName, String guestIp, String guestMac, Long vmId, String signature, long 
seqnum, Map> ingressRules, Map> egressRules, List secIps) { List ingressResult = new ArrayList(); List egressResult = new ArrayList(); for (PortAndProto pAp : ingressRules.keySet()) { @@ -506,7 +508,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro egressResult.add(ipPortAndProto); } } - return new SecurityGroupRulesCmd(guestIp, guestMac, vmName, vmId, signature, seqnum, ingressResult.toArray(new IpPortAndProto[ingressResult.size()]), egressResult.toArray(new IpPortAndProto[egressResult.size()])); + return new SecurityGroupRulesCmd(guestIp, guestMac, vmName, vmId, signature, seqnum, ingressResult.toArray(new IpPortAndProto[ingressResult.size()]), egressResult.toArray(new IpPortAndProto[egressResult.size()]), secIps); } protected void handleVmStopped(VMInstanceVO vm) { @@ -947,8 +949,19 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); agentId = vm.getHostId(); if (agentId != null) { + // get nic secondary ip address + String privateIp = vm.getPrivateIpAddress(); + NicVO nic = _nicDao.findByIp4AddressAndVmId(privateIp, vm.getId()); + List nicSecIps = null; + if (nic != null) { + if (nic.getSecondaryIp()) { + //get secondary ips of the vm + long networkId = nic.getNetworkId(); + nicSecIps = _nicSecIpDao.getSecondaryIpAddressesForNic(nic.getId()); + } + } SecurityGroupRulesCmd cmd = generateRulesetCmd( vm.getInstanceName(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), generateRulesetSignature(ingressRules, egressRules), seqnum, - ingressRules, egressRules); + ingressRules, egressRules, nicSecIps); Commands cmds = new Commands(cmd); try { _agentMgr.send(agentId, cmds, _answerListener); @@ -1272,4 +1285,66 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro return true; } } + + @Override + public boolean securityGroupRulesForVmSecIp(Long nicId, Long networkId, + String 
secondaryIp, boolean ruleAction) { + + String vmMac = null; + String vmName = null; + + if (secondaryIp == null || nicId == null || networkId == null) { + throw new InvalidParameterValueException("Vm nicId or networkId or secondaryIp can't be null"); + } + + NicVO nic = _nicDao.findById(nicId); + Long vmId = nic.getInstanceId(); + + // Validate parameters + List vmSgGrps = getSecurityGroupsForVm(vmId); + if (vmSgGrps == null) { + s_logger.debug("Vm is not in any Security group "); + return true; + } + + Account caller = UserContext.current().getCaller(); + + for (SecurityGroupVO securityGroup: vmSgGrps) { + Account owner = _accountMgr.getAccount(securityGroup.getAccountId()); + if (owner == null) { + throw new InvalidParameterValueException("Unable to find security group owner by id=" + securityGroup.getAccountId()); + } + // Verify permissions + _accountMgr.checkAccess(caller, null, true, securityGroup); + } + + UserVm vm = _userVMDao.findById(vmId); + if (vm.getType() != VirtualMachine.Type.User) { + throw new InvalidParameterValueException("Can't configure the SG ipset, arprules rules for the non user vm"); + } + + if (vm != null) { + vmMac = vm.getPrivateMacAddress(); + vmName = vm.getInstanceName(); + if (vmMac == null || vmName == null) { + throw new InvalidParameterValueException("vm name or vm mac can't be null"); + } + } + + //create command for the to add ip in ipset and arptables rules + NetworkRulesVmSecondaryIpCommand cmd = new NetworkRulesVmSecondaryIpCommand(vmName, vmMac, secondaryIp, ruleAction); + s_logger.debug("Asking agent to configure rules for vm secondary ip"); + Commands cmds = null; + + cmds = new Commands(cmd); + try { + _agentMgr.send(vm.getHostId(), cmds); + } catch (AgentUnavailableException e) { + s_logger.debug(e.toString()); + } catch (OperationTimedoutException e) { + s_logger.debug(e.toString()); + } + + return true; + } } diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java 
b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java index a3a0fc300f9..a42881ec905 100644 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -40,6 +40,7 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Profiler; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.mgmt.JmxUtil; +import com.cloud.vm.NicVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.network.security.SecurityRule.SecurityRuleType; @@ -169,9 +170,19 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl{ Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); Long agentId = vm.getHostId(); if (agentId != null) { + String privateIp = vm.getPrivateIpAddress(); + NicVO nic = _nicDao.findByIp4AddressAndVmId(privateIp, vm.getId()); + List nicSecIps = null; + if (nic != null) { + if (nic.getSecondaryIp()) { + //get secondary ips of the vm + long networkId = nic.getNetworkId(); + nicSecIps = _nicSecIpDao.getSecondaryIpAddressesForNic(nic.getId()); + } + } SecurityGroupRulesCmd cmd = generateRulesetCmd(vm.getInstanceName(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), null, - work.getLogsequenceNumber(), ingressRules, egressRules); + work.getLogsequenceNumber(), ingressRules, egressRules, nicSecIps); cmd.setMsId(_serverId); if (s_logger.isDebugEnabled()) { s_logger.debug("SecurityGroupManager v2: sending ruleset update for vm " + vm.getInstanceName() + diff --git a/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java b/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java index 68112c0a7c1..dfa6a2e53ad 100644 --- a/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java +++ b/server/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java @@ -25,6 +25,7 @@ import org.springframework.stereotype.Component; import 
com.cloud.network.security.SecurityGroupVO; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.DB; @@ -39,7 +40,7 @@ public class SecurityGroupDaoImpl extends GenericDaoBase private SearchBuilder AccountIdSearch; private SearchBuilder AccountIdNameSearch; private SearchBuilder AccountIdNamesSearch; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; protected SecurityGroupDaoImpl() { diff --git a/server/src/com/cloud/network/vpc/VpcManager.java b/server/src/com/cloud/network/vpc/VpcManager.java index 8d49aa1615c..714330dd5aa 100644 --- a/server/src/com/cloud/network/vpc/VpcManager.java +++ b/server/src/com/cloud/network/vpc/VpcManager.java @@ -71,9 +71,10 @@ public interface VpcManager extends VpcService{ /** * @param zoneId + * @param provider * @return */ - boolean vpcProviderEnabledInZone(long zoneId); + boolean vpcProviderEnabledInZone(long zoneId, String provider); /** * @param vpcId diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 7197c363264..fee4ef310f7 100644 --- a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -31,6 +31,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.network.element.StaticNatServiceProvider; import org.apache.cloudstack.api.command.user.vpc.ListStaticRoutesCmd; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -84,6 +85,7 @@ import com.cloud.network.vpc.dao.VpcDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.network.vpc.dao.VpcOfferingServiceMapDao; +import com.cloud.network.vpc.dao.VpcServiceMapDao; import com.cloud.network.vpn.Site2SiteVpnManager; import 
com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -173,10 +175,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ VlanDao _vlanDao = null; @Inject ResourceLimitService _resourceLimitMgr; - + @Inject + VpcServiceMapDao _vpcSrvcDao; private final ScheduledExecutorService _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("VpcChecker")); - private VpcProvider vpcElement = null; + private List vpcElements = null; private final List nonSupportedServices = Arrays.asList(Service.SecurityGroup, Service.Firewall); int _cleanupInterval; @@ -255,7 +258,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_OFFERING_CREATE, eventDescription = "creating vpc offering", create=true) - public VpcOffering createVpcOffering(String name, String displayText, List supportedServices) { + public VpcOffering createVpcOffering(String name, String displayText, List supportedServices, Map> serviceProviders) { Map> svcProviderMap = new HashMap>(); Set defaultProviders = new HashSet(); defaultProviders.add(Provider.VPCVirtualRouter); @@ -291,7 +294,34 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ } svcProviderMap.put(Service.Gateway, defaultProviders); - + + if (serviceProviders != null) { + for (String serviceStr : serviceProviders.keySet()) { + Network.Service service = Network.Service.getService(serviceStr); + if (svcProviderMap.containsKey(service)) { + Set providers = new HashSet(); + // don't allow to specify more than 1 provider per service + if (serviceProviders.get(serviceStr) != null && serviceProviders.get(serviceStr).size() > 1) { + throw new InvalidParameterValueException("In the current release only one provider can be " + + "specified for the service"); + } + for (String prvNameStr : serviceProviders.get(serviceStr)) { + // check if provider is supported + Network.Provider provider = 
Network.Provider.getProvider(prvNameStr); + if (provider == null) { + throw new InvalidParameterValueException("Invalid service provider: " + prvNameStr); + } + + providers.add(provider); + } + svcProviderMap.put(service, providers); + } else { + throw new InvalidParameterValueException("Service " + serviceStr + " is not enabled for the network " + + "offering, can't add a provider to it"); + } + } + } + VpcOffering offering = createVpcOffering(name, displayText, svcProviderMap, false, null); UserContext.current().setEventDetails(" Id: " + offering.getId() + " Name: " + name); @@ -556,11 +586,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ } @Override - public boolean vpcProviderEnabledInZone(long zoneId) + public boolean vpcProviderEnabledInZone(long zoneId, String provider) { //the provider has to be enabled at least in one network in the zone for (PhysicalNetwork pNtwk : _pNtwkDao.listByZone(zoneId)) { - if (_ntwkModel.isProviderEnabledInPhysicalNetwork(pNtwk.getId(), Provider.VPCVirtualRouter.getName())) { + if (_ntwkModel.isProviderEnabledInPhysicalNetwork(pNtwk.getId(), provider)) { return true; } } @@ -573,11 +603,6 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ protected Vpc createVpc(long zoneId, long vpcOffId, Account vpcOwner, String vpcName, String displayText, String cidr, String networkDomain) { - if (!vpcProviderEnabledInZone(zoneId)) { - throw new InvalidParameterValueException("Provider " + Provider.VPCVirtualRouter.getName() + - " should be enabled in at least one physical network of the zone specified"); - } - //Validate CIDR if (!NetUtils.isValidCIDR(cidr)) { throw new InvalidParameterValueException("Invalid CIDR specified " + cidr); @@ -601,7 +626,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ txn.start(); VpcVO vpc = new VpcVO (zoneId, vpcName, displayText, vpcOwner.getId(), vpcOwner.getDomainId(), vpcOffId, cidr, networkDomain); - vpc = _vpcDao.persist(vpc); 
+ vpc = _vpcDao.persist(vpc, finalizeServicesAndProvidersForVpc(zoneId, vpcOffId)); _resourceLimitMgr.incrementResourceCount(vpcOwner.getId(), ResourceType.vpc); txn.commit(); @@ -609,7 +634,44 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ return vpc; } - + + private Map finalizeServicesAndProvidersForVpc(long zoneId, long offeringId) { + Map svcProviders = new HashMap(); + Map> providerSvcs = new HashMap>(); + List servicesMap = _vpcOffSvcMapDao.listByVpcOffId(offeringId); + + for (VpcOfferingServiceMapVO serviceMap : servicesMap) { + if (svcProviders.containsKey(serviceMap.getService())) { + // FIXME - right now we pick up the first provider from the list, need to add more logic based on + // provider load, etc + continue; + } + + String service = serviceMap.getService(); + String provider = serviceMap.getProvider(); + + if (provider == null) { + // Default to VPCVirtualRouter + provider = Provider.VPCVirtualRouter.getName(); + } + + + if (!vpcProviderEnabledInZone(zoneId, provider)) { + throw new InvalidParameterValueException("Provider " + provider + + " should be enabled in at least one physical network of the zone specified"); + } + + svcProviders.put(service, provider); + List l = providerSvcs.get(provider); + if (l == null) { + providerSvcs.put(provider, l = new ArrayList()); + } + l.add(service); + } + + return svcProviders; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VPC_DELETE, eventDescription = "deleting VPC") public boolean deleteVpc(long vpcId) throws ConcurrentOperationException, ResourceUnavailableException { @@ -903,13 +965,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ protected boolean startVpc(Vpc vpc, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { //deploy provider - if (getVpcElement().implementVpc(vpc, dest, context)) { - s_logger.debug("Vpc " + vpc + " has started 
succesfully"); - return true; - } else { - s_logger.warn("Vpc " + vpc + " failed to start"); - return false; + boolean success = true; + List providersToImplement = getVpcProviders(vpc.getId()); + for (VpcProvider element: getVpcElements()){ + if(providersToImplement.contains(element.getProvider())){ + if (element.implementVpc(vpc, dest, context)) { + s_logger.debug("Vpc " + vpc + " has started succesfully"); + } else { + s_logger.warn("Vpc " + vpc + " failed to start"); + success = false; + } + } } + return success; } @Override @@ -928,15 +996,22 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ //shutdown provider s_logger.debug("Shutting down vpc " + vpc); - ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallerUserId()), caller); - boolean success = getVpcElement().shutdownVpc(vpc, context); - //TODO - shutdown all vpc resources here (ACLs, gateways, etc) - if (success) { - s_logger.debug("Vpc " + vpc + " has been shutdown succesfully"); - } else { - s_logger.warn("Vpc " + vpc + " failed to shutdown"); + + boolean success = true; + List providersToImplement = getVpcProviders(vpc.getId()); + ReservationContext context = new ReservationContextImpl(null, null, _accountMgr.getActiveUser(ctx.getCallerUserId()), caller); + for (VpcProvider element: getVpcElements()){ + if(providersToImplement.contains(element.getProvider())){ + if (element.shutdownVpc(vpc, context)) { + s_logger.debug("Vpc " + vpc + " has been shutdown succesfully"); + } else { + s_logger.warn("Vpc " + vpc + " failed to shutdown"); + success = false; + } + } } + return success; } @@ -1085,16 +1160,17 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ } - protected VpcProvider getVpcElement() { - if (vpcElement == null) { - vpcElement = ((VpcProvider)_ntwkModel.getElementImplementingProvider(Provider.VPCVirtualRouter.getName())); + protected List getVpcElements() { + if (vpcElements == null) { + 
vpcElements = new ArrayList(); + vpcElements.add((VpcProvider)_ntwkModel.getElementImplementingProvider(Provider.VPCVirtualRouter.getName())); } - if (vpcElement == null) { - throw new CloudRuntimeException("Failed to initialize vpc element"); + if (vpcElements == null) { + throw new CloudRuntimeException("Failed to initialize vpc elements"); } - - return vpcElement; + + return vpcElements; } @Override @@ -1233,13 +1309,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ ex.addProxyObject("vpc", vpcId, "VPC"); throw ex; } - - //allow only one private gateway per vpc - VpcGatewayVO gatewayVO = _vpcGatewayDao.getPrivateGatewayForVpc(vpcId); - if (gatewayVO != null) { - throw new InvalidParameterValueException("Private ip address already exists for vpc " + vpc); - } - + //Validate physical network if (physicalNetworkId == null) { List pNtwks = _ntwkModel.getPhysicalNtwksSupportingTrafficType(vpc.getZoneId(), TrafficType.Guest); @@ -1258,7 +1328,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ vlan, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId); //2) create gateway entry - gatewayVO = new VpcGatewayVO(ipAddress, VpcGateway.Type.Private, vpcId, privateNtwk.getDataCenterId(), + VpcGatewayVO gatewayVO = new VpcGatewayVO(ipAddress, VpcGateway.Type.Private, vpcId, privateNtwk.getDataCenterId(), privateNtwk.getId(), vlan, gateway, netmask, vpc.getAccountId(), vpc.getDomainId()); _vpcGatewayDao.persist(gatewayVO); @@ -1274,10 +1344,14 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ public PrivateGateway applyVpcPrivateGateway(long gatewayId, boolean destroyOnFailure) throws ConcurrentOperationException, ResourceUnavailableException { VpcGatewayVO vo = _vpcGatewayDao.findById(gatewayId); - boolean success = false; + boolean success = true; try { PrivateGateway gateway = getVpcPrivateGateway(gatewayId); - success = getVpcElement().createPrivateGateway(gateway); + for (VpcProvider provider: 
getVpcElements()){ + if(!provider.createPrivateGateway(gateway)){ + success = false; + } + } if (success) { s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); if (vo.getState() != VpcGateway.State.Ready) { @@ -1333,11 +1407,13 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ //1) delete the gateway on the backend PrivateGateway gateway = getVpcPrivateGateway(gatewayId); - if (getVpcElement().deletePrivateGateway(gateway)) { - s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); - } else { - s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); - return false; + for (VpcProvider provider: getVpcElements()){ + if (provider.deletePrivateGateway(gateway)) { + s_logger.debug("Private gateway " + gateway + " was applied succesfully on the backend"); + } else { + s_logger.warn("Private gateway " + gateway + " failed to apply on the backend"); + return false; + } } //2) Delete private gateway from the DB @@ -1505,11 +1581,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ Vpc vpc = getVpc(routes.get(0).getVpcId()); s_logger.debug("Applying static routes for vpc " + vpc); - if (getVpcElement().applyStaticRoutes(vpc, routes)) { - s_logger.debug("Applied static routes for vpc " + vpc); - } else { - s_logger.warn("Failed to apply static routes for vpc " + vpc); - return false; + String staticNatProvider = _vpcSrvcDao.getProviderForServiceInVpc(vpc.getId(), Service.StaticNat); + + for (VpcProvider provider: getVpcElements()){ + if (!(provider instanceof StaticNatServiceProvider && provider.getName().equalsIgnoreCase(staticNatProvider))) { + continue; + } + + if (provider.applyStaticRoutes(vpc, routes)) { + s_logger.debug("Applied static routes for vpc " + vpc); + } else { + s_logger.warn("Failed to apply static routes for vpc " + vpc); + return false; + } } return true; @@ -1929,8 +2013,7 @@ public class VpcManagerImpl extends 
ManagerBase implements VpcManager{ PublicIp ipToReturn = null; if (sourceNatIp != null) { - ipToReturn = new PublicIp(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId()), - NetUtils.createSequenceBasedMacAddress(sourceNatIp.getMacAddress())); + ipToReturn = PublicIp.createFromAddrAndVlan(sourceNatIp, _vlanDao.findById(sourceNatIp.getVlanId())); } else { ipToReturn = _ntwkMgr.assignDedicateIpAddress(owner, null, vpc.getId(), dcId, true); } @@ -1941,7 +2024,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ @Override public Network updateVpcGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long ntwkOffId, Boolean changeCidr) { + User callerUser, String domainSuffix, Long ntwkOffId, Boolean changeCidr, String guestVmCidr) { NetworkVO network = _ntwkDao.findById(networkId); if (network == null) { throw new InvalidParameterValueException("Couldn't find network by id"); @@ -1953,7 +2036,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ } return _ntwkSvc.updateGuestNetwork(networkId, name, displayText, callerAccount, callerUser, domainSuffix, - ntwkOffId, changeCidr); + ntwkOffId, changeCidr, guestVmCidr); } @Override @@ -1964,4 +2047,16 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager{ hTypes.add(HypervisorType.KVM); return hTypes; } + + private List getVpcProviders(long vpcId) { + List providerNames = _vpcSrvcDao.getDistinctProviders(vpcId); + Map providers = new HashMap(); + for (String providerName : providerNames) { + if(!providers.containsKey(providerName)){ + providers.put(providerName, Network.Provider.getProvider(providerName)); + } + } + + return new ArrayList(providers.values()); + } } diff --git a/server/src/com/cloud/network/vpc/VpcServiceMapVO.java b/server/src/com/cloud/network/vpc/VpcServiceMapVO.java new file mode 100644 index 00000000000..6f229096404 --- /dev/null +++ 
b/server/src/com/cloud/network/vpc/VpcServiceMapVO.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.vpc; + +import java.util.Date; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name="vpc_service_map") +public class VpcServiceMapVO { + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + long id; + + @Column(name="vpc_id") + long vpcId; + + @Column(name="service") + String service; + + @Column(name="provider") + String provider; + + @Column(name=GenericDao.CREATED_COLUMN) + Date created; + + public long getId() { + return id; + } + + public long getVpcId() { + return vpcId; + } + + public String getService() { + return service; + } + + public String getProvider() { + return provider; + } + + public Date getCreated() { + return created; + } + + public VpcServiceMapVO() { + } + + public VpcServiceMapVO(long vpcId, Service service, 
Provider provider) { + this.vpcId = vpcId; + this.service = service.getName(); + this.provider = provider.getName(); + } + + public String toString() { + StringBuilder buf = new StringBuilder("[VPC Service["); + return buf.append(vpcId).append("-").append(service).append("-").append(provider).append("]").toString(); + } +} + + + + + diff --git a/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java b/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java index 0ebccabfa8e..fac35a91a53 100644 --- a/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java +++ b/server/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java @@ -26,6 +26,7 @@ import org.springframework.stereotype.Component; import com.cloud.network.vpc.StaticRoute; import com.cloud.network.vpc.StaticRouteVO; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.DB; @@ -44,7 +45,7 @@ public class StaticRouteDaoImpl extends GenericDaoBase impl protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder NotRevokedSearch; protected final GenericSearchBuilder RoutesByGatewayCount; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; protected StaticRouteDaoImpl() { super(); diff --git a/server/src/com/cloud/network/vpc/dao/VpcDao.java b/server/src/com/cloud/network/vpc/dao/VpcDao.java index 80e5e15f9c0..5a33217c028 100644 --- a/server/src/com/cloud/network/vpc/dao/VpcDao.java +++ b/server/src/com/cloud/network/vpc/dao/VpcDao.java @@ -17,6 +17,7 @@ package com.cloud.network.vpc.dao; import java.util.List; +import java.util.Map; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcVO; @@ -39,4 +40,8 @@ public interface VpcDao extends GenericDao{ long countByAccountId(long accountId); + VpcVO persist(VpcVO vpc, Map serviceProviderMap); + + void persistVpcServiceProviders(long vpcId, + Map serviceProviderMap); } diff --git 
a/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java b/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java index a9b5e182b60..6560b90ce7d 100644 --- a/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java +++ b/server/src/com/cloud/network/vpc/dao/VpcDaoImpl.java @@ -17,16 +17,19 @@ package com.cloud.network.vpc.dao; import java.util.List; +import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; +import com.cloud.network.Network; +import com.cloud.network.vpc.VpcServiceMapVO; import org.springframework.stereotype.Component; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcVO; import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -44,7 +47,9 @@ public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ final GenericSearchBuilder CountByOfferingId; final SearchBuilder AllFieldsSearch; final GenericSearchBuilder CountByAccountId; - @Inject ResourceTagsDaoImpl _tagsDao; + + @Inject ResourceTagDao _tagsDao; + @Inject VpcServiceMapDao _vpcSvcMap; protected VpcDaoImpl() { super(); @@ -120,5 +125,28 @@ public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ List results = customSearch(sc, null); return results.get(0); } + + @Override + @DB + public VpcVO persist(VpcVO vpc, Map serviceProviderMap) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + VpcVO newVpc = super.persist(vpc); + persistVpcServiceProviders(vpc.getId(), serviceProviderMap); + txn.commit(); + return newVpc; + } + + @Override + @DB + public void persistVpcServiceProviders(long vpcId, Map serviceProviderMap) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (String service : serviceProviderMap.keySet()) { + VpcServiceMapVO serviceMap = new VpcServiceMapVO(vpcId, Network.Service.getService(service), 
Network.Provider.getProvider(serviceProviderMap.get(service))); + _vpcSvcMap.persist(serviceMap); + } + txn.commit(); + } } diff --git a/server/src/com/cloud/network/vpc/dao/VpcServiceMapDao.java b/server/src/com/cloud/network/vpc/dao/VpcServiceMapDao.java new file mode 100644 index 00000000000..8c4537ede4e --- /dev/null +++ b/server/src/com/cloud/network/vpc/dao/VpcServiceMapDao.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.network.vpc.dao; + +import java.util.List; + +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.dao.NetworkServiceMapVO; +import com.cloud.network.vpc.VpcServiceMapVO; +import com.cloud.utils.db.GenericDao; + +/** + * VpcServiceMapDao deals with searches and operations done on the + * vpc_service_map table. + * + */ +public interface VpcServiceMapDao extends GenericDao{ + boolean areServicesSupportedInVpc(long vpcId, Service... 
services); + boolean canProviderSupportServiceInVpc(long vpcId, Service service, Provider provider); + List getServicesInVpc(long vpcId); + String getProviderForServiceInVpc(long vpcId, Service service); + void deleteByVpcId(long vpcId); + List getDistinctProviders(long vpcId); + String isProviderForVpc(long vpcId, Provider provider); +} \ No newline at end of file diff --git a/server/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java b/server/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java new file mode 100644 index 00000000000..a992181f864 --- /dev/null +++ b/server/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java @@ -0,0 +1,115 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.vpc.dao; + +import java.util.List; + +import javax.ejb.Local; + +import com.cloud.exception.UnsupportedServiceException; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.dao.NetworkServiceMapVO; +import com.cloud.network.vpc.VpcServiceMapVO; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import org.springframework.stereotype.Component; + +@Component +@Local(value=VpcServiceMapDao.class) @DB(txn=false) +public class VpcServiceMapDaoImpl extends GenericDaoBase implements VpcServiceMapDao { + final SearchBuilder AllFieldsSearch; + final SearchBuilder MultipleServicesSearch; + final GenericSearchBuilder DistinctProvidersSearch; + + protected VpcServiceMapDaoImpl(){ + super(); + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("vpcId", AllFieldsSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("service", AllFieldsSearch.entity().getService(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("provider", AllFieldsSearch.entity().getProvider(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); + + MultipleServicesSearch = createSearchBuilder(); + MultipleServicesSearch.and("vpcId", MultipleServicesSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + MultipleServicesSearch.and("service", MultipleServicesSearch.entity().getService(), SearchCriteria.Op.IN); + MultipleServicesSearch.and("provider", MultipleServicesSearch.entity().getProvider(), SearchCriteria.Op.EQ); + MultipleServicesSearch.done(); + + DistinctProvidersSearch = createSearchBuilder(String.class); + DistinctProvidersSearch.and("vpcId", DistinctProvidersSearch.entity().getVpcId(), SearchCriteria.Op.EQ); + DistinctProvidersSearch.and("provider", DistinctProvidersSearch.entity().getProvider(), SearchCriteria.Op.EQ); + 
DistinctProvidersSearch.selectField(DistinctProvidersSearch.entity().getProvider()); + DistinctProvidersSearch.done(); + } + + @Override + public boolean areServicesSupportedInVpc(long vpcId, Service... services) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean canProviderSupportServiceInVpc(long vpcId, Service service, + Provider provider) { + // TODO Auto-generated method stub + return false; + } + + @Override + public List getServicesInVpc(long vpcId) { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getProviderForServiceInVpc(long vpcId, Service service) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("vpcId", vpcId); + sc.setParameters("service", service.getName()); + VpcServiceMapVO ntwkSvc = findOneBy(sc); + if (ntwkSvc == null) { + throw new UnsupportedServiceException("Service " + service.getName() + " is not supported in the vpc id=" + vpcId); + } + + return ntwkSvc.getProvider(); + } + + @Override + public void deleteByVpcId(long vpcId) { + // TODO Auto-generated method stub + + } + + @Override + public List getDistinctProviders(long vpcId) { + SearchCriteria sc = DistinctProvidersSearch.create(); + sc.setParameters("vpcId", vpcId); + List results = customSearch(sc, null); + return results; + } + + @Override + public String isProviderForVpc(long vpcId, Provider provider) { + // TODO Auto-generated method stub + return null; + } + +} diff --git a/server/src/com/cloud/projects/ProjectManagerImpl.java b/server/src/com/cloud/projects/ProjectManagerImpl.java index 45a9a242147..33feb5dd57e 100755 --- a/server/src/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/com/cloud/projects/ProjectManagerImpl.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.TimeZone; +import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import 
java.util.concurrent.TimeUnit; @@ -204,7 +205,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { StringBuilder acctNm = new StringBuilder("PrjAcct-"); acctNm.append(name).append("-").append(owner.getDomainId()); - Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.ACCOUNT_TYPE_PROJECT, domainId, null, null, "", 0); + Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.ACCOUNT_TYPE_PROJECT, domainId, null, null, UUID.randomUUID().toString(), 0); Project project = _projectDao.persist(new ProjectVO(name, displayText, owner.getDomainId(), projectAccount.getId())); diff --git a/server/src/com/cloud/projects/dao/ProjectDaoImpl.java b/server/src/com/cloud/projects/dao/ProjectDaoImpl.java index e07aecc5ec6..ecf40dacaff 100644 --- a/server/src/com/cloud/projects/dao/ProjectDaoImpl.java +++ b/server/src/com/cloud/projects/dao/ProjectDaoImpl.java @@ -27,6 +27,7 @@ import org.springframework.stereotype.Component; import com.cloud.projects.Project; import com.cloud.projects.ProjectVO; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.DB; @@ -45,7 +46,7 @@ public class ProjectDaoImpl extends GenericDaoBase implements P protected GenericSearchBuilder CountByDomain; protected GenericSearchBuilder ProjectAccountSearch; // ResourceTagsDaoImpl _tagsDao = ComponentLocator.inject(ResourceTagsDaoImpl.class); - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; protected ProjectDaoImpl() { AllFieldsSearch = createSearchBuilder(); diff --git a/server/src/com/cloud/resource/DiscovererBase.java b/server/src/com/cloud/resource/DiscovererBase.java index 940608c4419..b7c5b6f58de 100644 --- a/server/src/com/cloud/resource/DiscovererBase.java +++ b/server/src/com/cloud/resource/DiscovererBase.java @@ -128,6 +128,7 @@ public abstract class DiscovererBase extends 
AdapterBase implements Discoverer { params.put("secondary.storage.vm", "false"); params.put("max.template.iso.size", _configDao.getValue(Config.MaxTemplateAndIsoSize.toString())); params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); + params.put(Config.XenMaxNics.toString().toLowerCase(), _configDao.getValue(Config.XenMaxNics.toString())); return params; } diff --git a/server/src/com/cloud/resource/ResourceManager.java b/server/src/com/cloud/resource/ResourceManager.java index 266ba948afc..b0ab9269529 100755 --- a/server/src/com/cloud/resource/ResourceManager.java +++ b/server/src/com/cloud/resource/ResourceManager.java @@ -100,6 +100,7 @@ public interface ResourceManager extends ResourceService{ public List listHostsInClusterByStatus(long clusterId, Status status); public List listAllUpAndEnabledHostsInOneZoneByType(Host.Type type, long dcId); + public List listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType type, long dcId); public List listAllHostsInOneZoneByType(Host.Type type, long dcId); diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 79ccdb30198..15d32e0640d 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -30,6 +30,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.*; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; @@ -44,6 +45,8 @@ import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; +import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -71,12 +74,6 @@ import com.cloud.cluster.ManagementServerNode; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.dc.ClusterDetailsDao; -import com.cloud.dc.ClusterVO; -import com.cloud.dc.DataCenterIpAddressVO; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; -import com.cloud.dc.PodCluster; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.DataCenterDao; @@ -116,13 +113,11 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.StorageService; import com.cloud.storage.Swift; import com.cloud.storage.SwiftVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.GuestOSCategoryDao; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.s3.S3Manager; @@ -198,7 +193,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Inject protected GuestOSCategoryDao _guestOSCategoryDao; @Inject - protected StoragePoolDao _storagePoolDao; + protected PrimaryDataStoreDao _storagePoolDao; @Inject protected DataCenterIpAddressDao _privateIPAddressDao; @Inject @@ -460,6 +455,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, + cmd.getHypervisor()); } + if (hypervisorType == HypervisorType.VMware) { + Map allParams = cmd.getFullUrlParams(); + discoverer.putParam(allParams); + } + List result = new ArrayList(); long clusterId = 0; 
@@ -483,6 +483,11 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, clusterId = cluster.getId(); result.add(cluster); + ClusterDetailsVO cluster_detail_cpu = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", Float.toString(cmd.getCpuOvercommitRatio())); + ClusterDetailsVO cluster_detail_ram = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", Float.toString(cmd.getMemoryOvercommitRaito())); + _clusterDetailsDao.persist(cluster_detail_cpu); + _clusterDetailsDao.persist(cluster_detail_ram); + if (clusterType == Cluster.ClusterType.CloudManaged) { return result; } @@ -494,6 +499,21 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, details.put("password", password); _clusterDetailsDao.persist(cluster.getId(), details); + _clusterDetailsDao.persist(cluster_detail_cpu); + _clusterDetailsDao.persist(cluster_detail_ram); + //create a new entry only if the overcommit ratios are greater than 1. + if(cmd.getCpuOvercommitRatio().compareTo(1f) > 0) { + cluster_detail_cpu = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", Float.toString(cmd.getCpuOvercommitRatio())); + _clusterDetailsDao.persist(cluster_detail_cpu); + } + + + if(cmd.getMemoryOvercommitRaito().compareTo(1f) > 0) { + cluster_detail_ram = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", Float.toString(cmd.getMemoryOvercommitRaito())); + _clusterDetailsDao.persist(cluster_detail_ram); + } + + boolean success = false; try { try { @@ -750,6 +770,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } clusterId = cluster.getId(); + if (_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio") == null) { + ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId,"cpuOvercommitRatio","1"); + ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId,"memoryOvercommitRatio","1"); + _clusterDetailsDao.persist(cluster_cpu_detail); + 
_clusterDetailsDao.persist(cluster_memory_detail); + } + } try { @@ -1061,7 +1088,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override @DB public Cluster updateCluster(Cluster clusterToUpdate, String clusterType, - String hypervisor, String allocationState, String managedstate) { + String hypervisor, String allocationState, String managedstate,Float memoryovercommitratio, Float cpuovercommitratio) { ClusterVO cluster = (ClusterVO) clusterToUpdate; // Verify cluster information and update the cluster if needed @@ -1144,6 +1171,31 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } + ClusterDetailsVO memory_detail = _clusterDetailsDao.findDetail(cluster.getId(),"memoryOvercommitRatio"); + if( memory_detail == null){ + if (memoryovercommitratio.compareTo(1f) > 0){ + memory_detail = new ClusterDetailsVO(cluster.getId(),"memoryOvercommitRatio",Float.toString(memoryovercommitratio)); + _clusterDetailsDao.persist(memory_detail); + } + } + else { + memory_detail.setValue(Float.toString(memoryovercommitratio)); + _clusterDetailsDao.update(memory_detail.getId(),memory_detail); + } + + ClusterDetailsVO cpu_detail = _clusterDetailsDao.findDetail(cluster.getId(),"cpuOvercommitRatio"); + if( cpu_detail == null){ + if (cpuovercommitratio.compareTo(1f) > 0){ + cpu_detail = new ClusterDetailsVO(cluster.getId(),"cpuOvercommitRatio",Float.toString(cpuovercommitratio)); + _clusterDetailsDao.persist(cpu_detail); + } + } + else { + cpu_detail.setValue(Float.toString(cpuovercommitratio)); + _clusterDetailsDao.update(cpu_detail.getId(),cpu_detail); + } + + if (doUpdate) { Transaction txn = Transaction.currentTxn(); try { @@ -2223,20 +2275,22 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, User caller = _accountMgr.getActiveUser(UserContext.current() .getCallerUserId()); - if (forceDestroyStorage) { - // put local storage into mainenance mode, will set all the VMs on - // this 
local storage into stopped state - StoragePool storagePool = _storageMgr.findLocalStorageOnHost(host + + if (forceDestroyStorage) { + // put local storage into mainenance mode, will set all the VMs on + // this local storage into stopped state + StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host .getId()); if (storagePool != null) { if (storagePool.getStatus() == StoragePoolStatus.Up || storagePool.getStatus() == StoragePoolStatus.ErrorInMaintenance) { - try { - storagePool = _storageSvr + try { + StoragePool pool = _storageSvr .preparePrimaryStorageForMaintenance(storagePool .getId()); - if (storagePool == null) { - s_logger.debug("Failed to set primary storage into maintenance mode"); + if (pool == null) { + s_logger.debug("Failed to set primary storage into maintenance mode"); + throw new UnableDeleteHostException( "Failed to set primary storage into maintenance mode"); } @@ -2780,4 +2834,17 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } return pcs; } + + @Override + public List listAllUpAndEnabledHostsInOneZoneByHypervisor( + HypervisorType type, long dcId) { + SearchCriteriaService sc = SearchCriteria2 + .create(HostVO.class); + sc.addAnd(sc.getEntity().getHypervisorType(), Op.EQ, type); + sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); + sc.addAnd(sc.getEntity().getResourceState(), Op.EQ, + ResourceState.Enabled); + return sc.list(); + } } diff --git a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index 7ff06af9409..23c079679b1 100755 --- a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -628,7 +628,7 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim ResourceType resourceType = null; if (typeId != null) { - for (ResourceType type 
: resourceTypes) { + for (ResourceType type : Resource.ResourceType.values()) { if (type.getOrdinal() == typeId.intValue()) { resourceType = type; } diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 6bf7f955269..8c665ad1eee 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -130,7 +130,6 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @Inject private ResourceCountDao _resourceCountDao; @Inject private NetworkOfferingServiceMapDao _ntwkOfferingServiceMapDao; @Inject private IdentityDao _identityDao; - @Inject private RegionDao _regionDao; public ConfigurationServerImpl() { setRunLevel(ComponentLifecycle.RUN_LEVEL_FRAMEWORK_BOOTSTRAP); @@ -152,6 +151,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @DB public void persistDefaultValues() throws InternalErrorException { + fixupScriptFileAttribute(); + // Create system user and admin user saveUser(); @@ -234,8 +235,6 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // Create default networks createDefaultNetworks(); - createDefaultRegion(); - // Create userIpAddress ranges // Update existing vlans with networkId @@ -338,21 +337,23 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio @DB protected void saveUser() { - //ToDo: Add regionId to default users and accounts + int region_id = _configDao.getRegionId(); // insert system account - String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (1, UUID(), 'system', '1', '1', '1')"; + String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (1, UUID(), 'system', '1', '1', ?)"; Transaction txn = Transaction.currentTxn(); try { PreparedStatement stmt = 
txn.prepareAutoCloseStatement(insertSql); + stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // insert system user insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, region_id)" + - " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now(), '1')"; + " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now(), ?)"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); + stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -365,21 +366,23 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String lastname = "cloud"; // create an account for the admin user first - insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1', '1')"; + insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, region_id) VALUES (" + id + ", UUID(), '" + username + "', '1', '1', ?)"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); + stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } // now insert the user insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state, region_id) " + - "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled', '1')"; + "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled', ?)"; txn = Transaction.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); + stmt.setInt(1, region_id); stmt.executeUpdate(); } catch (SQLException ex) { } @@ -600,8 +603,16 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio throw 
new CloudRuntimeException("No home directory was detected for the user '" + username + "'. Please check the profile of this user."); } - File privkeyfile = new File(homeDir + "/.ssh/id_rsa"); - File pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub"); + // Using non-default file names (id_rsa.cloud and id_rsa.cloud.pub) in developer mode. This is to prevent SSH keys overwritten for user running management server + File privkeyfile = null; + File pubkeyfile = null; + if (devel) { + privkeyfile = new File(homeDir + "/.ssh/id_rsa.cloud"); + pubkeyfile = new File(homeDir + "/.ssh/id_rsa.cloud.pub"); + } else { + privkeyfile = new File(homeDir + "/.ssh/id_rsa"); + pubkeyfile = new File(homeDir + "/.ssh/id_rsa.pub"); + } if (already == null || already.isEmpty()) { if (s_logger.isInfoEnabled()) { @@ -658,13 +669,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } else { - s_logger.info("Keypairs already in database"); - if (username.equalsIgnoreCase("cloud")) { - s_logger.info("Keypairs already in database, updating local copy"); - updateKeyPairsOnDisk(homeDir); - } else { - s_logger.info("Keypairs already in database, skip updating local copy (not running as cloud user)"); - } + s_logger.info("Keypairs already in database, updating local copy"); + updateKeyPairsOnDisk(homeDir); } s_logger.info("Going to update systemvm iso with generated keypairs if needed"); try { @@ -702,17 +708,43 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } + + private void fixupScriptFileAttribute() { + // TODO : this is a hacking fix to workaround that executable bit is not preserved in WAR package + String scriptPath = Script.findScript("", "scripts/vm/systemvm/injectkeys.sh"); + if(scriptPath != null) { + File file = new File(scriptPath); + if(!file.canExecute()) { + s_logger.info("Some of the shell script files may not have executable bit set. 
Fixup..."); + + String cmd = "chmod ugo+x " + scriptPath; + s_logger.info("Executing " + cmd); + String result = Script.runSimpleBashScript(cmd); + if (result != null) { + s_logger.warn("Failed to fixup shell script executable bits " + result); + } + } + } + } private void updateKeyPairsOnDisk(String homeDir) { File keyDir = new File(homeDir + "/.ssh"); + Boolean devel = Boolean.valueOf(_configDao.getValue("developer")); if (!keyDir.isDirectory()) { s_logger.warn("Failed to create " + homeDir + "/.ssh for storing the SSH keypars"); keyDir.mkdir(); } String pubKey = _configDao.getValue("ssh.publickey"); String prvKey = _configDao.getValue("ssh.privatekey"); - writeKeyToDisk(prvKey, homeDir + "/.ssh/id_rsa"); - writeKeyToDisk(pubKey, homeDir + "/.ssh/id_rsa.pub"); + + // Using non-default file names (id_rsa.cloud and id_rsa.cloud.pub) in developer mode. This is to prevent SSH keys overwritten for user running management server + if( devel ) { + writeKeyToDisk(prvKey, homeDir + "/.ssh/id_rsa.cloud"); + writeKeyToDisk(pubKey, homeDir + "/.ssh/id_rsa.cloud.pub"); + } else { + writeKeyToDisk(prvKey, homeDir + "/.ssh/id_rsa"); + writeKeyToDisk(pubKey, homeDir + "/.ssh/id_rsa.pub"); + } } protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) { @@ -1272,9 +1304,4 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio return svcProviders; } - private void createDefaultRegion(){ - //Get Region name and URL from db.properties - _regionDao.persist(new RegionVO(_regionDao.getRegionId(), "Local", "http://localhost:8080/client/api", "", "")); - } - } diff --git a/server/src/com/cloud/server/ManagementServer.java b/server/src/com/cloud/server/ManagementServer.java index 5c34deea53b..6773725f361 100755 --- a/server/src/com/cloud/server/ManagementServer.java +++ b/server/src/com/cloud/server/ManagementServer.java @@ -19,11 +19,12 @@ package com.cloud.server; import java.util.Date; import java.util.List; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.event.EventVO; import com.cloud.host.HostVO; import com.cloud.info.ConsoleProxyInfo; import com.cloud.storage.GuestOSVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.utils.Pair; import com.cloud.utils.component.PluggableService; import com.cloud.vm.VirtualMachine; diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index d3812067118..d70c45f1f8a 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -47,12 +47,13 @@ import javax.management.MalformedObjectNameException; import javax.management.NotCompliantMBeanException; import javax.naming.ConfigurationException; -import com.cloud.storage.dao.*; +import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; import com.cloud.event.ActionEventUtils; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoCmd; + import org.apache.cloudstack.api.command.admin.account.*; import org.apache.cloudstack.api.command.admin.autoscale.*; import org.apache.cloudstack.api.command.admin.cluster.*; @@ -108,6 +109,9 @@ import org.apache.cloudstack.api.command.user.vpc.*; import org.apache.cloudstack.api.command.user.vpn.*; import org.apache.cloudstack.api.command.user.zone.*; import org.apache.cloudstack.api.response.ExtractResponse; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; @@ -122,6 +126,7 @@ import com.cloud.alert.AlertManager; import com.cloud.alert.AlertVO; import com.cloud.alert.dao.AlertDao; import com.cloud.api.ApiDBUtils; +import 
com.cloud.api.query.vo.EventJoinVO; import com.cloud.async.AsyncJobExecutor; import com.cloud.async.AsyncJobManager; import com.cloud.async.AsyncJobResult; @@ -185,6 +190,7 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.info.ConsoleProxyInfo; import com.cloud.keystore.KeystoreManager; import com.cloud.network.IpAddress; +import com.cloud.network.as.ConditionVO; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.LoadBalancerDao; @@ -206,15 +212,21 @@ import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.GuestOsCategory; import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; -import com.cloud.storage.StoragePoolVO; +import com.cloud.storage.StoragePool; import com.cloud.storage.Upload; import com.cloud.storage.Upload.Mode; import com.cloud.storage.UploadVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSCategoryDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.UploadDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.snapshot.SnapshotManager; @@ -222,6 +234,7 @@ import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.upload.UploadMonitor; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -252,6 +265,7 @@ import com.cloud.utils.db.GenericDaoBase; import 
com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.JoinBuilder.JoinType; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; @@ -285,7 +299,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName()); @Inject - private AccountManager _accountMgr; + public AccountManager _accountMgr; @Inject private AgentManager _agentMgr; @Inject @@ -301,7 +315,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private SecondaryStorageVmDao _secStorageVmDao; @Inject - private EventDao _eventDao; + public EventDao _eventDao; @Inject private DataCenterDao _dcDao; @Inject @@ -337,7 +351,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private AccountDao _accountDao; @Inject - private AlertDao _alertDao; + public AlertDao _alertDao; @Inject private CapacityDao _capacityDao; @Inject @@ -345,7 +359,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private GuestOSCategoryDao _guestOSCategoryDao; @Inject - private StoragePoolDao _poolDao; + private PrimaryDataStoreDao _poolDao; @Inject private NetworkDao _networkDao; @Inject @@ -361,6 +375,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private AsyncJobManager _asyncMgr; private int _purgeDelay; + private int _alertPurgeDelay; @Inject private InstanceGroupDao _vmGroupDao; @Inject @@ -390,6 +405,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject HighAvailabilityManager _haMgr; @Inject + TemplateManager templateMgr; + @Inject + DataStoreManager dataStoreMgr; + @Inject HostTagsDao _hostTagsDao; @Inject @@ -403,6 +422,7 @@ public class 
ManagementServerImpl extends ManagerBase implements ManagementServe EventUtils _forceEventUtilsRef; */ private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); + private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); private KeystoreManager _ksMgr; private Map _configs; @@ -432,6 +452,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _eventExecutor.scheduleAtFixedRate(new EventPurgeTask(), cleanup, cleanup, TimeUnit.SECONDS); } + //Alerts purge configurations + int alertPurgeInterval = NumbersUtil.parseInt(_configDao.getValue(Config.AlertPurgeInterval.key()), + 60 * 60 * 24); // 1 day. + _alertPurgeDelay = NumbersUtil.parseInt(_configDao.getValue(Config.AlertPurgeDelay.key()), 0); + if (_alertPurgeDelay != 0) { + _alertExecutor.scheduleAtFixedRate(new AlertPurgeTask(), alertPurgeInterval, alertPurgeInterval, + TimeUnit.SECONDS); + } + String[] availableIds = TimeZone.getAvailableIDs(); _availableIdsMap = new HashMap(availableIds.length); for (String id : availableIds) { @@ -524,6 +553,42 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return _eventDao.search(sc, null); } + @Override + public boolean archiveEvents(ArchiveEventsCmd cmd) { + List ids = cmd.getIds(); + boolean result =true; + + List events = _eventDao.listToArchiveOrDeleteEvents(ids, cmd.getType(), cmd.getOlderThan(), cmd.getEntityOwnerId()); + ControlledEntity[] sameOwnerEvents = events.toArray(new ControlledEntity[events.size()]); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, sameOwnerEvents); + + if (ids != null && events.size() < ids.size()) { + result = false; + return result; + } + _eventDao.archiveEvents(events); + return result; + } + + @Override + public boolean deleteEvents(DeleteEventsCmd cmd) { + List ids = cmd.getIds(); + boolean result 
=true; + + List events = _eventDao.listToArchiveOrDeleteEvents(ids, cmd.getType(), cmd.getOlderThan(), cmd.getEntityOwnerId()); + ControlledEntity[] sameOwnerEvents = events.toArray(new ControlledEntity[events.size()]); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, sameOwnerEvents); + + if (ids != null && events.size() < ids.size()) { + result = false; + return result; + } + for (EventVO event : events) { + _eventDao.remove(event.getId()); + } + return result; + } + private Date massageDate(Date date, int hourOfDay, int minute, int second) { Calendar cal = Calendar.getInstance(); cal.setTime(date); @@ -1649,10 +1714,25 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe sc.addAnd("type", SearchCriteria.Op.EQ, type); } + sc.addAnd("archived", SearchCriteria.Op.EQ, false); Pair, Integer> result = _alertDao.searchAndCount(sc, searchFilter); return new Pair, Integer>(result.first(), result.second()); } + @Override + public boolean archiveAlerts(ArchiveAlertsCmd cmd) { + Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), null); + boolean result = _alertDao.archiveAlert(cmd.getIds(), cmd.getType(), cmd.getOlderThan(), zoneId); + return result; + } + + @Override + public boolean deleteAlerts(DeleteAlertsCmd cmd) { + Long zoneId = _accountMgr.checkAccessAndSpecifyAuthority(UserContext.current().getCaller(), null); + boolean result = _alertDao.deleteAlert(cmd.getIds(), cmd.getType(), cmd.getOlderThan(), zoneId); + return result; + } + @Override public List listTopConsumedResources(ListCapacityCmd cmd) { @@ -2151,6 +2231,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(CreateVMSnapshotCmd.class); cmdList.add(RevertToSnapshotCmd.class); cmdList.add(DeleteVMSnapshotCmd.class); + cmdList.add(AddIpToVmNicCmd.class); + cmdList.add(RemoveIpFromVmNicCmd.class); + cmdList.add(ListNicsCmd.class); + cmdList.add(ArchiveAlertsCmd.class); + 
cmdList.add(DeleteAlertsCmd.class); + cmdList.add(ArchiveEventsCmd.class); + cmdList.add(DeleteEventsCmd.class); return cmdList; } @@ -2188,6 +2275,39 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } + protected class AlertPurgeTask implements Runnable { + @Override + public void run() { + try { + GlobalLock lock = GlobalLock.getInternLock("AlertPurge"); + if (lock == null) { + s_logger.debug("Couldn't get the global lock"); + return; + } + if (!lock.lock(30)) { + s_logger.debug("Couldn't lock the db"); + return; + } + try { + final Calendar purgeCal = Calendar.getInstance(); + purgeCal.add(Calendar.DAY_OF_YEAR, - _alertPurgeDelay); + Date purgeTime = purgeCal.getTime(); + s_logger.debug("Deleting alerts older than: " + purgeTime.toString()); + List oldAlerts = _alertDao.listOlderAlerts(purgeTime); + s_logger.debug("Found " + oldAlerts.size() + " events to be purged"); + for (AlertVO alert : oldAlerts) { + _alertDao.expunge(alert.getId()); + } + } catch (Exception e) { + s_logger.error("Exception ", e); + } finally { + lock.unlock(); + } + } catch (Exception e) { + s_logger.error("Exception ", e); + } + } + } @Override public Pair, Integer> searchForStoragePools(Criteria c) { @@ -2518,6 +2638,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe String userPublicTemplateEnabled = _configs.get(Config.AllowPublicUserTemplates.key()); // add some parameters UI needs to handle API throttling + boolean apiLimitEnabled = Boolean.parseBoolean(_configDao.getValue(Config.ApiLimitEnabled.key())); Integer apiLimitInterval = Integer.valueOf(_configDao.getValue(Config.ApiLimitInterval.key())); Integer apiLimitMax = Integer.valueOf(_configDao.getValue(Config.ApiLimitMax.key())); @@ -2529,8 +2650,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe capabilities.put("projectInviteRequired", _projectMgr.projectInviteRequired()); capabilities.put("allowusercreateprojects", 
_projectMgr.allowUserToCreateProject()); capabilities.put("customDiskOffMaxSize", diskOffMaxSize); - capabilities.put("apiLimitInterval", apiLimitInterval); - capabilities.put("apiLimitMax", apiLimitMax); + if (apiLimitEnabled) { + capabilities.put("apiLimitInterval", apiLimitInterval); + capabilities.put("apiLimitMax", apiLimitMax); + } return capabilities; } @@ -2635,8 +2758,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } long accountId = volume.getAccountId(); - StoragePoolVO srcPool = _poolDao.findById(volume.getPoolId()); - HostVO sserver = _storageMgr.getSecondaryStorageHost(zoneId); + StoragePool srcPool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); + HostVO sserver = this.templateMgr.getSecondaryStorageHost(zoneId); String secondaryStorageURL = sserver.getStorageUrl(); List extractURLList = _uploadDao.listByTypeUploadStatus(volumeId, Upload.Type.VOLUME, UploadVO.Status.DOWNLOAD_URL_CREATED); @@ -2713,7 +2836,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - private String getFormatForPool(StoragePoolVO pool) { + private String getFormatForPool(StoragePool pool) { ClusterVO cluster = ApiDBUtils.findClusterById(pool.getClusterId()); if (cluster.getHypervisorType() == HypervisorType.XenServer) { diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index be83c188f8b..76bae5b4aca 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -29,8 +29,8 @@ import java.util.concurrent.TimeUnit; import javax.inject.Inject; -import com.cloud.resource.ResourceManager; - +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -49,14 +49,13 @@ import com.cloud.host.HostStats; import 
com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.StorageStats; import com.cloud.storage.VolumeStats; import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.secondary.SecondaryStorageVmManager; @@ -84,7 +83,7 @@ public class StatsCollector { @Inject private HostDao _hostDao; @Inject private UserVmDao _userVmDao; @Inject private VolumeDao _volsDao; - @Inject private StoragePoolDao _storagePoolDao; + @Inject private PrimaryDataStoreDao _storagePoolDao; @Inject private StorageManager _storageManager; @Inject private StoragePoolHostDao _storagePoolHostDao; @Inject private SecondaryStorageVmManager _ssvmMgr; @@ -301,7 +300,7 @@ public class StatsCollector { GetStorageStatsCommand command = new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath()); long poolId = pool.getId(); try { - Answer answer = _storageManager.sendToPool(pool, command); + Answer answer = _storageManager.sendToPool(pool.getId(), command); if (answer != null && answer.getResult()) { storagePoolStats.put(pool.getId(), (StorageStats)answer); diff --git a/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java b/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java index 347b9c0b362..952f724bac9 100644 --- a/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java +++ b/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java @@ -21,15 +21,12 @@ import java.util.Map; import javax.ejb.Local; import javax.naming.ConfigurationException; -import org.springframework.stereotype.Component; - import com.cloud.utils.component.AdapterBase; /** * Use this 
UserAuthenticator if users are already authenticated outside * */ -@Component @Local(value={UserAuthenticator.class}) public abstract class DefaultUserAuthenticator extends AdapterBase implements UserAuthenticator { private String _name = null; diff --git a/server/src/com/cloud/service/ServiceOfferingVO.java b/server/src/com/cloud/service/ServiceOfferingVO.java index c199a86afd1..7be939c3a15 100755 --- a/server/src/com/cloud/service/ServiceOfferingVO.java +++ b/server/src/com/cloud/service/ServiceOfferingVO.java @@ -53,6 +53,9 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering @Column(name="limit_cpu_use") private boolean limitCpuUse; + @Column(name="is_volatile") + private boolean volatileVm; + @Column(name="host_tag") private String hostTag; @@ -78,11 +81,12 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.multicastRateMbps = multicastRateMbps; this.offerHA = offerHA; this.limitCpuUse = false; + this.volatileVm = false; this.default_use = defaultUse; this.vm_type = vm_type == null ? 
null : vm_type.toString().toLowerCase(); } - public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitCpuUse, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId) { + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitCpuUse, boolean volatileVm, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId) { super(name, displayText, false, tags, recreatable, useLocalStorage, systemUse, true, domainId); this.cpu = cpu; this.ramSize = ramSize; @@ -91,11 +95,12 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering this.multicastRateMbps = multicastRateMbps; this.offerHA = offerHA; this.limitCpuUse = limitCpuUse; + this.volatileVm = volatileVm; this.vm_type = vm_type == null ? 
null : vm_type.toString().toLowerCase(); } - public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitResourceUse, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, String hostTag) { - this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId); + public ServiceOfferingVO(String name, int cpu, int ramSize, int speed, Integer rateMbps, Integer multicastRateMbps, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String displayText, boolean useLocalStorage, boolean recreatable, String tags, boolean systemUse, VirtualMachine.Type vm_type, Long domainId, String hostTag) { + this(name, cpu, ramSize, speed, rateMbps, multicastRateMbps, offerHA, limitResourceUse, volatileVm, displayText, useLocalStorage, recreatable, tags, systemUse, vm_type, domainId); this.hostTag = hostTag; } @@ -189,13 +194,18 @@ public class ServiceOfferingVO extends DiskOfferingVO implements ServiceOffering public String getSystemVmType(){ return vm_type; } + + public void setSortKey(int key) { + sortKey = key; + } + + public int getSortKey() { + return sortKey; + } - public void setSortKey(int key) { - sortKey = key; + @Override + public boolean getVolatileVm() { + return volatileVm; } - - public int getSortKey() { - return sortKey; - } - + } diff --git a/server/src/com/cloud/servlet/CloudStartupServlet.java b/server/src/com/cloud/servlet/CloudStartupServlet.java index 46be09387ba..2cabe15e0ac 100755 --- a/server/src/com/cloud/servlet/CloudStartupServlet.java +++ b/server/src/com/cloud/servlet/CloudStartupServlet.java @@ -16,6 +16,9 @@ // under the License. 
package com.cloud.servlet; +import java.util.Timer; +import java.util.TimerTask; + import javax.servlet.ServletConfig; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; @@ -31,10 +34,22 @@ public class CloudStartupServlet extends HttpServlet { public static final Logger s_logger = Logger.getLogger(CloudStartupServlet.class.getName()); static final long serialVersionUID = SerialVersionUID.CloudStartupServlet; + Timer _timer = new Timer(); + @Override public void init(ServletConfig config) throws ServletException { LogUtils.initLog4j("log4j-cloud.xml"); SpringBeanAutowiringSupport.processInjectionBasedOnServletContext(this, config.getServletContext()); - ComponentContext.initComponentsLifeCycle(); + + // wait when condition is ready for initialization + _timer.scheduleAtFixedRate(new TimerTask() { + @Override + public void run() { + if(ComponentContext.getApplicationContext() != null) { + _timer.cancel(); + ComponentContext.initComponentsLifeCycle(); + } + } + }, 0, 1000); } } diff --git a/server/src/com/cloud/storage/LocalStoragePoolListener.java b/server/src/com/cloud/storage/LocalStoragePoolListener.java index 8d5875e9d76..244f7fbe271 100755 --- a/server/src/com/cloud/storage/LocalStoragePoolListener.java +++ b/server/src/com/cloud/storage/LocalStoragePoolListener.java @@ -16,10 +16,9 @@ // under the License. 
package com.cloud.storage; -import java.util.List; - import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import com.cloud.agent.Listener; @@ -30,24 +29,17 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupStorageCommand; import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.capacity.Capacity; -import com.cloud.capacity.CapacityVO; import com.cloud.capacity.dao.CapacityDao; -import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.ConnectionException; import com.cloud.host.HostVO; import com.cloud.host.Status; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; public class LocalStoragePoolListener implements Listener { private final static Logger s_logger = Logger.getLogger(LocalStoragePoolListener.class); - @Inject StoragePoolDao _storagePoolDao; + @Inject PrimaryDataStoreDao _storagePoolDao; @Inject StoragePoolHostDao _storagePoolHostDao; @Inject CapacityDao _capacityDao; @Inject StorageManager _storageMgr; @@ -91,63 +83,7 @@ public class LocalStoragePoolListener implements Listener { return; } - DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); - if (dc == null || !dc.isLocalStorageEnabled()) { - return; - } - - try { - StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(), pInfo.getUuid()); - if(pool == null && host.getHypervisorType() == HypervisorType.VMware) { - // perform run-time upgrade. 
In versions prior to 2.2.12, there is a bug that we don't save local datastore info (host path is empty), this will cause us - // not able to distinguish multiple local datastores that may be available on the host, to support smooth migration, we - // need to perform runtime upgrade here - if(pInfo.getHostPath().length() > 0) { - pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid()); - } - } - - if (pool == null) { - - long poolId = _storagePoolDao.getNextInSequence(Long.class, "id"); - String name = cmd.getName() == null ? (host.getName() + " Local Storage") : cmd.getName(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - pool = new StoragePoolVO(poolId, name, pInfo.getUuid(), pInfo.getPoolType(), host.getDataCenterId(), - host.getPodId(), pInfo.getAvailableBytes(), pInfo.getCapacityBytes(), pInfo.getHost(), 0, - pInfo.getHostPath()); - pool.setClusterId(host.getClusterId()); - pool.setStatus(StoragePoolStatus.Up); - _storagePoolDao.persist(pool, pInfo.getDetails()); - StoragePoolHostVO poolHost = new StoragePoolHostVO(pool.getId(), host.getId(), pInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - txn.commit(); - } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); - pool.setPath(pInfo.getHostPath()); - pool.setAvailableBytes(pInfo.getAvailableBytes()); - pool.setCapacityBytes(pInfo.getCapacityBytes()); - _storagePoolDao.update(pool.getId(), pool); - if (pInfo.getDetails() != null) { - _storagePoolDao.updateDetails(pool.getId(), pInfo.getDetails()); - } - StoragePoolHostVO poolHost = _storagePoolHostDao.findByPoolHost(pool.getId(), host.getId()); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(pool.getId(), host.getId(), pInfo.getLocalPath()); - _storagePoolHostDao.persist(poolHost); - } - - 
_storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getCapacityBytes() - pool.getAvailableBytes()); - - txn.commit(); - } - } catch (Exception e) { - s_logger.warn("Unable to setup the local storage pool for " + host, e); - throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); - } + this._storageMgr.createLocalStorage(host, pInfo); } diff --git a/server/src/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/com/cloud/storage/OCFS2ManagerImpl.java index 6bbeec40551..476bf04cae9 100755 --- a/server/src/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/com/cloud/storage/OCFS2ManagerImpl.java @@ -25,6 +25,8 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -42,7 +44,6 @@ import com.cloud.resource.ResourceListener; import com.cloud.resource.ResourceManager; import com.cloud.resource.ServerResource; import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; @@ -62,7 +63,7 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou @Inject ClusterDao _clusterDao; @Inject ResourceManager _resourceMgr; @Inject StoragePoolHostDao _poolHostDao; - @Inject StoragePoolDao _poolDao; + @Inject PrimaryDataStoreDao _poolDao; @Override public boolean configure(String name, Map params) throws ConfigurationException { diff --git a/server/src/com/cloud/storage/RegisterVolumePayload.java b/server/src/com/cloud/storage/RegisterVolumePayload.java new file mode 100644 index 00000000000..142de186e25 --- /dev/null +++ 
b/server/src/com/cloud/storage/RegisterVolumePayload.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +public class RegisterVolumePayload { + private final String url; + private final String checksum; + private final String format; + + public RegisterVolumePayload(String url, String checksum, String format) { + this.url = url; + this.checksum = checksum; + this.format = format; + } + + public String getUrl() { + return this.url; + } + + public String getChecksum() { + return this.checksum; + } + + public String getFormat() { + return this.format; + } +} diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladesInClusterResponse.java b/server/src/com/cloud/storage/ResizeVolumePayload.java similarity index 66% rename from plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladesInClusterResponse.java rename to server/src/com/cloud/storage/ResizeVolumePayload.java index f6cb0a6e34c..205fafa4bb0 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/AssociateUcsProfileToBladesInClusterResponse.java +++ b/server/src/com/cloud/storage/ResizeVolumePayload.java @@ -14,9 +14,18 @@ // KIND, either express or 
implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.ucs.manager; - -import org.apache.cloudstack.api.BaseResponse; - -public class AssociateUcsProfileToBladesInClusterResponse extends BaseResponse { -} + +package com.cloud.storage; + +public class ResizeVolumePayload { + public final Long newSize; + public final boolean shrinkOk; + public final String instanceName; + public final long[] hosts; + public ResizeVolumePayload(Long newSize, boolean shrinkOk, String instanceName, long[] hosts) { + this.newSize = newSize; + this.shrinkOk = shrinkOk; + this.instanceName = instanceName; + this.hosts = hosts; + } +} diff --git a/server/src/com/cloud/storage/StorageManager.java b/server/src/com/cloud/storage/StorageManager.java index 97853ac76de..9213b4bf486 100755 --- a/server/src/com/cloud/storage/StorageManager.java +++ b/server/src/com/cloud/storage/StorageManager.java @@ -17,50 +17,29 @@ package com.cloud.storage; import java.util.List; +import java.util.Set; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.agent.manager.Commands; import com.cloud.capacity.CapacityVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.deploy.DeployDestination; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.ConnectionException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; -import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.service.ServiceOfferingVO; import 
com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.Volume.Event; -import com.cloud.storage.Volume.Type; -import com.cloud.user.Account; import com.cloud.utils.Pair; -import com.cloud.utils.component.Manager; -import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -public interface StorageManager extends StorageService, Manager { - boolean canVmRestartOnAnotherServer(long vmId); - - /** Returns the absolute path of the specified ISO - * @param templateId - the ID of the template that represents the ISO - * @param datacenterId - * @return absolute ISO path - */ - public Pair getAbsoluteIsoPath(long templateId, long dataCenterId); - - /** - * Returns the URL of the secondary storage host - * @param zoneId - * @return URL - */ - public String getSecondaryStorageURL(long zoneId); +public interface StorageManager extends StorageService { /** * Returns a comma separated list of tags for the specified storage pool * @param poolId @@ -68,67 +47,9 @@ public interface StorageManager extends StorageService, Manager { */ public String getStoragePoolTags(long poolId); - /** - * Returns the secondary storage host - * @param zoneId - * @return secondary storage host - */ - public HostVO getSecondaryStorageHost(long zoneId); + - /** - * Returns the secondary storage host - * @param zoneId - * @return secondary storage host - */ - public VMTemplateHostVO findVmTemplateHost(long templateId, StoragePool pool); - /** - * Moves a volume from its current storage pool to a storage pool with enough capacity in the specified zone, pod, or cluster - * @param volume - * @param destPoolDcId - * @param destPoolPodId - * @param destPoolClusterId - * @return VolumeVO - * @throws ConcurrentOperationException - */ - VolumeVO moveVolume(VolumeVO volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType 
dataDiskHyperType) throws ConcurrentOperationException; - - /** - * Create a volume based on the given criteria - * @param volume - * @param vm - * @param template - * @param dc - * @param pod - * @param clusterId - * @param offering - * @param diskOffering - * @param avoids - * @param size - * @param hyperType - * @return volume VO if success, null otherwise - */ - VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, - ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids, long size, HypervisorType hyperType); - - /** - * Marks the specified volume as destroyed in the management server database. The expunge thread will delete the volume from its storage pool. - * @param volume - * @return - */ - boolean destroyVolume(VolumeVO volume) throws ConcurrentOperationException; - - /** Create capacity entries in the op capacity table - * @param storagePool - */ - public void createCapacityEntry(StoragePoolVO storagePool); - - /** - * Checks that the volume is stored on a shared storage pool - * @param volume - * @return true if the volume is on a shared storage pool, false otherwise - */ - boolean volumeOnSharedStoragePool(VolumeVO volume); Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException; Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException; @@ -137,17 +58,6 @@ public interface StorageManager extends StorageService, Manager { Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Commands cmds) throws StorageUnavailableException; Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException; - /** - * Checks that one of the following is true: - * 1. The volume is not attached to any VM - * 2. The volume is attached to a VM that is running on a host with the KVM hypervisor, and the VM is stopped - * 3. 
The volume is attached to a VM that is running on a host with the XenServer hypervisor (the VM can be stopped or running) - * @return true if one of the above conditions is true - */ - boolean volumeInactive(VolumeVO volume); - - String getVmNameOnVolume(VolumeVO volume); - /** * Checks if a host has running VMs that are using its local storage pool. * @return true if local storage is active on the host @@ -162,31 +72,10 @@ public interface StorageManager extends StorageService, Manager { String getPrimaryStorageNameLabel(VolumeVO volume); - /** - * Allocates one volume. - * @param - * @param type - * @param offering - * @param name - * @param size - * @param template - * @param vm - * @param account - * @return VolumeVO a persisted volume. - */ - DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, T vm, Account owner); - DiskProfile allocateTemplatedVolume(Type type, String name, DiskOfferingVO offering, VMTemplateVO template, T vm, Account owner); void createCapacityEntry(StoragePoolVO storagePool, short capacityType, long allocated); - void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException; - - void release(VirtualMachineProfile profile); - - void cleanupVolumes(long vmId) throws ConcurrentOperationException; - - void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException; @@ -194,14 +83,6 @@ public interface StorageManager extends StorageService, Manager { CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, Long podId, Long zoneId); - boolean createStoragePool(long hostId, StoragePoolVO pool); - - boolean delPoolFromHost(long hostId); - - HostVO getSecondaryStorageHost(long zoneId, long tmpltId); - - List getSecondaryStorageHosts(long zoneId); - List 
ListByDataCenterHypervisor(long datacenterId, HypervisorType type); @@ -209,34 +90,34 @@ public interface StorageManager extends StorageService, Manager { StoragePoolVO findLocalStorageOnHost(long hostId); - VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, boolean readyOnly); - - boolean StorageMigration( - VirtualMachineProfile vm, - StoragePool destPool) throws ConcurrentOperationException; - - boolean stateTransitTo(Volume vol, Event event) - throws NoTransitionException; - - VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId); - Host updateSecondaryStorage(long secStorageId, String newUrl); List getUpHostsInPool(long poolId); void cleanupSecondaryStorage(boolean recurring); - VolumeVO copyVolumeFromSecToPrimary(VolumeVO volume, VMInstanceVO vm, - VMTemplateVO template, DataCenterVO dc, HostPodVO pod, - Long clusterId, ServiceOfferingVO offering, - DiskOfferingVO diskOffering, List avoids, long size, - HypervisorType hyperType) throws NoTransitionException; - - String getSupportedImageFormatForCluster(Long clusterId); HypervisorType getHypervisorTypeFromFormat(ImageFormat format); boolean storagePoolHasEnoughSpace(List volume, StoragePool pool); - boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException; + + boolean registerHostListener(String providerUuid, HypervisorHostListener listener); + + StoragePool findStoragePool(DiskProfile dskCh, DataCenterVO dc, + HostPodVO pod, Long clusterId, Long hostId, VMInstanceVO vm, + Set avoid); + + + void connectHostToSharedPool(long hostId, long poolId) + throws StorageUnavailableException; + + void createCapacityEntry(long poolId); + + + + + + DataStore createLocalStorage(Host host, StoragePoolInfo poolInfo) throws ConnectionException; + } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 05e0cfe9869..b0a1da14eb8 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ 
b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -17,8 +17,6 @@ package com.cloud.storage; import java.math.BigDecimal; -import java.net.Inet6Address; -import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; @@ -27,15 +25,13 @@ import java.sql.ResultSet; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.UUID; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -48,17 +44,40 @@ import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaint import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; -import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus; +import 
org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.*; -import com.cloud.agent.api.storage.*; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.agent.api.to.VolumeTO; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotCommand; +import com.cloud.agent.api.CleanupSnapshotBackupCommand; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.ManageSnapshotCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import com.cloud.agent.api.storage.DeleteVolumeCommand; import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -72,46 +91,57 @@ import com.cloud.cluster.ClusterManagerListener; import com.cloud.cluster.ManagementServerHostVO; import com.cloud.configuration.Config; import 
com.cloud.configuration.ConfigurationManager; -import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.dc.Pod; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; -import com.cloud.deploy.DeployDestination; -import com.cloud.domain.Domain; +import com.cloud.deploy.DataCenterDeployment; +import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.dao.DomainDao; -import com.cloud.event.ActionEvent; -import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.EventDao; -import com.cloud.exception.*; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.ConnectionException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceInUseException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.network.NetworkModel; -import com.cloud.offering.ServiceOffering; import com.cloud.org.Grouping; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.server.ManagementServer; import com.cloud.server.StatsCollector; -import 
com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.Type; -import com.cloud.storage.allocator.StoragePoolAllocator; -import com.cloud.storage.dao.*; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.listener.StoragePoolMonitor; import com.cloud.storage.listener.VolumeStateListener; @@ -121,28 +151,41 @@ import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.storage.snapshot.SnapshotScheduler; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.template.TemplateManager; -import com.cloud.user.*; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; -import com.cloud.uservm.UserVm; -import com.cloud.utils.EnumUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.db.*; +import com.cloud.utils.db.DB; +import 
com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.JoinBuilder.JoinType; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.*; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.*; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VirtualMachineProfileImpl; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; @Component @Local(value = { StorageManager.class, StorageService.class }) @@ -157,22 +200,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject protected TemplateManager _tmpltMgr; @Inject - protected AsyncJobManager _asyncMgr; - @Inject - protected SnapshotManager _snapshotMgr; - @Inject - protected SnapshotScheduler _snapshotScheduler; - @Inject protected AccountManager _accountMgr; @Inject protected ConfigurationManager _configMgr; @Inject - protected ConsoleProxyManager _consoleProxyMgr; - @Inject - protected SecondaryStorageVmManager _secStorageMgr; - @Inject - protected NetworkModel _networkMgr; - @Inject protected VolumeDao _volsDao; @Inject protected HostDao _hostDao; @@ -209,7 +240,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject protected VMInstanceDao _vmInstanceDao; @Inject - protected StoragePoolDao 
_storagePoolDao = null; + protected PrimaryDataStoreDao _storagePoolDao = null; @Inject protected CapacityDao _capacityDao; @Inject @@ -237,39 +268,39 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Inject protected ClusterDao _clusterDao; @Inject - protected VirtualMachineManager _vmMgr; - @Inject - protected DomainRouterDao _domrDao; - @Inject - protected SecondaryStorageVmDao _secStrgDao; - @Inject protected StoragePoolWorkDao _storagePoolWorkDao; @Inject protected HypervisorGuruManager _hvGuruMgr; @Inject protected VolumeDao _volumeDao; @Inject - protected OCFS2Manager _ocfs2Mgr; - @Inject - protected ResourceLimitService _resourceLimitMgr; - @Inject protected SecondaryStorageVmManager _ssvmMgr; @Inject - protected ResourceManager _resourceMgr; - @Inject - protected DownloadMonitor _downloadMonitor; - @Inject - protected ResourceTagDao _resourceTagDao; - @Inject protected List _storagePoolAllocators; - @Inject ConfigurationDao _configDao; - @Inject ManagementServer _msServer; + @Inject + ConfigurationDao _configDao; + @Inject + ManagementServer _msServer; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataStoreProviderManager dataStoreProviderMgr; + @Inject + VolumeService volService; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + protected HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; - // TODO : we don't have any instantiated pool discover, disable injection temporarily + // TODO : we don't have any instantiated pool discover, disable injection + // temporarily // @Inject protected List _discoverers; - protected SearchBuilder HostTemplateStatesSearch; protected GenericSearchBuilder UpHostsInPoolSearch; protected SearchBuilder StoragePoolSearch; @@ -288,32 +319,39 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C protected BigDecimal _overProvisioningFactor = new BigDecimal(1); 
private long _maxVolumeSizeInGb; private long _serverId; - private final StateMachine2 _volStateMachine; + private int _customDiskOfferingMinSize = 1; private int _customDiskOfferingMaxSize = 1024; private double _storageUsedThreshold = 1.0d; private double _storageAllocatedThreshold = 1.0d; protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1); + private Map hostListeners = new HashMap(); private boolean _recreateSystemVmEnabled; - public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { + public boolean share(VMInstanceVO vm, List vols, HostVO host, + boolean cancelPreviousShare) throws StorageUnavailableException { // if pool is in maintenance and it is the ONLY pool available; reject - List rootVolForGivenVm = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT); + List rootVolForGivenVm = _volsDao.findByInstanceAndType( + vm.getId(), Type.ROOT); if (rootVolForGivenVm != null && rootVolForGivenVm.size() > 0) { - boolean isPoolAvailable = isPoolAvailable(rootVolForGivenVm.get(0).getPoolId()); + boolean isPoolAvailable = isPoolAvailable(rootVolForGivenVm.get(0) + .getPoolId()); if (!isPoolAvailable) { - throw new StorageUnavailableException("Can not share " + vm, rootVolForGivenVm.get(0).getPoolId()); + throw new StorageUnavailableException("Can not share " + vm, + rootVolForGivenVm.get(0).getPoolId()); } } // this check is done for maintenance mode for primary storage // if any one of the volume is unusable, we return false - // if we return false, the allocator will try to switch to another PS if available + // if we return false, the allocator will try to switch to another PS if + // available for (VolumeVO vol : vols) { if (vol.getRemoved() != null) { - s_logger.warn("Volume id:" + vol.getId() + " is removed, cannot share on this instance"); + s_logger.warn("Volume id:" + vol.getId() + + " is removed, cannot share on this instance"); // not ok to share return false; } @@ 
-323,26 +361,15 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } - @Override - public VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId) { - VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), oldVol.getName(), oldVol.getDataCenterId(), oldVol.getDomainId(), oldVol.getAccountId(), oldVol.getDiskOfferingId(), oldVol.getSize()); - if (templateId != null) { - newVol.setTemplateId(templateId); - } else { - newVol.setTemplateId(oldVol.getTemplateId()); - } - newVol.setDeviceId(oldVol.getDeviceId()); - newVol.setInstanceId(oldVol.getInstanceId()); - newVol.setRecreatable(oldVol.isRecreatable()); - return _volsDao.persist(newVol); - } - private boolean isPoolAvailable(Long poolId) { // get list of all pools List pools = _storagePoolDao.listAll(); // if no pools or 1 pool which is in maintenance - if (pools == null || pools.size() == 0 || (pools.size() == 1 && pools.get(0).getStatus().equals(StoragePoolStatus.Maintenance))) { + if (pools == null + || pools.size() == 0 + || (pools.size() == 1 && pools.get(0).getStatus() + .equals(DataStoreStatus.Maintenance))) { return false; } else { return true; @@ -350,8 +377,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public List ListByDataCenterHypervisor(long datacenterId, HypervisorType type) { - List pools = _storagePoolDao.listByDataCenterId(datacenterId); + public List ListByDataCenterHypervisor( + long datacenterId, HypervisorType type) { + List pools = _storagePoolDao + .listByDataCenterId(datacenterId); List retPools = new ArrayList(); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { @@ -368,21 +397,33 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean isLocalStorageActiveOnHost(Long hostId) { - List storagePoolHostRefs = _storagePoolHostDao.listByHostId(hostId); + List storagePoolHostRefs = _storagePoolHostDao + 
.listByHostId(hostId); for (StoragePoolHostVO storagePoolHostRef : storagePoolHostRefs) { - StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolHostRef.getPoolId()); - if (storagePool.getPoolType() == StoragePoolType.LVM || storagePool.getPoolType() == StoragePoolType.EXT) { - SearchBuilder volumeSB = _volsDao.createSearchBuilder(); - volumeSB.and("poolId", volumeSB.entity().getPoolId(), SearchCriteria.Op.EQ); - volumeSB.and("removed", volumeSB.entity().getRemoved(), SearchCriteria.Op.NULL); + StoragePoolVO PrimaryDataStoreVO = _storagePoolDao + .findById(storagePoolHostRef.getPoolId()); + if (PrimaryDataStoreVO.getPoolType() == StoragePoolType.LVM + || PrimaryDataStoreVO.getPoolType() == StoragePoolType.EXT) { + SearchBuilder volumeSB = _volsDao + .createSearchBuilder(); + volumeSB.and("poolId", volumeSB.entity().getPoolId(), + SearchCriteria.Op.EQ); + volumeSB.and("removed", volumeSB.entity().getRemoved(), + SearchCriteria.Op.NULL); - SearchBuilder activeVmSB = _vmInstanceDao.createSearchBuilder(); - activeVmSB.and("state", activeVmSB.entity().getState(), SearchCriteria.Op.IN); - volumeSB.join("activeVmSB", activeVmSB, volumeSB.entity().getInstanceId(), activeVmSB.entity().getId(), JoinBuilder.JoinType.INNER); + SearchBuilder activeVmSB = _vmInstanceDao + .createSearchBuilder(); + activeVmSB.and("state", activeVmSB.entity().getState(), + SearchCriteria.Op.IN); + volumeSB.join("activeVmSB", activeVmSB, volumeSB.entity() + .getInstanceId(), activeVmSB.entity().getId(), + JoinBuilder.JoinType.INNER); SearchCriteria volumeSC = volumeSB.create(); - volumeSC.setParameters("poolId", storagePool.getId()); - volumeSC.setJoinParameters("activeVmSB", "state", State.Starting, State.Running, State.Stopping, State.Migrating); + volumeSC.setParameters("poolId", PrimaryDataStoreVO.getId()); + volumeSC.setJoinParameters("activeVmSB", "state", + State.Starting, State.Running, State.Stopping, + State.Migrating); List volumes = _volsDao.search(volumeSC, null); if 
(volumes.size() > 0) { @@ -394,26 +435,40 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return false; } - protected StoragePoolVO findStoragePool(DiskProfile dskCh, final DataCenterVO dc, HostPodVO pod, Long clusterId, Long hostId, VMInstanceVO vm, final Set avoid) { + @Override + public StoragePool findStoragePool(DiskProfile dskCh, + final DataCenterVO dc, HostPodVO pod, Long clusterId, Long hostId, + VMInstanceVO vm, final Set avoid) { - VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + VirtualMachineProfile profile = new VirtualMachineProfileImpl( + vm); for (StoragePoolAllocator allocator : _storagePoolAllocators) { - final List poolList = allocator.allocateToPool(dskCh, profile, dc.getId(), pod.getId(), clusterId, hostId, avoid, 1); - if (poolList != null && !poolList.isEmpty()) { - return (StoragePoolVO) poolList.get(0); - } + + ExcludeList avoidList = new ExcludeList(); + for(StoragePool pool : avoid){ + avoidList.addPool(pool.getId()); + } + DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), pod.getId(), clusterId, hostId, null, null); + + final List poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1); + if (poolList != null && !poolList.isEmpty()) { + return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary); + } } return null; } @Override - public Answer[] sendToPool(StoragePool pool, Commands cmds) throws StorageUnavailableException { + public Answer[] sendToPool(StoragePool pool, Commands cmds) + throws StorageUnavailableException { return sendToPool(pool, null, null, cmds).second(); } @Override - public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd) throws StorageUnavailableException { - Answer[] answers = sendToPool(pool, hostIdsToTryFirst, null, new Commands(cmd)).second(); + public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, + Command cmd) throws StorageUnavailableException 
{ + Answer[] answers = sendToPool(pool, hostIdsToTryFirst, null, + new Commands(cmd)).second(); if (answers == null) { return null; } @@ -421,7 +476,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException { + public Answer sendToPool(StoragePool pool, Command cmd) + throws StorageUnavailableException { Answer[] answers = sendToPool(pool, new Commands(cmd)); if (answers == null) { return null; @@ -429,439 +485,27 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return answers[0]; } - @Override - public Answer sendToPool(long poolId, Command cmd) throws StorageUnavailableException { - StoragePool pool = _storagePoolDao.findById(poolId); - return sendToPool(pool, cmd); - } - - @Override - public Answer[] sendToPool(long poolId, Commands cmds) throws StorageUnavailableException { - StoragePool pool = _storagePoolDao.findById(poolId); - return sendToPool(pool, cmds); - } - - protected DiskProfile createDiskCharacteristics(VolumeVO volume, VMTemplateVO template, DataCenterVO dc, DiskOfferingVO diskOffering) { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - SearchCriteria sc = HostTemplateStatesSearch.create(); - sc.setParameters("id", template.getId()); - sc.setParameters("state", com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - sc.setJoinParameters("host", "dcId", dc.getId()); - - List sss = _vmTemplateHostDao.search(sc, null); - if (sss.size() == 0) { - throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + dc.getId()); - } - VMTemplateHostVO ss = sss.get(0); - - return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), ss.getSize(), diskOffering.getTagsArray(), diskOffering.getUseLocalStorage(), - diskOffering.isRecreatable(), 
Storage.ImageFormat.ISO != template.getFormat() ? template.getId() : null); - } else { - return new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); - } - } - - @Override - public boolean canVmRestartOnAnotherServer(long vmId) { - List vols = _volsDao.findCreatedByInstance(vmId); - for (VolumeVO vol : vols) { - if (!vol.isRecreatable() && !vol.getPoolType().isShared()) { - return false; - } - } - return true; - } - - @DB - protected Pair createVolumeFromSnapshot(VolumeVO volume, SnapshotVO snapshot) { - VolumeVO createdVolume = null; - Long volumeId = volume.getId(); - - String volumeFolder = null; - - try { - stateTransitTo(volume, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - s_logger.debug(e.toString()); - return null; - } - // Create the Volume object and save it so that we can return it to the user - Account account = _accountDao.findById(volume.getAccountId()); - - final HashSet poolsToAvoid = new HashSet(); - StoragePoolVO pool = null; - boolean success = false; - Set podsToAvoid = new HashSet(); - Pair pod = null; - String volumeUUID = null; - String details = null; - - DiskOfferingVO diskOffering = _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId()); - DataCenterVO dc = _dcDao.findById(volume.getDataCenterId()); - DiskProfile dskCh = new DiskProfile(volume, diskOffering, snapshot.getHypervisorType()); - - int retry = 0; - // Determine what pod to store the volume in - while ((pod = _resourceMgr.findPod(null, null, dc, account.getId(), podsToAvoid)) != null) { - podsToAvoid.add(pod.first().getId()); - // Determine what storage pool to store the volume in - while ((pool = findStoragePool(dskCh, dc, pod.first(), null, null, null, poolsToAvoid)) != null) { - poolsToAvoid.add(pool); - volumeFolder = pool.getPath(); - if (s_logger.isDebugEnabled()) 
{ - s_logger.debug("Attempting to create volume from snapshotId: " + snapshot.getId() + " on storage pool " + pool.getName()); - } - - // Get the newly created VDI from the snapshot. - // This will return a null volumePath if it could not be created - Pair volumeDetails = createVDIFromSnapshot(UserContext.current().getCallerUserId(), snapshot, pool); - - volumeUUID = volumeDetails.first(); - details = volumeDetails.second(); - - if (volumeUUID != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume with UUID " + volumeUUID + " was created on storage pool " + pool.getName()); - } - success = true; - break; // break out of the "find storage pool" loop - } else { - retry++; - if (retry >= 3) { - _volsDao.expunge(volumeId); - String msg = "Unable to create volume from snapshot " + snapshot.getId() + " after retrying 3 times, due to " + details; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - - } - } - s_logger.warn("Unable to create volume on pool " + pool.getName() + ", reason: " + details); - } - - if (success) { - break; // break out of the "find pod" loop - } - } - - if (!success) { - _volsDao.expunge(volumeId); - String msg = "Unable to create volume from snapshot " + snapshot.getId() + " due to " + details; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - - } - - createdVolume = _volsDao.findById(volumeId); - - try { - if (success) { - createdVolume.setPodId(pod.first().getId()); - createdVolume.setPoolId(pool.getId()); - createdVolume.setPoolType(pool.getPoolType()); - createdVolume.setFolder(volumeFolder); - createdVolume.setPath(volumeUUID); - createdVolume.setDomainId(account.getDomainId()); - stateTransitTo(createdVolume, Volume.Event.OperationSucceeded); - } - } catch (NoTransitionException e) { - s_logger.debug("Failed to update volume state: " + e.toString()); - return null; - } - - return new Pair(createdVolume, details); - } - - @Override - public boolean stateTransitTo(Volume vol, Volume.Event event) 
throws NoTransitionException { - return _volStateMachine.transitTo(vol, event, null, _volsDao); - } - - protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) { - - // By default, assume failure. - VolumeVO createdVolume = null; - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); // Precondition: snapshot is not null and not removed. - - Pair volumeDetails = createVolumeFromSnapshot(volume, snapshot); - if (volumeDetails != null) { - createdVolume = volumeDetails.first(); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, createdVolume.getAccountId(), - createdVolume.getDataCenterId(), createdVolume.getId(), createdVolume.getName(), createdVolume.getDiskOfferingId(), - null, createdVolume.getSize(), Volume.class.getName(), createdVolume.getUuid()); - } - return createdVolume; - } - - protected Pair createVDIFromSnapshot(long userId, SnapshotVO snapshot, StoragePoolVO pool) { - String vdiUUID = null; - Long snapshotId = snapshot.getId(); - Long volumeId = snapshot.getVolumeId(); - Long dcId = snapshot.getDataCenterId(); - String secondaryStoragePoolUrl = _snapMgr.getSecondaryStorageURL(snapshot); - long accountId = snapshot.getAccountId(); - - String backedUpSnapshotUuid = snapshot.getBackupSnapshotId(); - snapshot = _snapshotDao.findById(snapshotId); - if (snapshot.getVersion().trim().equals("2.1")) { - VolumeVO volume = _volsDao.findByIdIncludingRemoved(volumeId); - if (volume == null) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to unable to find orignal volume:" + volumeId + ", try it later "); - } - if (volume.getTemplateId() == null) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); - } else { - VMTemplateVO template = _templateDao.findByIdIncludingRemoved(volume.getTemplateId()); - if (template == null) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to unalbe to find orignal template :" + volume.getTemplateId() + ", 
try it later "); - } - Long templateId = template.getId(); - Long tmpltAccountId = template.getAccountId(); - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException("failed to upgrade snapshot " + snapshotId + " due to this snapshot is being used, try it later "); - } - UpgradeSnapshotCommand cmd = new UpgradeSnapshotCommand(null, secondaryStoragePoolUrl, dcId, accountId, volumeId, templateId, tmpltAccountId, null, snapshot.getBackupSnapshotId(), - snapshot.getName(), "2.1"); - Answer answer = null; - try { - answer = sendToPool(pool, cmd); - } catch (StorageUnavailableException e) { - } finally { - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } - if ((answer != null) && answer.getResult()) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", "2.2"); - } else { - return new Pair(null, "Unable to upgrade snapshot from 2.1 to 2.2 for " + snapshot.getId()); - } - } - } - String basicErrMsg = "Failed to create volume from " + snapshot.getName() + " on pool " + pool; - try { - if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { - _snapshotMgr.downloadSnapshotsFromSwift(snapshot); - } else if (snapshot.getS3Id() != null && snapshot.getS3Id() != 0) { - _snapshotMgr.downloadSnapshotsFromS3(snapshot); - } - CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new CreateVolumeFromSnapshotCommand(pool, secondaryStoragePoolUrl, dcId, accountId, volumeId, - backedUpSnapshotUuid, snapshot.getName(), _createVolumeFromSnapshotWait); - CreateVolumeFromSnapshotAnswer answer; - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException("failed to create volume from " + snapshotId + " due to this snapshot is being used, try it later "); - } - answer = (CreateVolumeFromSnapshotAnswer) sendToPool(pool, createVolumeFromSnapshotCommand); - if (answer != null && answer.getResult()) { - vdiUUID = answer.getVdi(); - } else { - s_logger.error(basicErrMsg + " due to " 
+ ((answer == null) ? "null" : answer.getDetails())); - throw new CloudRuntimeException(basicErrMsg); - } - } catch (StorageUnavailableException e) { - s_logger.error(basicErrMsg); - } finally { - if (snapshot.getSwiftId() != null) { - _snapshotMgr.deleteSnapshotsDirForVolume(secondaryStoragePoolUrl, dcId, accountId, volumeId); - } - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } - return new Pair(vdiUUID, basicErrMsg); - } - - - @Override - @DB - public VolumeVO copyVolumeFromSecToPrimary(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids, long size, HypervisorType hyperType) throws NoTransitionException { - - final HashSet avoidPools = new HashSet(avoids); - DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); - dskCh.setHyperType(vm.getHypervisorType()); - // Find a suitable storage to create volume on - StoragePoolVO destPool = findStoragePool(dskCh, dc, pod, clusterId, null, vm, avoidPools); - - // Copy the volume from secondary storage to the destination storage pool - stateTransitTo(volume, Event.CopyRequested); - VolumeHostVO volumeHostVO = _volumeHostDao.findByVolumeId(volume.getId()); - HostVO secStorage = _hostDao.findById(volumeHostVO.getHostId()); - String secondaryStorageURL = secStorage.getStorageUrl(); - String[] volumePath = volumeHostVO.getInstallPath().split("/"); - String volumeUUID = volumePath[volumePath.length - 1].split("\\.")[0]; - - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volumeUUID, destPool, secondaryStorageURL, false, _copyvolumewait); - CopyVolumeAnswer cvAnswer; - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); - } catch (StorageUnavailableException e1) { - stateTransitTo(volume, Event.CopyFailed); - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - 
} - - if (cvAnswer == null || !cvAnswer.getResult()) { - stateTransitTo(volume, Event.CopyFailed); - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - Transaction txn = Transaction.currentTxn(); - txn.start(); - volume.setPath(cvAnswer.getVolumePath()); - volume.setFolder(destPool.getPath()); - volume.setPodId(destPool.getPodId()); - volume.setPoolId(destPool.getId()); - volume.setPodId(destPool.getPodId()); - stateTransitTo(volume, Event.CopySucceeded); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), volume.getDiskOfferingId(), - null, volume.getSize(), Volume.class.getName(), volume.getUuid()); - _volumeHostDao.remove(volumeHostVO.getId()); - txn.commit(); - return volume; - - } - - @Override - @DB - public VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids, long size, HypervisorType hyperType) { - StoragePoolVO pool = null; - final HashSet avoidPools = new HashSet(avoids); - - try { - stateTransitTo(volume, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - s_logger.debug("Unable to update volume state: " + e.toString()); - return null; - } - - if (diskOffering != null && diskOffering.isCustomized()) { - diskOffering.setDiskSize(size); - } - DiskProfile dskCh = null; - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - dskCh = createDiskCharacteristics(volume, template, dc, offering); - } else { - dskCh = createDiskCharacteristics(volume, template, dc, diskOffering); - } - - dskCh.setHyperType(hyperType); - - VolumeTO created = null; - int retry = _retry; - while (--retry >= 0) { - created = null; - - long podId = pod.getId(); - pod = _podDao.findById(podId); - if (pod 
== null) { - s_logger.warn("Unable to find pod " + podId + " when create volume " + volume.getName()); - break; - } - - pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools); - if (pool == null) { - s_logger.warn("Unable to find storage poll when create volume " + volume.getName()); - break; - } - - avoidPools.add(pool); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create " + volume + " on " + pool); - } - - CreateCommand cmd = null; - VMTemplateStoragePoolVO tmpltStoredOn = null; - - for (int i = 0; i < 2; i++) { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO != template.getFormat()) { - if (pool.getPoolType() == StoragePoolType.CLVM) { - //prepareISOForCreate does what we need, which is to tell us where the template is - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - continue; - } - HostVO secondaryStorageHost = _hostDao.findById(tmpltHostOn.getHostId()); - String tmpltHostUrl = secondaryStorageHost.getStorageUrl(); - String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); - cmd = new CreateCommand(dskCh, fullTmpltUrl, new StorageFilerTO(pool)); - } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - continue; - } - cmd = new CreateCommand(dskCh, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); - } - } else { - if (volume.getVolumeType() == Type.ROOT && Storage.ImageFormat.ISO == template.getFormat()) { - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - throw new CloudRuntimeException("Did not find ISO in secondry storage in zone " + pool.getDataCenterId()); - } - } - cmd = new CreateCommand(dskCh, new StorageFilerTO(pool)); - } - - try { - Answer answer = sendToPool(pool, cmd); - if (answer != null && answer.getResult()) { - created = ((CreateAnswer) answer).getVolume(); - break; - } - 
- if (tmpltStoredOn != null && answer != null && (answer instanceof CreateAnswer) && ((CreateAnswer) answer).templateReloadRequested()) { - if (!_tmpltMgr.resetTemplateDownloadStateOnPool(tmpltStoredOn.getId())) { - break; // break out of template-redeploy retry loop - } - } else { - break; - } - } catch (StorageUnavailableException e) { - s_logger.debug("Storage unavailable for " + pool.getId()); - break; // break out of template-redeploy retry loop - } - } - - if (created != null) { - break; - } - - s_logger.debug("Retrying the create because it failed on pool " + pool); - } - - if (created == null) { - return null; - } else { - volume.setFolder(pool.getPath()); - volume.setPath(created.getPath()); - volume.setSize(created.getSize()); - volume.setPoolType(pool.getPoolType()); - volume.setPoolId(pool.getId()); - volume.setPodId(pod.getId()); - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Unable to update volume state: " + e.toString()); - return null; - } - return volume; - } - } - - public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { + public Long chooseHostForStoragePool(StoragePoolVO poolVO, + List avoidHosts, boolean sendToVmResidesOn, Long vmId) { if (sendToVmResidesOn) { if (vmId != null) { VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); if (vmInstance != null) { Long hostId = vmInstance.getHostId(); - if (hostId != null && !avoidHosts.contains(vmInstance.getHostId())) { + if (hostId != null + && !avoidHosts.contains(vmInstance.getHostId())) { return hostId; } } } /* - * Can't find the vm where host resides on(vm is destroyed? or volume is detached from vm), randomly choose - * a host - * to send the cmd + * Can't find the vm where host resides on(vm is destroyed? 
or + * volume is detached from vm), randomly choose a host to send the + * cmd */ } - List poolHosts = _poolHostDao.listByHostStatus(poolVO.getId(), Status.Up); + List poolHosts = _poolHostDao.listByHostStatus( + poolVO.getId(), Status.Up); Collections.shuffle(poolHosts); if (poolHosts != null && poolHosts.size() > 0) { for (StoragePoolHostVO sphvo : poolHosts) { @@ -876,9 +520,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean configure(String name, Map params) throws ConfigurationException { - Map configs = _configDao.getConfiguration("management-server", params); - String overProvisioningFactorStr = configs.get("storage.overprovisioning.factor"); + Map configs = _configDao.getConfiguration( + "management-server", params); + + String overProvisioningFactorStr = configs + .get("storage.overprovisioning.factor"); if (overProvisioningFactorStr != null) { _overProvisioningFactor = new BigDecimal(overProvisioningFactorStr); } @@ -886,94 +533,128 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _retry = NumbersUtil.parseInt(configs.get(Config.StartRetry.key()), 10); _pingInterval = NumbersUtil.parseInt(configs.get("ping.interval"), 60); _hostRetry = NumbersUtil.parseInt(configs.get("host.retry"), 2); - _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); - s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); + _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt( + configs.get("pool.acquisition.wait.seconds"), 1800); + s_logger.info("pool.acquisition.wait.seconds is configured as " + + _storagePoolAcquisitionWaitSeconds + " seconds"); - _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao), true, false, true); + _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, + _storagePoolDao), true, false, true); String 
storageCleanupEnabled = configs.get("storage.cleanup.enabled"); - _storageCleanupEnabled = (storageCleanupEnabled == null) ? true : Boolean.parseBoolean(storageCleanupEnabled); + _storageCleanupEnabled = (storageCleanupEnabled == null) ? true + : Boolean.parseBoolean(storageCleanupEnabled); - String value = _configDao.getValue(Config.CreateVolumeFromSnapshotWait.toString()); - _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue())); + String value = _configDao.getValue(Config.CreateVolumeFromSnapshotWait + .toString()); + _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CreateVolumeFromSnapshotWait + .getDefaultValue())); value = _configDao.getValue(Config.CopyVolumeWait.toString()); - _copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); value = _configDao.getValue(Config.RecreateSystemVmEnabled.key()); _recreateSystemVmEnabled = Boolean.parseBoolean(value); value = _configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); - _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value)); + _templateCleanupEnabled = (value == null ? 
true : Boolean + .parseBoolean(value)); String time = configs.get("storage.cleanup.interval"); _storageCleanupInterval = NumbersUtil.parseInt(time, 86400); - String storageUsedThreshold = _configDao.getValue(Config.StorageCapacityDisableThreshold.key()); + String storageUsedThreshold = _configDao + .getValue(Config.StorageCapacityDisableThreshold.key()); if (storageUsedThreshold != null) { _storageUsedThreshold = Double.parseDouble(storageUsedThreshold); } - String storageAllocatedThreshold = _configDao.getValue(Config.StorageAllocatedCapacityDisableThreshold.key()); + String storageAllocatedThreshold = _configDao + .getValue(Config.StorageAllocatedCapacityDisableThreshold.key()); if (storageAllocatedThreshold != null) { - _storageAllocatedThreshold = Double.parseDouble(storageAllocatedThreshold); + _storageAllocatedThreshold = Double + .parseDouble(storageAllocatedThreshold); } - String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor"); - _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f)); + String globalStorageOverprovisioningFactor = configs + .get("storage.overprovisioning.factor"); + _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat( + globalStorageOverprovisioningFactor, 2.0f)); - s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + ", interval: " + _storageCleanupInterval + ", template cleanup enabled: " + _templateCleanupEnabled); + s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + + ", interval: " + _storageCleanupInterval + + ", template cleanup enabled: " + _templateCleanupEnabled); String workers = configs.get("expunge.workers"); int wrks = NumbersUtil.parseInt(workers, 10); - _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("StorageManager-Scavenger")); + _executor = Executors.newScheduledThreadPool(wrks, + new NamedThreadFactory("StorageManager-Scavenger")); - 
_agentMgr.registerForHostEvents(ComponentContext.inject(LocalStoragePoolListener.class), true, false, false); + _agentMgr.registerForHostEvents( + ComponentContext.inject(LocalStoragePoolListener.class), true, + false, false); - String maxVolumeSizeInGbString = _configDao.getValue("storage.max.volume.size"); - _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, 2000); + String maxVolumeSizeInGbString = _configDao + .getValue("storage.max.volume.size"); + _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, + 2000); - String _customDiskOfferingMinSizeStr = _configDao.getValue(Config.CustomDiskOfferingMinSize.toString()); - _customDiskOfferingMinSize = NumbersUtil.parseInt(_customDiskOfferingMinSizeStr, Integer.parseInt(Config.CustomDiskOfferingMinSize.getDefaultValue())); + String _customDiskOfferingMinSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMinSize.toString()); + _customDiskOfferingMinSize = NumbersUtil.parseInt( + _customDiskOfferingMinSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMinSize + .getDefaultValue())); - String _customDiskOfferingMaxSizeStr = _configDao.getValue(Config.CustomDiskOfferingMaxSize.toString()); - _customDiskOfferingMaxSize = NumbersUtil.parseInt(_customDiskOfferingMaxSizeStr, Integer.parseInt(Config.CustomDiskOfferingMaxSize.getDefaultValue())); + String _customDiskOfferingMaxSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMaxSize.toString()); + _customDiskOfferingMaxSize = NumbersUtil.parseInt( + _customDiskOfferingMaxSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMaxSize + .getDefaultValue())); - HostTemplateStatesSearch = _vmTemplateHostDao.createSearchBuilder(); - HostTemplateStatesSearch.and("id", HostTemplateStatesSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); - HostTemplateStatesSearch.and("state", HostTemplateStatesSearch.entity().getDownloadState(), SearchCriteria.Op.EQ); - - SearchBuilder HostSearch = _hostDao.createSearchBuilder(); - 
HostSearch.and("dcId", HostSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); - - HostTemplateStatesSearch.join("host", HostSearch, HostSearch.entity().getId(), HostTemplateStatesSearch.entity().getHostId(), JoinBuilder.JoinType.INNER); - HostSearch.done(); - HostTemplateStatesSearch.done(); _serverId = _msServer.getId(); - UpHostsInPoolSearch = _storagePoolHostDao.createSearchBuilder(Long.class); - UpHostsInPoolSearch.selectField(UpHostsInPoolSearch.entity().getHostId()); + UpHostsInPoolSearch = _storagePoolHostDao + .createSearchBuilder(Long.class); + UpHostsInPoolSearch.selectField(UpHostsInPoolSearch.entity() + .getHostId()); SearchBuilder hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ); - hostSearch.and("resourceState", hostSearch.entity().getResourceState(), Op.EQ); - UpHostsInPoolSearch.join("hosts", hostSearch, hostSearch.entity().getId(), UpHostsInPoolSearch.entity().getHostId(), JoinType.INNER); - UpHostsInPoolSearch.and("pool", UpHostsInPoolSearch.entity().getPoolId(), Op.EQ); + hostSearch.and("resourceState", hostSearch.entity().getResourceState(), + Op.EQ); + UpHostsInPoolSearch.join("hosts", hostSearch, hostSearch.entity() + .getId(), UpHostsInPoolSearch.entity().getHostId(), + JoinType.INNER); + UpHostsInPoolSearch.and("pool", UpHostsInPoolSearch.entity() + .getPoolId(), Op.EQ); UpHostsInPoolSearch.done(); StoragePoolSearch = _vmInstanceDao.createSearchBuilder(); SearchBuilder volumeSearch = _volumeDao.createSearchBuilder(); - volumeSearch.and("volumeType", volumeSearch.entity().getVolumeType(), SearchCriteria.Op.EQ); - volumeSearch.and("poolId", volumeSearch.entity().getPoolId(), SearchCriteria.Op.EQ); - StoragePoolSearch.join("vmVolume", volumeSearch, volumeSearch.entity().getInstanceId(), StoragePoolSearch.entity().getId(), JoinBuilder.JoinType.INNER); + volumeSearch.and("volumeType", volumeSearch.entity().getVolumeType(), + SearchCriteria.Op.EQ); + volumeSearch.and("poolId", 
volumeSearch.entity().getPoolId(), + SearchCriteria.Op.EQ); + StoragePoolSearch.join("vmVolume", volumeSearch, volumeSearch.entity() + .getInstanceId(), StoragePoolSearch.entity().getId(), + JoinBuilder.JoinType.INNER); StoragePoolSearch.done(); LocalStorageSearch = _storagePoolDao.createSearchBuilder(); - SearchBuilder storageHostSearch = _storagePoolHostDao.createSearchBuilder(); - storageHostSearch.and("hostId", storageHostSearch.entity().getHostId(), SearchCriteria.Op.EQ); - LocalStorageSearch.join("poolHost", storageHostSearch, storageHostSearch.entity().getPoolId(), LocalStorageSearch.entity().getId(), JoinBuilder.JoinType.INNER); - LocalStorageSearch.and("type", LocalStorageSearch.entity().getPoolType(), SearchCriteria.Op.IN); + SearchBuilder storageHostSearch = _storagePoolHostDao + .createSearchBuilder(); + storageHostSearch.and("hostId", storageHostSearch.entity().getHostId(), + SearchCriteria.Op.EQ); + LocalStorageSearch.join("poolHost", storageHostSearch, + storageHostSearch.entity().getPoolId(), LocalStorageSearch + .entity().getId(), JoinBuilder.JoinType.INNER); + LocalStorageSearch.and("type", LocalStorageSearch.entity() + .getPoolType(), SearchCriteria.Op.IN); LocalStorageSearch.done(); Volume.State.getStateMachine().registerListener( new VolumeStateListener()); @@ -981,159 +662,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } - public String getRandomVolumeName() { - return UUID.randomUUID().toString(); - } - - @Override - public boolean volumeOnSharedStoragePool(VolumeVO volume) { - Long poolId = volume.getPoolId(); - if (poolId == null) { - return false; - } else { - StoragePoolVO pool = _storagePoolDao.findById(poolId); - - if (pool == null) { - return false; - } else { - return pool.isShared(); - } - } - } - - @Override - public boolean volumeInactive(VolumeVO volume) { - Long vmId = volume.getInstanceId(); - if (vmId != null) { - UserVm vm = _userVmDao.findById(vmId); - if (vm == null) { - 
return true; - } - State state = vm.getState(); - if (state.equals(State.Stopped) || state.equals(State.Destroyed)) { - return true; - } - } - return false; - } - - @Override - public String getVmNameOnVolume(VolumeVO volume) { - Long vmId = volume.getInstanceId(); - if (vmId != null) { - VMInstanceVO vm = _vmInstanceDao.findById(vmId); - - if (vm == null) { - return null; - } - return vm.getInstanceName(); - } - return null; - } - - @Override - public Pair getAbsoluteIsoPath(long templateId, long dataCenterId) { - String isoPath = null; - - List storageHosts = _resourceMgr.listAllHostsInOneZoneByType(Host.Type.SecondaryStorage, dataCenterId); - if (storageHosts != null) { - for (HostVO storageHost : storageHosts) { - List templateHostVOs = _vmTemplateHostDao.listByTemplateHostStatus(templateId, storageHost.getId(), VMTemplateStorageResourceAssoc.Status.DOWNLOADED ); - if (templateHostVOs != null && !templateHostVOs.isEmpty()) { - VMTemplateHostVO tmpHostVO = templateHostVOs.get(0); - isoPath = storageHost.getStorageUrl() + "/" + tmpHostVO.getInstallPath(); - return new Pair(isoPath, storageHost.getStorageUrl()); - } - } - } - s_logger.warn("Unable to find secondary storage in zone id=" + dataCenterId); - return null; - } - - @Override - public String getSecondaryStorageURL(long zoneId) { - // Determine the secondary storage URL - HostVO secondaryStorageHost = getSecondaryStorageHost(zoneId); - - if (secondaryStorageHost == null) { - return null; - } - - return secondaryStorageHost.getStorageUrl(); - } - - @Override - public HostVO getSecondaryStorageHost(long zoneId, long tmpltId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - return null; - } - for (HostVO host : hosts) { - VMTemplateHostVO tmpltHost = _vmTemplateHostDao.findByHostTemplate(host.getId(), tmpltId); - if (tmpltHost != null && !tmpltHost.getDestroyed() && tmpltHost.getDownloadState() == 
VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - return host; - } - } - return null; - } - - @Override - public VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, boolean readyOnly) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - return null; - } - VMTemplateHostVO inProgress = null; - VMTemplateHostVO other = null; - for (HostVO host : hosts) { - VMTemplateHostVO tmpltHost = _vmTemplateHostDao.findByHostTemplate(host.getId(), tmpltId); - if (tmpltHost != null && !tmpltHost.getDestroyed()) { - if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - return tmpltHost; - } else if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { - inProgress = tmpltHost; - } else { - other = tmpltHost; - } - } - } - if (inProgress != null) { - return inProgress; - } - return other; - } - - @Override - public HostVO getSecondaryStorageHost(long zoneId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); - if (hosts.isEmpty()) { - return null; - } - } - - int size = hosts.size(); - Random rn = new Random(); - int index = rn.nextInt(size); - return hosts.get(index); - } - - @Override - public List getSecondaryStorageHosts(long zoneId) { - List hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); - if (hosts == null || hosts.size() == 0) { - hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); - if (hosts.isEmpty()) { - return new ArrayList(); - } - } - return hosts; - } - + @Override public String getStoragePoolTags(long poolId) { - return _configMgr.listToCsvTags(_storagePoolDao.searchForStoragePoolDetails(poolId, "true")); + return _configMgr.listToCsvTags(_storagePoolDao + .searchForStoragePoolDetails(poolId, "true")); } @Override @@ -1141,7 +674,8 @@ public class 
StorageManagerImpl extends ManagerBase implements StorageManager, C if (_storageCleanupEnabled) { Random generator = new Random(); int initialDelay = generator.nextInt(_storageCleanupInterval); - _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), initialDelay, _storageCleanupInterval, TimeUnit.SECONDS); + _executor.scheduleWithFixedDelay(new StorageGarbageCollector(), + initialDelay, _storageCleanupInterval, TimeUnit.SECONDS); } else { s_logger.debug("Storage cleanup is not enabled, so the storage cleanup thread is not being scheduled."); } @@ -1157,22 +691,99 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return true; } + + @DB + @Override + public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws ConnectionException { - protected StorageManagerImpl() { - _volStateMachine = Volume.State.getStateMachine(); + DataCenterVO dc = _dcDao.findById(host.getDataCenterId()); + if (dc == null || !dc.isLocalStorageEnabled()) { + return null; + } + DataStore store = null; + try { + StoragePoolVO pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), pInfo.getHostPath(), pInfo.getUuid()); + if(pool == null && host.getHypervisorType() == HypervisorType.VMware) { + // perform run-time upgrade. 
In versions prior to 2.2.12, there is a bug that we don't save local datastore info (host path is empty), this will cause us + // not able to distinguish multiple local datastores that may be available on the host, to support smooth migration, we + // need to perform runtime upgrade here + if(pInfo.getHostPath().length() > 0) { + pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), pInfo.getHost(), "", pInfo.getUuid()); + } + } + DataStoreProvider provider = this.dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + if (pool == null) { + Map params = new HashMap(); + String name = (host.getName() + " Local Storage"); + params.put("zoneId", host.getDataCenterId()); + params.put("clusterId", host.getClusterId()); + params.put("podId", host.getPodId()); + params.put("url", pInfo.getPoolType().toString() + "://" + pInfo.getHost() + "/" + pInfo.getHostPath()); + params.put("name", name); + params.put("localStorage", true); + params.put("details", pInfo.getDetails()); + params.put("uuid", pInfo.getUuid()); + params.put("providerId", provider.getId()); + + store = lifeCycle.initialize(params); + } else { + store = (DataStore) dataStoreMgr.getDataStore(pool.getId(), + DataStoreRole.Primary); + } + + HostScope scope = new HostScope(host.getId()); + lifeCycle.attachHost(store, scope, pInfo); + } catch (Exception e) { + s_logger.warn("Unable to setup the local storage pool for " + host, e); + throw new ConnectionException(true, "Unable to setup the local storage pool for " + host, e); + } + + return (DataStore) dataStoreMgr.getDataStore(store.getId(), + DataStoreRole.Primary); } @Override @SuppressWarnings("rawtypes") - public StoragePoolVO createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { - Long clusterId = cmd.getClusterId(); - Long podId = cmd.getPodId(); - Map ds = cmd.getDetails(); 
+ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) + throws ResourceInUseException, IllegalArgumentException, + UnknownHostException, ResourceUnavailableException { + String providerUuid = cmd.getStorageProviderUuid(); + DataStoreProvider storeProvider = dataStoreProviderMgr + .getDataStoreProviderByUuid(providerUuid); - if (clusterId != null && podId == null) { - throw new InvalidParameterValueException("Cluster id requires pod id"); + if (storeProvider == null) { + storeProvider = dataStoreProviderMgr.getDefaultPrimaryDataStoreProvider(); + if (storeProvider == null) { + throw new InvalidParameterValueException( + "can't find storage provider: " + providerUuid); + } } + Long clusterId = cmd.getClusterId(); + Long podId = cmd.getPodId(); + Long zoneId = cmd.getZoneId(); + + ScopeType scopeType = ScopeType.CLUSTER; + String scope = cmd.getScope(); + if (scope != null) { + try { + scopeType = Enum.valueOf(ScopeType.class, scope.toUpperCase()); + } catch (Exception e) { + throw new InvalidParameterValueException("invalid scope" + + scope); + } + } + + if (scopeType == ScopeType.CLUSTER && clusterId == null) { + throw new InvalidParameterValueException( + "cluster id can't be null, if scope is cluster"); + } else if (scopeType == ScopeType.ZONE && zoneId == null) { + throw new InvalidParameterValueException( + "zone id can't be null, if scope is zone"); + } + + Map ds = cmd.getDetails(); Map details = new HashMap(); if (ds != null) { Collection detailsCollection = ds.values(); @@ -1182,233 +793,69 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C Iterator it2 = d.entrySet().iterator(); while (it2.hasNext()) { Map.Entry entry = (Map.Entry) it2.next(); - details.put((String) entry.getKey(), (String) entry.getValue()); + details.put((String) entry.getKey(), + (String) entry.getValue()); } } } - // verify input parameters - Long zoneId = cmd.getZoneId(); DataCenterVO zone = _dcDao.findById(cmd.getZoneId()); if (zone == 
null) { - throw new InvalidParameterValueException("unable to find zone by id " + zoneId); + throw new InvalidParameterValueException( + "unable to find zone by id " + zoneId); } // Check if zone is disabled Account account = UserContext.current().getCaller(); - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getType())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && !_accountMgr.isRootAdmin(account.getType())) { + throw new PermissionDeniedException( + "Cannot perform this operation, Zone is currently disabled: " + + zoneId); } - // Check if there is host up in this cluster - List allHosts = _resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing, clusterId, podId, zoneId); - if (allHosts.isEmpty()) { - throw new ResourceUnavailableException("No host up to associate a storage pool with in cluster " + clusterId, Pod.class, podId); - } - URI uri = null; + Map params = new HashMap(); + params.put("zoneId", zone.getId()); + params.put("clusterId", clusterId); + params.put("podId", podId); + params.put("url", cmd.getUrl()); + params.put("tags", cmd.getTags()); + params.put("name", cmd.getStoragePoolName()); + params.put("details", details); + params.put("providerId", storeProvider.getId()); + + DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + DataStore store = null; try { - uri = new URI(UriUtils.encodeURIComponent(cmd.getUrl())); - if (uri.getScheme() == null) { - throw new InvalidParameterValueException("scheme is null " + cmd.getUrl() + ", add nfs:// as a prefix"); - } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - String uriHost = uri.getHost(); - String uriPath = uri.getPath(); - if (uriHost == null || uriPath == null || uriHost.trim().isEmpty() || uriPath.trim().isEmpty()) { - throw new InvalidParameterValueException("host or path is null, should be 
nfs://hostname/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("sharedMountPoint")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException("host or path is null, should be sharedmountpoint://localhost/path"); - } - } else if (uri.getScheme().equalsIgnoreCase("rbd")) { - String uriPath = uri.getPath(); - if (uriPath == null) { - throw new InvalidParameterValueException("host or path is null, should be rbd://hostname/pool"); - } + store = lifeCycle.initialize(params); + + if (scopeType == ScopeType.CLUSTER) { + ClusterScope clusterScope = new ClusterScope(clusterId, podId, + zoneId); + lifeCycle.attachCluster(store, clusterScope); + } else if (scopeType == ScopeType.ZONE) { + ZoneScope zoneScope = new ZoneScope(zoneId); + lifeCycle.attachZone(store, zoneScope); } - } catch (URISyntaxException e) { - throw new InvalidParameterValueException(cmd.getUrl() + " is not a valid uri"); + } catch (Exception e) { + s_logger.debug("Failed to add data store", e); + throw new CloudRuntimeException("Failed to add data store", e); } - String tags = cmd.getTags(); - if (tags != null) { - String[] tokens = tags.split(","); - - for (String tag : tokens) { - tag = tag.trim(); - if (tag.length() == 0) { - continue; - } - details.put(tag, "true"); - } - } - - String scheme = uri.getScheme(); - String storageHost = uri.getHost(); - String hostPath = uri.getPath(); - String userInfo = uri.getUserInfo(); - int port = uri.getPort(); - StoragePoolVO pool = null; - if (s_logger.isDebugEnabled()) { - s_logger.debug("createPool Params @ scheme - " + scheme + " storageHost - " + storageHost + " hostPath - " + hostPath + " port - " + port); - } - if (scheme.equalsIgnoreCase("nfs")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem, storageHost, port, hostPath); - if (clusterId == null) { - throw new IllegalArgumentException("NFS need to have clusters specified for XenServers"); - } - } 
else if (scheme.equalsIgnoreCase("file")) { - if (port == -1) { - port = 0; - } - pool = new StoragePoolVO(StoragePoolType.Filesystem, "localhost", 0, hostPath); - } else if (scheme.equalsIgnoreCase("sharedMountPoint")) { - pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("clvm")) { - pool = new StoragePoolVO(StoragePoolType.CLVM, storageHost, 0, hostPath.replaceFirst("/", "")); - } else if (scheme.equalsIgnoreCase("rbd")) { - if (port == -1) { - port = 6789; - } - pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, port, hostPath.replaceFirst("/", ""), userInfo); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - pool = new StoragePoolVO(StoragePoolType.PreSetup, storageHost, 0, hostPath); - } else if (scheme.equalsIgnoreCase("iscsi")) { - String[] tokens = hostPath.split("/"); - int lun = NumbersUtil.parseInt(tokens[tokens.length - 1], -1); - if (port == -1) { - port = 3260; - } - if (lun != -1) { - if (clusterId == null) { - throw new IllegalArgumentException("IscsiLUN need to have clusters specified"); - } - hostPath.replaceFirst("/", ""); - pool = new StoragePoolVO(StoragePoolType.IscsiLUN, storageHost, port, hostPath); - } else { - for (StoragePoolDiscoverer discoverer : _discoverers) { - Map> pools; - try { - pools = discoverer.find(cmd.getZoneId(), podId, uri, details); - } catch (DiscoveryException e) { - throw new IllegalArgumentException("Not enough information for discovery " + uri, e); - } - if (pools != null) { - Map.Entry> entry = pools.entrySet().iterator().next(); - pool = entry.getKey(); - details = entry.getValue(); - break; - } - } - } - } else if (scheme.equalsIgnoreCase("iso")) { - if (port == -1) { - port = 2049; - } - pool = new StoragePoolVO(StoragePoolType.ISO, storageHost, port, hostPath); - } else if (scheme.equalsIgnoreCase("vmfs")) { - pool = new StoragePoolVO(StoragePoolType.VMFS, "VMFS datastore: " + hostPath, 0, hostPath); - } else if 
(scheme.equalsIgnoreCase("ocfs2")) { - port = 7777; - pool = new StoragePoolVO(StoragePoolType.OCFS2, "clustered", port, hostPath); - } else { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + uri); - } - - if (pool == null) { - s_logger.warn("Unable to figure out the scheme for URI: " + uri); - throw new IllegalArgumentException("Unable to figure out the scheme for URI: " + uri); - } - - List pools = _storagePoolDao.listPoolByHostPath(storageHost, hostPath); - if (!pools.isEmpty() && !scheme.equalsIgnoreCase("sharedmountpoint")) { - Long oldPodId = pools.get(0).getPodId(); - throw new ResourceInUseException("Storage pool " + uri + " already in use by another pod (id=" + oldPodId + ")", "StoragePool", uri.toASCIIString()); - } - - long poolId = _storagePoolDao.getNextInSequence(Long.class, "id"); - String uuid = null; - if (scheme.equalsIgnoreCase("sharedmountpoint") || scheme.equalsIgnoreCase("clvm")) { - uuid = UUID.randomUUID().toString(); - } else if (scheme.equalsIgnoreCase("PreSetup")) { - uuid = hostPath.replace("/", ""); - } else { - uuid = UUID.nameUUIDFromBytes(new String(storageHost + hostPath).getBytes()).toString(); - } - - List spHandles = _storagePoolDao.findIfDuplicatePoolsExistByUUID(uuid); - if ((spHandles != null) && (spHandles.size() > 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Another active pool with the same uuid already exists"); - } - throw new ResourceInUseException("Another active pool with the same uuid already exists"); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("In createPool Setting poolId - " + poolId + " uuid - " + uuid + " zoneId - " + zoneId + " podId - " + podId + " poolName - " + cmd.getStoragePoolName()); - } - - pool.setId(poolId); - pool.setUuid(uuid); - pool.setDataCenterId(cmd.getZoneId()); - pool.setPodId(podId); - pool.setName(cmd.getStoragePoolName()); - pool.setClusterId(clusterId); - 
pool.setStatus(StoragePoolStatus.Up); - pool = _storagePoolDao.persist(pool, details); - - if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, pool)) { - s_logger.warn("Can not create storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } - - boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), pool); - if (success) { - break; - } - } - if (!success) { - s_logger.warn("Can not create storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } - s_logger.debug("In createPool Adding the pool to each of the hosts"); - List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - connectHostToSharedPool(h.getId(), pool); - poolHosts.add(h); - } catch (Exception e) { - s_logger.warn("Unable to establish a connection between " + h + " and " + pool, e); - } - } - - if (poolHosts.isEmpty()) { - s_logger.warn("No host can access storage pool " + pool + " on cluster " + clusterId); - _storagePoolDao.expunge(pool.getId()); - return null; - } else { - createCapacityEntry(pool); - } - return pool; + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(store.getId(), + DataStoreRole.Primary); } @Override - public StoragePoolVO updateStoragePool(UpdateStoragePoolCmd cmd) throws IllegalArgumentException { + public PrimaryDataStoreInfo updateStoragePool(UpdateStoragePoolCmd cmd) + throws IllegalArgumentException { // Input validation Long id = cmd.getId(); List tags = cmd.getTags(); StoragePoolVO pool = _storagePoolDao.findById(id); if (pool == null) { - throw new IllegalArgumentException("Unable to find storage pool with ID: " + id); + throw new IllegalArgumentException( + "Unable to find storage pool with ID: " + id); } if (tags != null) { @@ -1423,834 +870,102 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _storagePoolDao.updateDetails(id, 
details); } - return pool; + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(pool.getId(), + DataStoreRole.Primary); } @Override @DB public boolean deletePool(DeletePoolCmd cmd) { Long id = cmd.getId(); - boolean deleteFlag = false; boolean forced = cmd.isForced(); - // verify parameters StoragePoolVO sPool = _storagePoolDao.findById(id); if (sPool == null) { s_logger.warn("Unable to find pool:" + id); - throw new InvalidParameterValueException("Unable to find pool by id " + id); + throw new InvalidParameterValueException( + "Unable to find pool by id " + id); } - if(sPool.getStatus() != StoragePoolStatus.Maintenance){ - s_logger.warn("Unable to delete storage id: " + id +" due to it is not in Maintenance state"); - throw new InvalidParameterValueException("Unable to delete storage due to it is not in Maintenance state, id: " + id); + if (sPool.getStatus() != StoragePoolStatus.Maintenance) { + s_logger.warn("Unable to delete storage id: " + id + + " due to it is not in Maintenance state"); + throw new InvalidParameterValueException( + "Unable to delete storage due to it is not in Maintenance state, id: " + + id); } - if (sPool.getPoolType().equals(StoragePoolType.LVM) || sPool.getPoolType().equals(StoragePoolType.EXT)) { + if (sPool.isLocal()) { s_logger.warn("Unable to delete local storage id:" + id); - throw new InvalidParameterValueException("Unable to delete local storage id: " + id); + throw new InvalidParameterValueException( + "Unable to delete local storage id: " + id); } Pair vlms = _volsDao.getCountAndTotalByPool(id); if (forced) { if (vlms.first() > 0) { - Pair nonDstrdVlms = _volsDao.getNonDestroyedCountAndTotalByPool(id); + Pair nonDstrdVlms = _volsDao + .getNonDestroyedCountAndTotalByPool(id); if (nonDstrdVlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated " + - "non-destroyed vols for this pool"); + throw new CloudRuntimeException("Cannot delete pool " + + sPool.getName() + 
" as there are associated " + + "non-destroyed vols for this pool"); } - //force expunge non-destroyed volumes + // force expunge non-destroyed volumes List vols = _volsDao.listVolumesToBeDestroyed(); for (VolumeVO vol : vols) { - expungeVolume(vol, true); + AsyncCallFuture future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId())); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("expunge volume failed" + vol.getId(), e); + } catch (ExecutionException e) { + s_logger.debug("expunge volume failed" + vol.getId(), e); + } } } } else { // Check if the pool has associated volumes in the volumes table // If it does , then you cannot delete the pool if (vlms.first() > 0) { - throw new CloudRuntimeException("Cannot delete pool " + sPool.getName() + " as there are associated vols" + - " for this pool"); + throw new CloudRuntimeException("Cannot delete pool " + + sPool.getName() + " as there are associated vols" + + " for this pool"); } } - // First get the host_id from storage_pool_host_ref for given pool id - StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool.getId()); + StoragePoolVO lock = _storagePoolDao.acquireInLockTable(sPool + .getId()); if (lock == null) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Failed to acquire lock when deleting StoragePool with ID: " + sPool.getId()); + s_logger.debug("Failed to acquire lock when deleting PrimaryDataStoreVO with ID: " + + sPool.getId()); } return false; } - // mark storage pool as removed (so it can't be used for new volumes creation), release the lock - boolean isLockReleased = false; - isLockReleased = _storagePoolDao.releaseFromLockTable(lock.getId()); + _storagePoolDao.releaseFromLockTable(lock.getId()); s_logger.trace("Released lock for storage pool " + id); - // for the given pool id, find all records in the storage_pool_host_ref - List hostPoolRecords = _storagePoolHostDao.listByPoolId(id); - Transaction txn = Transaction.currentTxn(); - try { - 
// if not records exist, delete the given pool (base case) - if (hostPoolRecords.size() == 0) { - - txn.start(); - sPool.setUuid(null); - _storagePoolDao.update(id, sPool); - _storagePoolDao.remove(id); - deletePoolStats(id); - txn.commit(); - - deleteFlag = true; - return true; - } else { - // Remove the SR associated with the Xenserver - for (StoragePoolHostVO host : hostPoolRecords) { - DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(sPool); - final Answer answer = _agentMgr.easySend(host.getHostId(), deleteCmd); - - if (answer != null && answer.getResult()) { - deleteFlag = true; - break; - } - } - } - } finally { - if (deleteFlag) { - // now delete the storage_pool_host_ref and storage_pool records - txn.start(); - for (StoragePoolHostVO host : hostPoolRecords) { - _storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId()); - } - sPool.setUuid(null); - _storagePoolDao.update(id, sPool); - _storagePoolDao.remove(id); - deletePoolStats(id); - // Delete op_host_capacity entries - _capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, id); - txn.commit(); - - s_logger.debug("Storage pool id=" + id + " is removed successfully"); - return true; - } else { - // alert that the storage cleanup is required - s_logger.warn("Failed to Delete storage pool id: " + id); - _alertMgr.sendAlert(AlertManager.ALERT_TYPE_STORAGE_DELETE, sPool.getDataCenterId(), sPool.getPodId(), "Unable to delete storage pool id= " + id, - "Delete storage pool command failed. 
Please check logs."); - } - - if (lock != null && !isLockReleased) { - _storagePoolDao.releaseFromLockTable(lock.getId()); - } - } + DataStoreProvider storeProvider = dataStoreProviderMgr + .getDataStoreProviderById(sPool.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = storeProvider.getLifeCycle(); + lifeCycle.deleteDataStore(id); return false; - - } - - @DB - private boolean deletePoolStats(Long poolId) { - CapacityVO capacity1 = _capacityDao.findByHostIdType(poolId, CapacityVO.CAPACITY_TYPE_STORAGE); - CapacityVO capacity2 = _capacityDao.findByHostIdType(poolId, CapacityVO.CAPACITY_TYPE_STORAGE_ALLOCATED); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (capacity1 != null) { - _capacityDao.remove(capacity1.getId()); - } - - if (capacity2 != null) { - _capacityDao.remove(capacity2.getId()); - } - - txn.commit(); - return true; - } @Override - public boolean createStoragePool(long hostId, StoragePoolVO pool) { - s_logger.debug("creating pool " + pool.getName() + " on host " + hostId); - if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN - && pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint - && pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD && pool.getPoolType() != StoragePoolType.CLVM) { - s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType()); - return false; - } - CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); - if (answer != null && answer.getResult()) { - return true; - } else { - _storagePoolDao.expunge(pool.getId()); - String msg = ""; - if (answer != null) { - msg = "Can not create storage pool through host " + hostId + " due to " + 
answer.getDetails(); - s_logger.warn(msg); - } else { - msg = "Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null"; - s_logger.warn(msg); - } - throw new CloudRuntimeException(msg); - } - } - - @Override - public boolean delPoolFromHost(long hostId) { - List poolHosts = _poolHostDao.listByHostIdIncludingRemoved(hostId); - for (StoragePoolHostVO poolHost : poolHosts) { - s_logger.debug("Deleting pool " + poolHost.getPoolId() + " from host " + hostId); - _poolHostDao.remove(poolHost.getId()); - } - return true; - } - - public void connectHostToSharedPool(long hostId, StoragePoolVO pool) throws StorageUnavailableException { - assert (pool.getPoolType().isShared()) : "Now, did you actually read the name of this method?"; + public void connectHostToSharedPool(long hostId, long poolId) + throws StorageUnavailableException { + StoragePool pool = (StoragePool)this.dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + assert (pool.isShared()) : "Now, did you actually read the name of this method?"; s_logger.debug("Adding pool " + pool.getName() + " to host " + hostId); - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool); - final Answer answer = _agentMgr.easySend(hostId, cmd); - - if (answer == null) { - throw new StorageUnavailableException("Unable to get an answer to the modify storage pool command", pool.getId()); - } - - if (!answer.getResult()) { - String msg = "Add host failed due to ModifyStoragePoolCommand failed" + answer.getDetails(); - _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg); - throw new StorageUnavailableException("Unable establish connection from storage head to storage pool " + pool.getId() + " due to " + answer.getDetails(), pool.getId()); - } - - assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? 
Pool=" + pool.getId() + "Host=" + hostId; - ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer; - - StoragePoolHostVO poolHost = _poolHostDao.findByPoolHost(pool.getId(), hostId); - if (poolHost == null) { - poolHost = new StoragePoolHostVO(pool.getId(), hostId, mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); - _poolHostDao.persist(poolHost); - } else { - poolHost.setLocalPath(mspAnswer.getPoolInfo().getLocalPath().replaceAll("//", "/")); - } - pool.setAvailableBytes(mspAnswer.getPoolInfo().getAvailableBytes()); - pool.setCapacityBytes(mspAnswer.getPoolInfo().getCapacityBytes()); - _storagePoolDao.update(pool.getId(), pool); - - s_logger.info("Connection established between " + pool + " host + " + hostId); - } - - @Override - public VolumeVO moveVolume(VolumeVO volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) throws ConcurrentOperationException { - - // Find a destination storage pool with the specified criteria - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskProfile dskCh = new DiskProfile(volume.getId(), volume.getVolumeType(), volume.getName(), diskOffering.getId(), diskOffering.getDiskSize(), diskOffering.getTagsArray(), - diskOffering.getUseLocalStorage(), diskOffering.isRecreatable(), null); - dskCh.setHyperType(dataDiskHyperType); - DataCenterVO destPoolDataCenter = _dcDao.findById(destPoolDcId); - HostPodVO destPoolPod = _podDao.findById(destPoolPodId); - StoragePoolVO destPool = findStoragePool(dskCh, destPoolDataCenter, destPoolPod, destPoolClusterId, null, null, new HashSet()); - String secondaryStorageURL = getSecondaryStorageURL(volume.getDataCenterId()); - - if (destPool == null) { - throw new CloudRuntimeException("Failed to find a storage pool with enough capacity to move the volume to."); - } - if (secondaryStorageURL == null) { - throw new CloudRuntimeException("Failed to find secondary storage."); - } - - List vols 
= new ArrayList(); - vols.add(volume); - migrateVolumes(vols, destPool); - return _volsDao.findById(volume.getId()); - } - - - /* - * Upload the volume to secondary storage. - * - */ - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume", async = true) - public VolumeVO uploadVolume(UploadVolumeCmd cmd) throws ResourceAllocationException{ - Account caller = UserContext.current().getCaller(); - long ownerId = cmd.getEntityOwnerId(); - Long zoneId = cmd.getZoneId(); - String volumeName = cmd.getVolumeName(); - String url = cmd.getUrl(); - String format = cmd.getFormat(); - - validateVolume(caller, ownerId, zoneId, volumeName, url, format); - VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, url, cmd.getFormat()); - _downloadMonitor.downloadVolumeToStorage(volume, zoneId, url, cmd.getChecksum(), ImageFormat.valueOf(format.toUpperCase())); - return volume; - } - - private boolean validateVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) throws ResourceAllocationException{ - - // permission check - _accountMgr.checkAccess(caller, null, true, _accountMgr.getActiveAccountById(ownerId)); - - // Check that the resource limit for volumes won't be exceeded - _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.volume); - - - // Verify that zone exists - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); - } - - // Check if zone is disabled - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); - } - - if (url.toLowerCase().contains("file://")) { - throw new InvalidParameterValueException("File:// type urls are currently unsupported"); - } - - ImageFormat 
imgfmt = ImageFormat.valueOf(format.toUpperCase()); - if (imgfmt == null) { - throw new IllegalArgumentException("Image format is incorrect " + format + ". Supported formats are " + EnumUtils.listValues(ImageFormat.values())); - } - - String userSpecifiedName = volumeName; - if (userSpecifiedName == null) { - userSpecifiedName = getRandomVolumeName(); - } - if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) - &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) - &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) - &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) - &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) - &&(!url.toLowerCase().endsWith("ova.bz2"))&&(!url.toLowerCase().endsWith("ova.gz")) - &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ - throw new InvalidParameterValueException("Please specify a valid " + format.toLowerCase()); - } - - if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith(".vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) - || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith(".qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) - || (format.equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith(".ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) - || (format.equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { - throw new InvalidParameterValueException("Please specify a valid URL. 
URL:" + url + " is an invalid for the format " + format.toLowerCase()); - } - validateUrl(url); - - return false; - } - - private String validateUrl(String url){ - try { - URI uri = new URI(url); - if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") - && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { - throw new IllegalArgumentException("Unsupported scheme for url: " + url); - } - - int port = uri.getPort(); - if (!(port == 80 || port == 443 || port == -1)) { - throw new IllegalArgumentException("Only ports 80 and 443 are allowed"); - } - String host = uri.getHost(); - try { - InetAddress hostAddr = InetAddress.getByName(host); - if (hostAddr.isAnyLocalAddress() || hostAddr.isLinkLocalAddress() || hostAddr.isLoopbackAddress() || hostAddr.isMulticastAddress()) { - throw new IllegalArgumentException("Illegal host specified in url"); - } - if (hostAddr instanceof Inet6Address) { - throw new IllegalArgumentException("IPV6 addresses not supported (" + hostAddr.getHostAddress() + ")"); - } - } catch (UnknownHostException uhe) { - throw new IllegalArgumentException("Unable to resolve " + host); - } - - return uri.toString(); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("Invalid URL " + url); - } - - } - - private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, String volumeName, String url, String format) { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); - volume.setPoolId(null); - volume.setDataCenterId(zoneId); - volume.setPodId(null); - volume.setAccountId(ownerId); - volume.setDomainId(((caller == null) ? 
Domain.ROOT_DOMAIN : caller.getDomainId())); - long diskOfferingId = _diskOfferingDao.findByUniqueName("Cloud.com-Custom").getId(); - volume.setDiskOfferingId(diskOfferingId); - //volume.setSize(size); - volume.setInstanceId(null); - volume.setUpdated(new Date()); - volume.setDomainId((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId()); - - volume = _volsDao.persist(volume); - try { - stateTransitTo(volume, Event.UploadRequested); - } catch (NoTransitionException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - UserContext.current().setEventDetails("Volume Id: " + volume.getId()); - - // Increment resource count during allocation; if actual creation fails, decrement it - _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); - - txn.commit(); - return volume; - } - - - /* - * Just allocate a volume in the database, don't send the createvolume cmd to hypervisor. The volume will be finally - * created - * only when it's attached to a VM. - */ - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) - public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationException { - // FIXME: some of the scheduled event stuff might be missing here... 
- Account caller = UserContext.current().getCaller(); - - long ownerId = cmd.getEntityOwnerId(); - - // permission check - _accountMgr.checkAccess(caller, null, true, _accountMgr.getActiveAccountById(ownerId)); - - // Check that the resource limit for volumes won't be exceeded - _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), ResourceType.volume); - - Long zoneId = cmd.getZoneId(); - Long diskOfferingId = null; - DiskOfferingVO diskOffering = null; - Long size = null; - - // validate input parameters before creating the volume - if ((cmd.getSnapshotId() == null && cmd.getDiskOfferingId() == null) || (cmd.getSnapshotId() != null && cmd.getDiskOfferingId() != null)) { - throw new InvalidParameterValueException("Either disk Offering Id or snapshot Id must be passed whilst creating volume"); - } - - if (cmd.getSnapshotId() == null) {// create a new volume - - diskOfferingId = cmd.getDiskOfferingId(); - size = cmd.getSize(); - Long sizeInGB = size; - if (size != null) { - if (size > 0) { - size = size * 1024 * 1024 * 1024; // user specify size in GB - } else { - throw new InvalidParameterValueException("Disk size must be larger than 0"); - } - } - - // Check that the the disk offering is specified - diskOffering = _diskOfferingDao.findById(diskOfferingId); - if ((diskOffering == null) || diskOffering.getRemoved() != null || !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) { - throw new InvalidParameterValueException("Please specify a valid disk offering."); - } - - if (diskOffering.isCustomized()) { - if (size == null) { - throw new InvalidParameterValueException("This disk offering requires a custom size specified"); - } - if ((sizeInGB < _customDiskOfferingMinSize) || (sizeInGB > _customDiskOfferingMaxSize)) { - throw new InvalidParameterValueException("Volume size: " + sizeInGB + "GB is out of allowed range. 
Max: " + _customDiskOfferingMaxSize + " Min:" + _customDiskOfferingMinSize); - } - } - - if (!diskOffering.isCustomized() && size != null) { - throw new InvalidParameterValueException("This disk offering does not allow custom size"); - } - - if (diskOffering.getDomainId() == null) { - // do nothing as offering is public - } else { - _configMgr.checkDiskOfferingAccess(caller, diskOffering); - } - - if (diskOffering.getDiskSize() > 0) { - size = diskOffering.getDiskSize(); - } - - if (!validateVolumeSizeRange(size)) {// convert size from mb to gb for validation - throw new InvalidParameterValueException("Invalid size for custom volume creation: " + size + " ,max volume size is:" + _maxVolumeSizeInGb); - } - } else { // create volume from snapshot - Long snapshotId = cmd.getSnapshotId(); - SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId); - if (snapshotCheck == null) { - throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); - } - - if (snapshotCheck.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); - } - - diskOfferingId = snapshotCheck.getDiskOfferingId(); - diskOffering = _diskOfferingDao.findById(diskOfferingId); - zoneId = snapshotCheck.getDataCenterId(); - size = snapshotCheck.getSize(); // ; disk offering is used for tags purposes - - // check snapshot permissions - _accountMgr.checkAccess(caller, null, true, snapshotCheck); - - /* - * // bug #11428. 
Operation not supported if vmware and snapshots parent volume = ROOT - * if(snapshotCheck.getHypervisorType() == HypervisorType.VMware - * && _volumeDao.findByIdIncludingRemoved(snapshotCheck.getVolumeId()).getVolumeType() == Type.ROOT){ - * throw new UnsupportedServiceException("operation not supported, snapshot with id " + snapshotId + - * " is created from ROOT volume"); - * } - * - */ - } - - // Verify that zone exists - DataCenterVO zone = _dcDao.findById(zoneId); - if (zone == null) { - throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); - } - - // Check if zone is disabled - if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getType())) { - throw new PermissionDeniedException("Cannot perform this operation, Zone is currently disabled: " + zoneId); - } - - // If local storage is disabled then creation of volume with local disk offering not allowed - if (!zone.isLocalStorageEnabled() && diskOffering.getUseLocalStorage()) { - throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it"); - } - - // Check that there is appropriate primary storage pool in the specified zone - List storagePools = _storagePoolDao.listByDataCenterId(zoneId); - boolean appropriatePoolExists = false; - if (!diskOffering.getUseLocalStorage()) { - for (StoragePoolVO storagePool : storagePools) { - if (storagePool.isShared()) { - appropriatePoolExists = true; - break; - } - } - } else { - for (StoragePoolVO storagePool : storagePools) { - if (storagePool.isLocal()) { - appropriatePoolExists = true; - break; - } - } - } - - // Check that there is at least one host in the specified zone - List hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Host.Type.Routing, zoneId); - if (hosts.isEmpty()) { - throw new InvalidParameterValueException("There is no workable host in data center id " + zoneId + ", please check 
hosts' agent status and see if they are disabled"); - } - - if (!appropriatePoolExists) { - String storageType = diskOffering.getUseLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); - throw new InvalidParameterValueException("Volume's disk offering uses " + storageType + " storage, please specify a zone that has at least one " + storageType + " primary storage pool."); - } - - String userSpecifiedName = cmd.getVolumeName(); - if (userSpecifiedName == null) { - userSpecifiedName = getRandomVolumeName(); - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); - volume.setPoolId(null); - volume.setDataCenterId(zoneId); - volume.setPodId(null); - volume.setAccountId(ownerId); - volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId())); - volume.setDiskOfferingId(diskOfferingId); - volume.setSize(size); - volume.setInstanceId(null); - volume.setUpdated(new Date()); - volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller.getDomainId()); - - volume = _volsDao.persist(volume); - if(cmd.getSnapshotId() == null){ - //for volume created from snapshot, create usage event after volume creation - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), diskOfferingId, null, size, - Volume.class.getName(), volume.getUuid()); - } - - UserContext.current().setEventDetails("Volume Id: " + volume.getId()); - - // Increment resource count during allocation; if actual creation fails, decrement it - _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); - - txn.commit(); - - return volume; - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) - public VolumeVO createVolume(CreateVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); - boolean created = false; - - try { - if (cmd.getSnapshotId() != null) { - volume = createVolumeFromSnapshot(volume, cmd.getSnapshotId()); - if (volume.getState() == Volume.State.Ready) { - created = true; - } - return volume; - } else { - _volsDao.update(volume.getId(), volume); - created = true; - } - - return _volsDao.findById(volume.getId()); - } finally { - if (!created) { - s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume); - } - } - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) - public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { - VolumeVO volume = _volsDao.findById(cmd.getEntityId()); - Long newSize = null; - boolean shrinkOk = cmd.getShrinkOk(); - boolean success = false; - DiskOfferingVO diskOffering = 
_diskOfferingDao.findById(volume.getDiskOfferingId()); - DiskOfferingVO newDiskOffering = null; - - newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); - - /* Volumes with no hypervisor have never been assigned, and can be resized by recreating. - perhaps in the future we can just update the db entry for the volume */ - if(_volsDao.getHypervisorType(volume.getId()) == HypervisorType.None){ - throw new InvalidParameterValueException("Can't resize a volume that has never been attached, not sure which hypervisor type. Recreate volume to resize."); - } - - /* Only works for KVM/Xen/VMware for now */ - if(_volsDao.getHypervisorType(volume.getId()) != HypervisorType.KVM - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.XenServer - && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.VMware){ - throw new InvalidParameterValueException("Cloudstack currently only supports volumes marked as KVM, XenServer or VMware hypervisor for resize"); - } - - if (volume == null) { - throw new InvalidParameterValueException("No such volume"); - } - - if (volume.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("Volume should be in ready state before attempting a resize"); - } - - if (!volume.getVolumeType().equals(Volume.Type.DATADISK)) { - throw new InvalidParameterValueException("Can only resize DATA volumes"); - } - - /* figure out whether or not a new disk offering or size parameter is required, get the correct size value */ - if (newDiskOffering == null) { - if (diskOffering.isCustomized()) { - newSize = cmd.getSize(); - - if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); - } - - newSize = ( newSize << 30 ); - } else { - throw new InvalidParameterValueException("current offering" + volume.getDiskOfferingId() + " cannot be resized, need to specify a disk offering"); - } - } else { - - if (newDiskOffering.getRemoved() != null || 
!DiskOfferingVO.Type.Disk.equals(newDiskOffering.getType())) { - throw new InvalidParameterValueException("Disk offering ID is missing or invalid"); - } - - if(diskOffering.getTags() != null) { - if(!newDiskOffering.getTags().equals(diskOffering.getTags())){ - throw new InvalidParameterValueException("Tags on new and old disk offerings must match"); - } - } else if (newDiskOffering.getTags() != null ){ - throw new InvalidParameterValueException("There are no tags on current disk offering, new disk offering needs to have no tags"); - } - - if (newDiskOffering.getDomainId() == null) { - // do nothing as offering is public - } else { - _configMgr.checkDiskOfferingAccess(UserContext.current().getCaller(), newDiskOffering); - } - - if (newDiskOffering.isCustomized()) { - newSize = cmd.getSize(); - - if (newSize == null) { - throw new InvalidParameterValueException("new offering is of custom size, need to specify a size"); - } - - newSize = ( newSize << 30 ); - } else { - newSize = newDiskOffering.getDiskSize(); - } - } - - if (newSize == null) { - throw new InvalidParameterValueException("could not detect a size parameter or fetch one from the diskofferingid parameter"); - } - - if (!validateVolumeSizeRange(newSize)) { - throw new InvalidParameterValueException("Requested size out of range"); - } - - /* does the caller have the authority to act on this volume? */ - _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, volume); - - UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); - - StoragePool pool = _storagePoolDao.findById(volume.getPoolId()); - long currentSize = volume.getSize(); - - /* lets make certain they (think they) know what they're doing if they - want to shrink, by forcing them to provide the shrinkok parameter. 
This will - be checked again at the hypervisor level where we can see the actual disk size */ - if (currentSize > newSize && !shrinkOk) { - throw new InvalidParameterValueException("Going from existing size of " + currentSize + " to size of " - + newSize + " would shrink the volume, need to sign off by supplying the shrinkok parameter with value of true"); - } - - /* get a list of hosts to send the commands to, try the system the - associated vm is running on first, then the last known place it ran. - If not attached to a userVm, we pass 'none' and resizevolume.sh is - ok with that since it only needs the vm name to live resize */ - long[] hosts = null; - String instanceName = "none"; - if (userVm != null) { - instanceName = userVm.getInstanceName(); - if(userVm.getHostId() != null) { - hosts = new long[] { userVm.getHostId() }; - } else if(userVm.getLastHostId() != null) { - hosts = new long[] { userVm.getLastHostId() }; - } - - /*Xen only works offline, SR does not support VDI.resizeOnline*/ - if(_volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer - && ! userVm.getState().equals(State.Stopped)) { - throw new InvalidParameterValueException("VM must be stopped or disk detached in order to resize with the Xen HV"); - } - } - - try { - try { - stateTransitTo(volume, Volume.Event.ResizeRequested); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Unable to change volume state for resize: " + etrans.toString()); - } - - ResizeVolumeCommand resizeCmd = new ResizeVolumeCommand(volume.getPath(), new StorageFilerTO(pool), - currentSize, newSize, shrinkOk, instanceName); - ResizeVolumeAnswer answer = (ResizeVolumeAnswer) sendToPool(pool, hosts, resizeCmd); - - /* need to fetch/store new volume size in database. 
This value comes from - hypervisor rather than trusting that a success means we have a volume of the - size we requested */ - if (answer != null && answer.getResult()) { - long finalSize = answer.getNewSize(); - s_logger.debug("Resize: volume started at size " + currentSize + " and ended at size " + finalSize); - volume.setSize(finalSize); - if (newDiskOffering != null) { - volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); - } - _volsDao.update(volume.getId(), volume); - - success = true; - return volume; - } else if (answer != null) { - s_logger.debug("Resize: returned '" + answer.getDetails() + "'"); - } - } catch (StorageUnavailableException e) { - s_logger.debug("volume failed to resize: "+e); - return null; - } finally { - if(success) { - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Failed to change volume state: " + etrans.toString()); - } - } else { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException etrans) { - throw new CloudRuntimeException("Failed to change volume state: " + etrans.toString()); - } - } - } - return null; - } - - @Override - @DB - public boolean destroyVolume(VolumeVO volume) throws ConcurrentOperationException { - try { - if (!stateTransitTo(volume, Volume.Event.DestroyRequested)) { - throw new ConcurrentOperationException("Failed to transit to destroyed state"); - } - } catch (NoTransitionException e) { - s_logger.debug("Unable to destoy the volume: " + e.toString()); - return false; - } - - long volumeId = volume.getId(); - - // Delete the recurring snapshot policies for this volume. 
- _snapshotMgr.deletePoliciesForVolume(volumeId); - - Long instanceId = volume.getInstanceId(); - VMInstanceVO vmInstance = null; - if (instanceId != null) { - vmInstance = _vmInstanceDao.findById(instanceId); - } - - if (instanceId == null || (vmInstance.getType().equals(VirtualMachine.Type.User))) { - // Decrement the resource count for volumes belonging user VM's only - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume); - // Log usage event for volumes belonging user VM's only - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), - volume.getDataCenterId(), volume.getId(), volume.getName(), - Volume.class.getName(), volume.getUuid()); - } - - try { - if (!stateTransitTo(volume, Volume.Event.OperationSucceeded)) { - throw new ConcurrentOperationException("Failed to transit state"); - - } - } catch (NoTransitionException e) { - s_logger.debug("Unable to change volume state: " + e.toString()); - return false; - } - - return true; - - } - - @Override - public void createCapacityEntry(StoragePoolVO storagePool) { - createCapacityEntry(storagePool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 0); + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(pool.getStorageProviderId()); + HypervisorHostListener listener = hostListeners.get(provider.getUuid()); + listener.hostConnect(hostId, pool.getId()); } @Override @@ -2384,17 +1099,21 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } + cleanupSecondaryStorage(recurring); List vols = _volsDao.listVolumesToBeDestroyed(); for (VolumeVO vol : vols) { try { - expungeVolume(vol, false); + + this.volService.expungeVolumeAsync(this.volFactory.getVolume(vol.getId())); + } catch (Exception e) { s_logger.warn("Unable to destroy " + vol.getId(), e); } } + // remove snapshots in Error state List snapshots = _snapshotDao.listAllByStatus(Snapshot.State.Error); for (SnapshotVO snapshotVO : snapshots) { @@ 
-2405,11 +1124,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - } finally { + }finally { scanLock.unlock(); } - } - } finally { + } + }finally { scanLock.releaseRef(); } } @@ -2430,7 +1149,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + hostId + " due to " + e.getMessage()); + s_logger.debug("failed to get all volumes who has snapshots in secondary storage " + + hostId + " due to " + e.getMessage()); return null; } @@ -2451,7 +1171,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } return list; } catch (Exception e) { - s_logger.debug("failed to get all snapshots for a volume " + volumeId + " due to " + e.getMessage()); + s_logger.debug("failed to get all snapshots for a volume " + + volumeId + " due to " + e.getMessage()); return null; } } @@ -2461,42 +1182,66 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public void cleanupSecondaryStorage(boolean recurring) { try { // Cleanup templates in secondary storage hosts - List secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInAllZones(); + List secondaryStorageHosts = _ssvmMgr + .listSecondaryStorageHostsInAllZones(); for (HostVO secondaryStorageHost : secondaryStorageHosts) { try { long hostId = secondaryStorageHost.getId(); - List destroyedTemplateHostVOs = _vmTemplateHostDao.listDestroyed(hostId); - s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateHostVOs.size() + " templates to cleanup on secondary storage host: " + List destroyedTemplateHostVOs = _vmTemplateHostDao + .listDestroyed(hostId); + s_logger.debug("Secondary storage garbage collector found " + + destroyedTemplateHostVOs.size() + + " templates to cleanup on secondary storage host: " + secondaryStorageHost.getName()); for (VMTemplateHostVO 
destroyedTemplateHostVO : destroyedTemplateHostVOs) { - if (!_tmpltMgr.templateIsDeleteable(destroyedTemplateHostVO)) { + if (!_tmpltMgr + .templateIsDeleteable(destroyedTemplateHostVO)) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Not deleting template at: " + destroyedTemplateHostVO); + s_logger.debug("Not deleting template at: " + + destroyedTemplateHostVO); } continue; } if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting template host: " + destroyedTemplateHostVO); + s_logger.debug("Deleting template host: " + + destroyedTemplateHostVO); } - String installPath = destroyedTemplateHostVO.getInstallPath(); + String installPath = destroyedTemplateHostVO + .getInstallPath(); if (installPath != null) { - Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, new DeleteTemplateCommand(secondaryStorageHost.getStorageUrl(), destroyedTemplateHostVO.getInstallPath())); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, + new DeleteTemplateCommand( + secondaryStorageHost + .getStorageUrl(), + destroyedTemplateHostVO + .getInstallPath())); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + destroyedTemplateHostVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); + s_logger.debug("Failed to delete " + + destroyedTemplateHostVO + + " due to " + + ((answer == null) ? 
"answer is null" + : answer.getDetails())); } else { - _vmTemplateHostDao.remove(destroyedTemplateHostVO.getId()); - s_logger.debug("Deleted template at: " + destroyedTemplateHostVO.getInstallPath()); + _vmTemplateHostDao + .remove(destroyedTemplateHostVO.getId()); + s_logger.debug("Deleted template at: " + + destroyedTemplateHostVO + .getInstallPath()); } } else { - _vmTemplateHostDao.remove(destroyedTemplateHostVO.getId()); + _vmTemplateHostDao.remove(destroyedTemplateHostVO + .getId()); } } } catch (Exception e) { - s_logger.warn("problem cleaning up templates in secondary storage " + secondaryStorageHost, e); + s_logger.warn( + "problem cleaning up templates in secondary storage " + + secondaryStorageHost, e); } } @@ -2511,9 +1256,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C for (Long volumeId : vIDs) { boolean lock = false; try { - VolumeVO volume = _volsDao.findByIdIncludingRemoved(volumeId); + VolumeVO volume = _volsDao + .findByIdIncludingRemoved(volumeId); if (volume.getRemoved() == null) { - volume = _volsDao.acquireInLockTable(volumeId, 10); + volume = _volsDao.acquireInLockTable(volumeId, + 10); if (volume == null) { continue; } @@ -2523,16 +1270,25 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C if (snapshots == null) { continue; } - CleanupSnapshotBackupCommand cmd = new CleanupSnapshotBackupCommand(secondaryStorageHost.getStorageUrl(), secondaryStorageHost.getDataCenterId(), volume.getAccountId(), - volumeId, snapshots); + CleanupSnapshotBackupCommand cmd = new CleanupSnapshotBackupCommand( + secondaryStorageHost.getStorageUrl(), + secondaryStorageHost.getDataCenterId(), + volume.getAccountId(), volumeId, snapshots); - Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, cmd); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, cmd); if ((answer == null) || !answer.getResult()) { - String details = "Failed to cleanup snapshots for volume " + 
volumeId + " due to " + (answer == null ? "null" : answer.getDetails()); + String details = "Failed to cleanup snapshots for volume " + + volumeId + + " due to " + + (answer == null ? "null" : answer + .getDetails()); s_logger.warn(details); } } catch (Exception e1) { - s_logger.warn("problem cleaning up snapshots in secondary storage " + secondaryStorageHost, e1); + s_logger.warn( + "problem cleaning up snapshots in secondary storage " + + secondaryStorageHost, e1); } finally { if (lock) { _volsDao.releaseFromLockTable(volumeId); @@ -2540,40 +1296,63 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } catch (Exception e2) { - s_logger.warn("problem cleaning up snapshots in secondary storage " + secondaryStorageHost, e2); + s_logger.warn( + "problem cleaning up snapshots in secondary storage " + + secondaryStorageHost, e2); } } - //CleanUp volumes on Secondary Storage. + // CleanUp volumes on Secondary Storage. for (HostVO secondaryStorageHost : secondaryStorageHosts) { try { long hostId = secondaryStorageHost.getId(); - List destroyedVolumeHostVOs = _volumeHostDao.listDestroyed(hostId); - s_logger.debug("Secondary storage garbage collector found " + destroyedVolumeHostVOs.size() + " templates to cleanup on secondary storage host: " + List destroyedVolumeHostVOs = _volumeHostDao + .listDestroyed(hostId); + s_logger.debug("Secondary storage garbage collector found " + + destroyedVolumeHostVOs.size() + + " templates to cleanup on secondary storage host: " + secondaryStorageHost.getName()); for (VolumeHostVO destroyedVolumeHostVO : destroyedVolumeHostVOs) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Deleting volume host: " + destroyedVolumeHostVO); + s_logger.debug("Deleting volume host: " + + destroyedVolumeHostVO); } - String installPath = destroyedVolumeHostVO.getInstallPath(); + String installPath = destroyedVolumeHostVO + .getInstallPath(); if (installPath != null) { - Answer answer = 
_agentMgr.sendToSecStorage(secondaryStorageHost, new DeleteVolumeCommand(secondaryStorageHost.getStorageUrl(), destroyedVolumeHostVO.getInstallPath())); + Answer answer = _agentMgr.sendToSecStorage( + secondaryStorageHost, + new DeleteVolumeCommand( + secondaryStorageHost + .getStorageUrl(), + destroyedVolumeHostVO + .getInstallPath())); if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + destroyedVolumeHostVO + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); + s_logger.debug("Failed to delete " + + destroyedVolumeHostVO + + " due to " + + ((answer == null) ? "answer is null" + : answer.getDetails())); } else { - _volumeHostDao.remove(destroyedVolumeHostVO.getId()); - s_logger.debug("Deleted volume at: " + destroyedVolumeHostVO.getInstallPath()); + _volumeHostDao.remove(destroyedVolumeHostVO + .getId()); + s_logger.debug("Deleted volume at: " + + destroyedVolumeHostVO + .getInstallPath()); } } else { - _volumeHostDao.remove(destroyedVolumeHostVO.getId()); + _volumeHostDao + .remove(destroyedVolumeHostVO.getId()); } } - }catch (Exception e2) { - s_logger.warn("problem cleaning up volumes in secondary storage " + secondaryStorageHost, e2); + } catch (Exception e2) { + s_logger.warn( + "problem cleaning up volumes in secondary storage " + + secondaryStorageHost, e2); } } } catch (Exception e3) { @@ -2585,235 +1364,63 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public String getPrimaryStorageNameLabel(VolumeVO volume) { Long poolId = volume.getPoolId(); - // poolId is null only if volume is destroyed, which has been checked before. + // poolId is null only if volume is destroyed, which has been checked + // before. 
assert poolId != null; - StoragePoolVO storagePoolVO = _storagePoolDao.findById(poolId); - assert storagePoolVO != null; - return storagePoolVO.getUuid(); + StoragePoolVO PrimaryDataStoreVO = _storagePoolDao + .findById(poolId); + assert PrimaryDataStoreVO != null; + return PrimaryDataStoreVO.getUuid(); } @Override @DB - public StoragePoolVO preparePrimaryStorageForMaintenance(Long primaryStorageId) throws ResourceUnavailableException, InsufficientCapacityException { + public PrimaryDataStoreInfo preparePrimaryStorageForMaintenance( + Long primaryStorageId) throws ResourceUnavailableException, + InsufficientCapacityException { Long userId = UserContext.current().getCallerUserId(); User user = _userDao.findById(userId); Account account = UserContext.current().getCaller(); + boolean restart = true; StoragePoolVO primaryStorage = null; - try { - // 1. Get the primary storage record and perform validation check - primaryStorage = _storagePoolDao.lockRow(primaryStorageId, true); - if (primaryStorage == null) { - String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; - s_logger.error(msg); - throw new ExecutionException(msg); - } - - List spes = _storagePoolDao.listBy(primaryStorage.getDataCenterId(), primaryStorage.getPodId(), primaryStorage.getClusterId()); - for (StoragePoolVO sp : spes) { - if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { - throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + " is already in PrepareForMaintenance mode "); - } - } - - if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString()); - } - - List hosts = 
_resourceMgr.listHostsInClusterByStatus(primaryStorage.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - primaryStorage.setStatus(StoragePoolStatus.Maintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - return _storagePoolDao.findById(primaryStorageId); - } else { - // set the pool state to prepare for maintenance - primaryStorage.setStatus(StoragePoolStatus.PrepareForMaintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - } - // remove heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, primaryStorage); - final Answer answer = _agentMgr.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool false secceeded"); - } - } - } - // check to see if other ps exist - // if they do, then we can migrate over the system vms to them - // if they dont, then just stop all vms on this one - List upPools = _storagePoolDao.listByStatusInZone(primaryStorage.getDataCenterId(), StoragePoolStatus.Up); - - if (upPools == null || upPools.size() == 0) { - restart = false; - } - - // 2. Get a list of all the ROOT volumes within this storage pool - List allVolumes = _volsDao.findByPoolId(primaryStorageId); - - // 3. 
Enqueue to the work queue - for (VolumeVO volume : allVolumes) { - VMInstanceVO vmInstance = _vmInstanceDao.findById(volume.getInstanceId()); - - if (vmInstance == null) { - continue; - } - - // enqueue sp work - if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) { - - try { - StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), primaryStorageId, false, false, _serverId); - _storagePoolWorkDao.persist(work); - } catch (Exception e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Work record already exists, re-using by re-setting values"); - } - StoragePoolWorkVO work = _storagePoolWorkDao.findByPoolIdAndVmId(primaryStorageId, vmInstance.getId()); - work.setStartedAfterMaintenance(false); - work.setStoppedForMaintenance(false); - work.setManagementServerId(_serverId); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // 4. Process the queue - List pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(primaryStorageId); - - for (StoragePoolWorkVO work : pendingWork) { - // shut down the running vms - VMInstanceVO vmInstance = _vmInstanceDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - // call the consoleproxymanager - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(consoleProxy, true, user, account)) { - String errorMsg = "There was an error stopping the console proxy id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - - if 
(_vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String errorMsg = "There was an error starting the console proxy id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type uservm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = _userVmDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(userVm, true, user, account)) { - String errorMsg = "There was an error stopping the user vm id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type secondary storage vm, call the secondary storage vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO secStrgVm = _secStrgDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(secStrgVm, true, user, account)) { - String errorMsg = "There was an error stopping the ssvm id: " + vmInstance.getId() + " ,cannot enable storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (_vmMgr.advanceStart(secStrgVm, null, user, account) == null) { - String errorMsg = "There was an error starting the ssvm id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - 
_storagePoolWorkDao.update(work.getId(), work); - } - } - } - - // if the instance is of type domain router vm, call the network manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (!_vmMgr.advanceStop(domR, true, user, account)) { - String errorMsg = "There was an error stopping the domain router id: " + vmInstance.getId() + " ,cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - throw new CloudRuntimeException(errorMsg); - } else { - // update work status - work.setStoppedForMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - - if (restart) { - if (_vmMgr.advanceStart(domR, null, user, account) == null) { - String errorMsg = "There was an error starting the domain router id: " + vmInstance.getId() + " on another storage pool, cannot enable primary storage maintenance"; - s_logger.warn(errorMsg); - } else { - // update work status - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - } - } - - // 5. 
Update the status - primaryStorage.setStatus(StoragePoolStatus.Maintenance); - _storagePoolDao.update(primaryStorageId, primaryStorage); - - return _storagePoolDao.findById(primaryStorageId); - } catch (Exception e) { - if (e instanceof ExecutionException || e instanceof ResourceUnavailableException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (ResourceUnavailableException) e; - } - if (e instanceof InvalidParameterValueException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (InvalidParameterValueException) e; - } - if (e instanceof InsufficientCapacityException) { - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw (InsufficientCapacityException) e; - } - // for everything else - s_logger.error("Exception in enabling primary storage maintenance:", e); - setPoolStateToError(primaryStorage); - throw new CloudRuntimeException(e.getMessage()); + primaryStorage = _storagePoolDao.findById(primaryStorageId); + if (primaryStorage == null) { + String msg = "Unable to obtain lock on the storage pool record in preparePrimaryStorageForMaintenance()"; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); } + + List spes = _storagePoolDao.listBy( + primaryStorage.getDataCenterId(), primaryStorage.getPodId(), + primaryStorage.getClusterId(), ScopeType.CLUSTER); + for (StoragePoolVO sp : spes) { + if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) { + throw new CloudRuntimeException( + "Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + + sp.getId() + + " is already in PrepareForMaintenance mode "); + } + } + + if (!primaryStorage.getStatus().equals(DataStoreStatus.Up) + && !primaryStorage.getStatus().equals( + DataStoreStatus.ErrorInMaintenance)) { + throw new InvalidParameterValueException("Primary 
storage with id " + + primaryStorageId + + " is not ready for migration, as the status is:" + + primaryStorage.getStatus().toString()); + } + + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(primaryStorage.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + lifeCycle.maintain(primaryStorage.getId()); + + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); } private void setPoolStateToError(StoragePoolVO primaryStorage) { @@ -2823,156 +1430,46 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override @DB - public StoragePoolVO cancelPrimaryStorageForMaintenance(CancelPrimaryStorageMaintenanceCmd cmd) throws ResourceUnavailableException { + public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance( + CancelPrimaryStorageMaintenanceCmd cmd) + throws ResourceUnavailableException { Long primaryStorageId = cmd.getId(); Long userId = UserContext.current().getCallerUserId(); User user = _userDao.findById(userId); Account account = UserContext.current().getCaller(); StoragePoolVO primaryStorage = null; - try { - Transaction txn = Transaction.currentTxn(); - txn.start(); - // 1. 
Get the primary storage record and perform validation check - primaryStorage = _storagePoolDao.lockRow(primaryStorageId, true); - if (primaryStorage == null) { - String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; - s_logger.error(msg); - throw new ExecutionException(msg); - } + primaryStorage = _storagePoolDao.findById(primaryStorageId); - if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) { - throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), - primaryStorageId); - } - - // Change the storage state back to up - primaryStorage.setStatus(StoragePoolStatus.Up); - _storagePoolDao.update(primaryStorageId, primaryStorage); - txn.commit(); - List hosts = _resourceMgr.listHostsInClusterByStatus(primaryStorage.getClusterId(), Status.Up); - if (hosts == null || hosts.size() == 0) { - return _storagePoolDao.findById(primaryStorageId); - } - // add heartbeat - for (HostVO host : hosts) { - ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, primaryStorage); - final Answer answer = _agentMgr.easySend(host.getId(), msPoolCmd); - if (answer == null || !answer.getResult()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add failed due to " + ((answer == null) ? "answer null" : answer.getDetails())); - } - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("ModifyStoragePool add secceeded"); - } - } - } - - // 2. Get a list of pending work for this queue - List pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(primaryStorageId); - - // 3. 
work through the queue - for (StoragePoolWorkVO work : pendingWork) { - - VMInstanceVO vmInstance = _vmInstanceDao.findById(work.getVmId()); - - if (vmInstance == null) { - continue; - } - - // if the instance is of type consoleproxy, call the console proxy - if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) { - - ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(consoleProxy, null, user, account) == null) { - String msg = "There was an error starting the console proxy id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) { - SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(ssVm, null, user, account) == null) { - String msg = "There was an error starting the ssvm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type ssvm, call the ssvm manager - if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) { - DomainRouterVO domR = _domrDao.findById(vmInstance.getId()); - if (_vmMgr.advanceStart(domR, null, user, account) == null) { - String msg = "There was an error starting the domR id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - 
_storagePoolWorkDao.update(work.getId(), work); - } - } - - // if the instance is of type user vm, call the user vm manager - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - UserVmVO userVm = _userVmDao.findById(vmInstance.getId()); - try { - if (_vmMgr.advanceStart(userVm, null, user, account) == null) { - - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg); - throw new ExecutionException(msg); - } else { - // update work queue - work.setStartedAfterMaintenance(true); - _storagePoolWorkDao.update(work.getId(), work); - } - } catch (StorageUnavailableException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (InsufficientCapacityException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (ConcurrentOperationException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } catch (ExecutionException e) { - String msg = "There was an error starting the user vm id: " + vmInstance.getId() + " on storage pool, cannot complete primary storage maintenance"; - s_logger.warn(msg, e); - throw new ExecutionException(msg); - } - } - } - return primaryStorage; - } catch (Exception e) { - setPoolStateToError(primaryStorage); - if (e instanceof ExecutionException) { - throw (ResourceUnavailableException) e; - } else if (e instanceof InvalidParameterValueException) { - throw (InvalidParameterValueException) e; - } else {// all other 
exceptions - throw new CloudRuntimeException(e.getMessage()); - } + if (primaryStorage == null) { + String msg = "Unable to obtain lock on the storage pool in cancelPrimaryStorageForMaintenance()"; + s_logger.error(msg); + throw new InvalidParameterValueException(msg); } + + if (primaryStorage.getStatus().equals(DataStoreStatus.Up) + || primaryStorage.getStatus().equals( + DataStoreStatus.PrepareForMaintenance)) { + throw new StorageUnavailableException("Primary storage with id " + + primaryStorageId + + " is not ready to complete migration, as the status is:" + + primaryStorage.getStatus().toString(), primaryStorageId); + } + + DataStoreProvider provider = dataStoreProviderMgr + .getDataStoreProviderById(primaryStorage.getStorageProviderId()); + DataStoreLifeCycle lifeCycle = provider.getLifeCycle(); + lifeCycle.cancelMaintain(primaryStorage.getId()); + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore( + primaryStorage.getId(), DataStoreRole.Primary); } - private boolean sendToVmResidesOn(StoragePoolVO storagePool, Command cmd) { - ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); - if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster.getHypervisorType() == HypervisorType.VMware) + private boolean sendToVmResidesOn(StoragePoolVO PrimaryDataStoreVO, + Command cmd) { + ClusterVO cluster = _clusterDao.findById(PrimaryDataStoreVO + .getClusterId()); + if ((cluster.getHypervisorType() == HypervisorType.KVM || cluster + .getHypervisorType() == HypervisorType.VMware) && ((cmd instanceof ManageSnapshotCommand) || (cmd instanceof BackupSnapshotCommand))) { return true; } else { @@ -2980,754 +1477,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DELETE, eventDescription = "deleting volume") - public boolean deleteVolume(long volumeId, Account caller) throws ConcurrentOperationException { - - // Check that the volume ID is 
valid - VolumeVO volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new InvalidParameterValueException("Unable to aquire volume with ID: " + volumeId); - } - - if (!_snapshotMgr.canOperateOnVolume(volume)) { - throw new InvalidParameterValueException("There are snapshot creating on it, Unable to delete the volume"); - } - - // permission check - _accountMgr.checkAccess(caller, null, true, volume); - - // Check that the volume is not currently attached to any VM - if (volume.getInstanceId() != null) { - throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM."); - } - - // Check that volume is completely Uploaded - if (volume.getState() == Volume.State.UploadOp){ - VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume.getId()); - if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - throw new InvalidParameterValueException("Please specify a volume that is not uploading"); - } - } - - // Check that the volume is not already destroyed - if (volume.getState() != Volume.State.Destroy) { - if (!destroyVolume(volume)) { - return false; - } - } - - try { - expungeVolume(volume, false); - } catch (Exception e) { - s_logger.warn("Failed to expunge volume:", e); - return false; - } - - return true; - } - - private boolean validateVolumeSizeRange(long size) { - if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { - throw new InvalidParameterValueException("Please specify a size of at least 1 Gb."); - } else if (size > (_maxVolumeSizeInGb * 1024 * 1024 * 1024)) { - throw new InvalidParameterValueException("volume size " + size + ", but the maximum size allowed is " + _maxVolumeSizeInGb + " Gb."); - } - - return true; - } - - protected DiskProfile toDiskProfile(VolumeVO vol, DiskOfferingVO offering) { - return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), offering.getId(), vol.getSize(), offering.getTagsArray(), 
offering.getUseLocalStorage(), offering.isRecreatable(), - vol.getTemplateId()); - } - - @Override - public DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, T vm, Account owner) { - if (size == null) { - size = offering.getDiskSize(); - } else { - size = (size * 1024 * 1024 * 1024); - } - VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size); - if (vm != null) { - vol.setInstanceId(vm.getId()); - } - - if (type.equals(Type.ROOT)) { - vol.setDeviceId(0l); - } else { - vol.setDeviceId(1l); - } - - vol = _volsDao.persist(vol); - - // Save usage event and update resource count for user vm volumes - if (vm instanceof UserVm) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), - vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size, - Volume.class.getName(), vol.getUuid()); - _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), ResourceType.volume); - } - return toDiskProfile(vol, offering); - } - - @Override - public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOfferingVO offering, VMTemplateVO template, T vm, Account owner) { - assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really...."; - - SearchCriteria sc = HostTemplateStatesSearch.create(); - sc.setParameters("id", template.getId()); - sc.setParameters("state", com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - sc.setJoinParameters("host", "dcId", vm.getDataCenterId()); - List tsvs = _vmTemplateSwiftDao.listByTemplateId(template.getId()); - Long size = null; - if (tsvs != null && tsvs.size() > 0) { - size = tsvs.get(0).getSize(); - } - - if (size == null && _s3Mgr.isS3Enabled()) { - VMTemplateS3VO vmTemplateS3VO = _vmTemplateS3Dao.findOneByTemplateId(template.getId()); - if (vmTemplateS3VO != null) { - size = vmTemplateS3VO.getSize(); - } - } - - if (size == null) { - List 
sss = _vmTemplateHostDao.search(sc, null); - if (sss == null || sss.size() == 0) { - throw new CloudRuntimeException("Template " + template.getName() + " has not been completely downloaded to zone " + vm.getDataCenterId()); - } - size = sss.get(0).getSize(); - } - - VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), owner.getDomainId(), owner.getId(), offering.getId(), size); - if (vm != null) { - vol.setInstanceId(vm.getId()); - } - vol.setTemplateId(template.getId()); - - if (type.equals(Type.ROOT)) { - vol.setDeviceId(0l); - if (!vm.getType().equals(VirtualMachine.Type.User)) { - vol.setRecreatable(true); - } - } else { - vol.setDeviceId(1l); - } - - vol = _volsDao.persist(vol); - - // Create event and update resource count for volumes if vm is a user vm - if (vm instanceof UserVm) { - - Long offeringId = null; - - if (offering.getType() == DiskOfferingVO.Type.Disk) { - offeringId = offering.getId(); - } - - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), - vol.getDataCenterId(), vol.getId(), vol.getName(), offeringId, template.getId(), - vol.getSize(), Volume.class.getName(), vol.getUuid()); - - _resourceLimitMgr.incrementResourceCount(vm.getAccountId(), ResourceType.volume); - } - return toDiskProfile(vol, offering); - } - - @Override - public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Preparing " + vols.size() + " volumes for " + vm); - } - - for (VolumeVO vol : vols) { - StoragePool pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - - if (vm.getType() == VirtualMachine.Type.User) { - UserVmVO userVM = (UserVmVO) vm.getVirtualMachine(); - if (userVM.getIsoId() != null) { - Pair isoPathPair = getAbsoluteIsoPath(userVM.getIsoId(), userVM.getDataCenterId()); - if (isoPathPair != null) { - String isoPath = 
isoPathPair.first(); - VolumeTO iso = new VolumeTO(vm.getId(), Volume.Type.ISO, StoragePoolType.ISO, null, null, null, isoPath, 0, null, null); - vm.addDisk(iso); - } - } - } - } - - @DB - @Override - public Volume migrateVolume(Long volumeId, Long storagePoolId) throws ConcurrentOperationException { - VolumeVO vol = _volsDao.findById(volumeId); - if (vol == null) { - throw new InvalidParameterValueException("Failed to find the volume id: " + volumeId); - } - - if (vol.getState() != Volume.State.Ready) { - throw new InvalidParameterValueException("Volume must be in ready state"); - } - - if (vol.getInstanceId() != null) { - throw new InvalidParameterValueException("Volume needs to be dettached from VM"); - } - - StoragePool destPool = _storagePoolDao.findById(storagePoolId); - if (destPool == null) { - throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId); - } - - if (!volumeOnSharedStoragePool(vol)) { - throw new InvalidParameterValueException("Migration of volume from local storage pool is not supported"); - } - - List vols = new ArrayList(); - vols.add(vol); - - migrateVolumes(vols, destPool); - return vol; - } - - @DB - public boolean migrateVolumes(List volumes, StoragePool destPool) throws ConcurrentOperationException { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - boolean transitResult = false; - long checkPointTaskId = -1; - try { - List volIds = new ArrayList(); - for (Volume volume : volumes) { - if (!_snapshotMgr.canOperateOnVolume((VolumeVO) volume)) { - throw new CloudRuntimeException("There are snapshots creating on this volume, can not move this volume"); - } - - try { - if (!stateTransitTo(volume, Volume.Event.MigrationRequested)) { - throw new ConcurrentOperationException("Failed to transit volume state"); - } - } catch (NoTransitionException e) { - s_logger.debug("Failed to set state into migrate: " + e.toString()); - throw new CloudRuntimeException("Failed to set state into 
migrate: " + e.toString()); - } - volIds.add(volume.getId()); - } - - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); - } - } - - // At this stage, nobody can modify volumes. Send the copyvolume command - List> destroyCmds = new ArrayList>(); - List answers = new ArrayList(); - try { - for (Volume volume : volumes) { - String secondaryStorageURL = getSecondaryStorageURL(volume.getDataCenterId()); - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true, _copyvolumewait); - CopyVolumeAnswer cvAnswer; - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(srcPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException("Failed to copy the volume from the source primary storage pool to secondary storage.", e1); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new CloudRuntimeException("Failed to copy the volume from the source primary storage pool to secondary storage."); - } - - String secondaryStorageVolumePath = cvAnswer.getVolumePath(); - - // Copy the volume from secondary storage to the destination storage - // pool - cvCmd = new CopyVolumeCommand(volume.getId(), secondaryStorageVolumePath, destPool, secondaryStorageURL, false, _copyvolumewait); - try { - cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); - } catch (StorageUnavailableException e1) { - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - if (cvAnswer == null || !cvAnswer.getResult()) { - throw new CloudRuntimeException("Failed to copy the volume from secondary storage to the destination primary storage pool."); - } - - answers.add(cvAnswer); - destroyCmds.add(new Pair(srcPool, new DestroyCommand(srcPool, volume, null))); - } - } finally { - if (answers.size() != volumes.size()) { 
- // this means one of copying volume failed - for (Volume volume : volumes) { - try { - stateTransitTo(volume, Volume.Event.OperationFailed); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " + e.toString()); - } - } - } else { - // Need a transaction, make sure all the volumes get migrated to new storage pool - txn = Transaction.currentTxn(); - txn.start(); - - transitResult = false; - try { - for (int i = 0; i < volumes.size(); i++) { - CopyVolumeAnswer answer = answers.get(i); - VolumeVO volume = (VolumeVO) volumes.get(i); - Long oldPoolId = volume.getPoolId(); - volume.setPath(answer.getVolumePath()); - volume.setFolder(destPool.getPath()); - volume.setPodId(destPool.getPodId()); - volume.setPoolId(destPool.getId()); - volume.setLastPoolId(oldPoolId); - volume.setPodId(destPool.getPodId()); - try { - stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to change volume state: " + e.toString()); - throw new CloudRuntimeException("Failed to change volume state: " + e.toString()); - } - } - transitResult = true; - } finally { - if (!transitResult) { - txn.rollback(); - } else { - txn.commit(); - } - } - - } - } - - // all the volumes get migrated to new storage pool, need to delete the copy on old storage pool - for (Pair cmd : destroyCmds) { - try { - Answer cvAnswer = sendToPool(cmd.first(), cmd.second()); - } catch (StorageUnavailableException e) { - s_logger.debug("Unable to delete the old copy on storage pool: " + e.toString()); - } - } - return true; - } - - @Override - public boolean StorageMigration(VirtualMachineProfile vm, StoragePool destPool) throws ConcurrentOperationException { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - List volumesNeedToMigrate = new ArrayList(); - - for (VolumeVO volume : vols) { - if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() 
+ " state"); - throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - } - - if (volume.getPoolId() == destPool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); - continue; - } - - volumesNeedToMigrate.add(volume); - } - - if (volumesNeedToMigrate.isEmpty()) { - s_logger.debug("No volume need to be migrated"); - return true; - } - - return migrateVolumes(volumesNeedToMigrate, destPool); - } - - @Override - public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException { - - if (dest == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm); - } - throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm); - } - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm); - } - - boolean recreate = _recreateSystemVmEnabled; - - List recreateVols = new ArrayList(vols.size()); - - for (VolumeVO vol : vols) { - StoragePool assignedPool = null; - if (dest.getStorageForDisks() != null) { - assignedPool = dest.getStorageForDisks().get(vol); - } - if (assignedPool == null && recreate) { - assignedPool = _storagePoolDao.findById(vol.getPoolId()); - - } - if (assignedPool != null || recreate) { - Volume.State state = vol.getState(); - if (state == Volume.State.Allocated || state == Volume.State.Creating) { - recreateVols.add(vol); - } else { - if (vol.isRecreatable()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - recreateVols.add(vol); - } else { - if (assignedPool.getId() != vol.getPoolId()) { - if 
(s_logger.isDebugEnabled()) { - s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol); - } - DiskOfferingVO diskOffering = _diskOfferingDao.findById(vol.getDiskOfferingId()); - if (diskOffering.getUseLocalStorage()) - { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Local volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - recreateVols.add(vol); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); - } - try { - List volumesToMigrate = new ArrayList(); - volumesToMigrate.add(vol); - migrateVolumes(volumesToMigrate, assignedPool); - vm.addDisk(new VolumeTO(vol, assignedPool)); - } catch (ConcurrentOperationException e) { - throw new CloudRuntimeException("Migration of volume " + vol + " to storage pool " + assignedPool + " failed", e); - } - } - } else { - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - - } - } - } else { - if (vol.getPoolId() == null) { - throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol, Volume.class, vol.getId()); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM"); - } - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); - vm.addDisk(new VolumeTO(vol, pool)); - } - } - - for (VolumeVO vol : recreateVols) { - VolumeVO newVol; - StoragePool existingPool = null; - if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) { - existingPool = _storagePoolDao.findById(vol.getPoolId()); - s_logger.debug("existing pool: " + existingPool.getId()); 
- } - - if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) { - newVol = vol; - } else { - newVol = switchVolume(vol, vm); - // update the volume->storagePool map since volumeId has changed - if (dest.getStorageForDisks() != null && dest.getStorageForDisks().containsKey(vol)) { - StoragePool poolWithOldVol = dest.getStorageForDisks().get(vol); - dest.getStorageForDisks().put(newVol, poolWithOldVol); - dest.getStorageForDisks().remove(vol); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created new volume " + newVol + " for old volume " + vol); - } - } - - try { - stateTransitTo(newVol, Volume.Event.CreateRequested); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to create " + e.toString()); - } - - Pair created = createVolume(newVol, _diskOfferingDao.findById(newVol.getDiskOfferingId()), vm, vols, dest, existingPool); - - if (created == null) { - Long poolId = newVol.getPoolId(); - newVol.setPoolId(null); - try { - stateTransitTo(newVol, Volume.Event.OperationFailed); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to update the failure on a volume: " + newVol, e); - } - throw new StorageUnavailableException("Unable to create " + newVol, poolId == null ? 
-1L : poolId); - } - created.first().setDeviceId(newVol.getDeviceId().intValue()); - newVol.setFolder(created.second().getPath()); - newVol.setPath(created.first().getPath()); - newVol.setSize(created.first().getSize()); - newVol.setPoolType(created.second().getPoolType()); - newVol.setPodId(created.second().getPodId()); - try { - stateTransitTo(newVol, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to update an CREATE operation succeeded on volume " + newVol, e); - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume " + newVol + " is created on " + created.second()); - } - - vm.addDisk(created.first()); - } - } - - @DB - protected VolumeVO switchVolume(VolumeVO existingVolume, VirtualMachineProfile vm) throws StorageUnavailableException { - Transaction txn = Transaction.currentTxn(); - - Long templateIdToUse = null; - Long volTemplateId = existingVolume.getTemplateId(); - long vmTemplateId = vm.getTemplateId(); - if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId + ", updating templateId in the new Volume"); - } - templateIdToUse = vmTemplateId; - } - - txn.start(); - VolumeVO newVolume = allocateDuplicateVolume(existingVolume, templateIdToUse); - // In case of Vmware if vm reference is not removed then during root disk cleanup - // the vm also gets deleted, so remove the reference - if (vm.getHypervisorType() == HypervisorType.VMware) { - _volsDao.detachVolume(existingVolume.getId()); - } - try { - stateTransitTo(existingVolume, Volume.Event.DestroyRequested); - } catch (NoTransitionException e) { - s_logger.debug("Unable to destroy existing volume: " + e.toString()); - } - txn.commit(); - return newVolume; - - } - - public Pair createVolume(VolumeVO toBeCreated, DiskOfferingVO offering, 
VirtualMachineProfile vm, List alreadyCreated, - DeployDestination dest, StoragePool sPool) throws StorageUnavailableException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Creating volume: " + toBeCreated); - } - DiskProfile diskProfile = new DiskProfile(toBeCreated, offering, vm.getHypervisorType()); - - VMTemplateVO template = null; - if (toBeCreated.getTemplateId() != null) { - template = _templateDao.findById(toBeCreated.getTemplateId()); - } - - StoragePool pool = null; - if (sPool != null) { - pool = sPool; - } else { - pool = dest.getStorageForDisks().get(toBeCreated); - } - - if (pool != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Trying to create in " + pool); - } - toBeCreated.setPoolId(pool.getId()); - try { - stateTransitTo(toBeCreated, Volume.Event.OperationRetry); - } catch (NoTransitionException e) { - throw new CloudRuntimeException("Unable to retry a create operation on volume " + toBeCreated); - } - - CreateCommand cmd = null; - VMTemplateStoragePoolVO tmpltStoredOn = null; - - for (int i = 0; i < 2; i++) { - if (template != null && template.getFormat() != Storage.ImageFormat.ISO) { - if (pool.getPoolType() == StoragePoolType.CLVM) { - //prepareISOForCreate does what we need, which is to tell us where the template is - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - s_logger.debug("cannot find template " + template.getId() + " " + template.getName()); - return null; - } - HostVO secondaryStorageHost = _hostDao.findById(tmpltHostOn.getHostId()); - String tmpltHostUrl = secondaryStorageHost.getStorageUrl(); - String fullTmpltUrl = tmpltHostUrl + "/" + tmpltHostOn.getInstallPath(); - cmd = new CreateCommand(diskProfile, fullTmpltUrl, new StorageFilerTO(pool)); - } else { - tmpltStoredOn = _tmpltMgr.prepareTemplateForCreate(template, pool); - if (tmpltStoredOn == null) { - s_logger.debug("Cannot use this pool " + pool + " because we can't propagate template " + 
template); - return null; - } - cmd = new CreateCommand(diskProfile, tmpltStoredOn.getLocalDownloadPath(), new StorageFilerTO(pool)); - } - } else { - if (template != null && Storage.ImageFormat.ISO == template.getFormat()) { - VMTemplateHostVO tmpltHostOn = _tmpltMgr.prepareISOForCreate(template, pool); - if (tmpltHostOn == null) { - throw new CloudRuntimeException("Did not find ISO in secondry storage in zone " + pool.getDataCenterId()); - } - } - cmd = new CreateCommand(diskProfile, new StorageFilerTO(pool)); - } - long[] hostIdsToTryFirst = { dest.getHost().getId() }; - Answer answer = sendToPool(pool, hostIdsToTryFirst, cmd); - if (answer.getResult()) { - CreateAnswer createAnswer = (CreateAnswer) answer; - return new Pair(createAnswer.getVolume(), pool); - } else { - if (tmpltStoredOn != null && (answer instanceof CreateAnswer) && ((CreateAnswer) answer).templateReloadRequested()) { - if (!_tmpltMgr.resetTemplateDownloadStateOnPool(tmpltStoredOn.getId())) { - break; // break out of template-redeploy retry loop - } - } else { - break; - } - } - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to create volume " + toBeCreated); - } - return null; - } - - @Override - public void release(VirtualMachineProfile profile) { - // add code here - } - - public void expungeVolume(VolumeVO vol, boolean force) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Expunging " + vol); - } - - //Find out if the volume is present on secondary storage - VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(vol.getId()); - if(volumeHost != null){ - if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED){ - HostVO ssHost = _hostDao.findById(volumeHost.getHostId()); - DeleteVolumeCommand dtCommand = new DeleteVolumeCommand(ssHost.getStorageUrl(), volumeHost.getInstallPath()); - Answer answer = _agentMgr.sendToSecStorage(ssHost, dtCommand); - if (answer == null || !answer.getResult()) { - s_logger.debug("Failed to delete " + 
volumeHost + " due to " + ((answer == null) ? "answer is null" : answer.getDetails())); - return; - } - }else if(volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS){ - s_logger.debug("Volume: " + vol.getName() + " is currently being uploaded; cant' delete it."); - throw new CloudRuntimeException("Please specify a volume that is not currently being uploaded."); - } - _volumeHostDao.remove(volumeHost.getId()); - _volumeDao.remove(vol.getId()); - return; - } - - String vmName = null; - if (vol.getVolumeType() == Type.ROOT && vol.getInstanceId() != null) { - VirtualMachine vm = _vmInstanceDao.findByIdIncludingRemoved(vol.getInstanceId()); - if (vm != null) { - vmName = vm.getInstanceName(); - } - } - - String volumePath = vol.getPath(); - Long poolId = vol.getPoolId(); - if (poolId == null || volumePath == null || volumePath.trim().isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Marking volume that was never created as destroyed: " + vol); - } - _volsDao.remove(vol.getId()); - return; - } - - StoragePoolVO pool = _storagePoolDao.findById(poolId); - if (pool == null) { - s_logger.debug("Removing volume as storage pool is gone: " + poolId); - _volsDao.remove(vol.getId()); - return; - } - - DestroyCommand cmd = new DestroyCommand(pool, vol, vmName); - boolean removeVolume = false; - try { - Answer answer = sendToPool(pool, cmd); - if (answer != null && answer.getResult()) { - removeVolume = true; - } else { - s_logger.info("Will retry delete of " + vol + " from " + poolId); - } - } catch (StorageUnavailableException e) { - if (force) { - s_logger.info("Storage is unavailable currently, but marking volume id=" + vol.getId() + " as expunged anyway due to force=true"); - removeVolume = true; - } else { - s_logger.info("Storage is unavailable currently. 
Will retry delete of " + vol + " from " + poolId); - } - } catch (RuntimeException ex) { - if (force) { - s_logger.info("Failed to expunge volume, but marking volume id=" + vol.getId() + " as expunged anyway " + - "due to force=true. Volume failed to expunge due to ", ex); - removeVolume = true; - } else { - throw ex; - } - } finally { - if (removeVolume) { - _volsDao.remove(vol.getId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Volume successfully expunged from " + poolId); - } - } - } - - } - - @Override - @DB - public void cleanupVolumes(long vmId) throws ConcurrentOperationException { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Cleaning storage for vm: " + vmId); - } - List volumesForVm = _volsDao.findByInstance(vmId); - List toBeExpunged = new ArrayList(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (VolumeVO vol : volumesForVm) { - if (vol.getVolumeType().equals(Type.ROOT)) { - // This check is for VM in Error state (volume is already destroyed) - if (!vol.getState().equals(Volume.State.Destroy)) { - destroyVolume(vol); - } - toBeExpunged.add(vol); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detaching " + vol); - } - _volsDao.detachVolume(vol.getId()); - } - } - txn.commit(); - - for (VolumeVO expunge : toBeExpunged) { - expungeVolume(expunge, false); - } - } - + + + protected class StorageGarbageCollector implements Runnable { public StorageGarbageCollector() { @@ -3747,25 +1499,35 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public void onManagementNodeJoined(List nodeList, long selfNodeId) { + public void onManagementNodeJoined(List nodeList, + long selfNodeId) { // TODO Auto-generated method stub } @Override - public void onManagementNodeLeft(List nodeList, long selfNodeId) { + public void onManagementNodeLeft(List nodeList, + long selfNodeId) { for (ManagementServerHostVO vo : nodeList) { if (vo.getMsid() == _serverId) { - 
s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + vo.getMsid()); - List poolIds = _storagePoolWorkDao.searchForPoolIdsForPendingWorkJobs(vo.getMsid()); + s_logger.info("Cleaning up storage maintenance jobs associated with Management server" + + vo.getMsid()); + List poolIds = _storagePoolWorkDao + .searchForPoolIdsForPendingWorkJobs(vo.getMsid()); if (poolIds.size() > 0) { for (Long poolId : poolIds) { - StoragePoolVO pool = _storagePoolDao.findById(poolId); + StoragePoolVO pool = _storagePoolDao + .findById(poolId); // check if pool is in an inconsistent state if (pool != null - && (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) || pool.getStatus().equals( - StoragePoolStatus.CancelMaintenance))) { - _storagePoolWorkDao.removePendingJobsOnMsRestart(vo.getMsid(), poolId); + && (pool.getStatus().equals( + DataStoreStatus.ErrorInMaintenance) + || pool.getStatus() + .equals(DataStoreStatus.PrepareForMaintenance) || pool + .getStatus() + .equals(DataStoreStatus.CancelMaintenance))) { + _storagePoolWorkDao.removePendingJobsOnMsRestart( + vo.getMsid(), poolId); pool.setStatus(StoragePoolStatus.ErrorInMaintenance); _storagePoolDao.update(poolId, pool); } @@ -3794,22 +1556,28 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C hosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(zoneId); } - CapacityVO capacity = new CapacityVO(hostId, zoneId, null, null, 0, 0, CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE); + CapacityVO capacity = new CapacityVO(hostId, zoneId, null, null, 0, 0, + CapacityVO.CAPACITY_TYPE_SECONDARY_STORAGE); for (HostVO host : hosts) { - StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(host.getId()); + StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(host + .getId()); if (stats == null) { continue; } - capacity.setUsedCapacity(stats.getByteUsed() + capacity.getUsedCapacity()); - 
capacity.setTotalCapacity(stats.getCapacityBytes() + capacity.getTotalCapacity()); + capacity.setUsedCapacity(stats.getByteUsed() + + capacity.getUsedCapacity()); + capacity.setTotalCapacity(stats.getCapacityBytes() + + capacity.getTotalCapacity()); } return capacity; } @Override - public CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, Long podId, Long zoneId) { - SearchCriteria sc = _storagePoolDao.createSearchCriteria(); + public CapacityVO getStoragePoolUsedStats(Long poolId, Long clusterId, + Long podId, Long zoneId) { + SearchCriteria sc = _storagePoolDao + .createSearchCriteria(); List pools = new ArrayList(); if (zoneId != null) { @@ -3833,59 +1601,29 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C pools = _storagePoolDao.search(sc, null); } - CapacityVO capacity = new CapacityVO(poolId, zoneId, podId, clusterId, 0, 0, CapacityVO.CAPACITY_TYPE_STORAGE); - for (StoragePoolVO storagePool : pools) { - StorageStats stats = ApiDBUtils.getStoragePoolStatistics(storagePool.getId()); + CapacityVO capacity = new CapacityVO(poolId, zoneId, podId, clusterId, + 0, 0, CapacityVO.CAPACITY_TYPE_STORAGE); + for (StoragePoolVO PrimaryDataStoreVO : pools) { + StorageStats stats = ApiDBUtils + .getStoragePoolStatistics(PrimaryDataStoreVO.getId()); if (stats == null) { continue; } - capacity.setUsedCapacity(stats.getByteUsed() + capacity.getUsedCapacity()); - capacity.setTotalCapacity(stats.getCapacityBytes() + capacity.getTotalCapacity()); + capacity.setUsedCapacity(stats.getByteUsed() + + capacity.getUsedCapacity()); + capacity.setTotalCapacity(stats.getCapacityBytes() + + capacity.getTotalCapacity()); } return capacity; } @Override - public StoragePool getStoragePool(long id) { - return _storagePoolDao.findById(id); + public PrimaryDataStoreInfo getStoragePool(long id) { + return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(id, + DataStoreRole.Primary); } - @Override - public VMTemplateHostVO findVmTemplateHost(long 
templateId, StoragePool pool) { - long dcId = pool.getDataCenterId(); - Long podId = pool.getPodId(); - - List secHosts = _ssvmMgr.listSecondaryStorageHostsInOneZone(dcId); - - // FIXME, for cloudzone, the local secondary storoge - if (pool.isLocal() && pool.getPoolType() == StoragePoolType.Filesystem && secHosts.isEmpty()) { - List sphs = _storagePoolHostDao.listByPoolId(pool.getId()); - if (!sphs.isEmpty()) { - StoragePoolHostVO localStoragePoolHost = sphs.get(0); - return _templateHostDao.findLocalSecondaryStorageByHostTemplate(localStoragePoolHost.getHostId(), templateId); - } else { - return null; - } - } - - if (secHosts.size() == 1) { - VMTemplateHostVO templateHostVO = _templateHostDao.findByHostTemplate(secHosts.get(0).getId(), templateId); - return templateHostVO; - } - if (podId != null) { - List templHosts = _templateHostDao.listByTemplateStatus(templateId, dcId, podId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - if (templHosts != null && !templHosts.isEmpty()) { - Collections.shuffle(templHosts); - return templHosts.get(0); - } - } - List templHosts = _templateHostDao.listByTemplateStatus(templateId, dcId, VMTemplateStorageResourceAssoc.Status.DOWNLOADED); - if (templHosts != null && !templHosts.isEmpty()) { - Collections.shuffle(templHosts); - return templHosts.get(0); - } - return null; - } + @Override @DB @@ -3900,9 +1638,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @DB public StoragePoolVO findLocalStorageOnHost(long hostId) { SearchCriteria sc = LocalStorageSearch.create(); - sc.setParameters("type", new Object[] { StoragePoolType.Filesystem, StoragePoolType.LVM }); + sc.setParameters("type", new Object[] { StoragePoolType.Filesystem, + StoragePoolType.LVM }); sc.setJoinParameters("poolHost", "hostId", hostId); - List storagePools = _storagePoolDao.search(sc, null); + List storagePools = _storagePoolDao + .search(sc, null); if (!storagePools.isEmpty()) { return storagePools.get(0); } else { @@ 
-3914,25 +1654,33 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C public Host updateSecondaryStorage(long secStorageId, String newUrl) { HostVO secHost = _hostDao.findById(secStorageId); if (secHost == null) { - throw new InvalidParameterValueException("Can not find out the secondary storage id: " + secStorageId); + throw new InvalidParameterValueException( + "Can not find out the secondary storage id: " + + secStorageId); } if (secHost.getType() != Host.Type.SecondaryStorage) { - throw new InvalidParameterValueException("host: " + secStorageId + " is not a secondary storage"); + throw new InvalidParameterValueException("host: " + secStorageId + + " is not a secondary storage"); } URI uri = null; try { uri = new URI(UriUtils.encodeURIComponent(newUrl)); if (uri.getScheme() == null) { - throw new InvalidParameterValueException("uri.scheme is null " + newUrl + ", add nfs:// as a prefix"); + throw new InvalidParameterValueException("uri.scheme is null " + + newUrl + ", add nfs:// as a prefix"); } else if (uri.getScheme().equalsIgnoreCase("nfs")) { - if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null || uri.getPath().equalsIgnoreCase("")) { - throw new InvalidParameterValueException("Your host and/or path is wrong. Make sure it's of the format nfs://hostname/path"); + if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") + || uri.getPath() == null + || uri.getPath().equalsIgnoreCase("")) { + throw new InvalidParameterValueException( + "Your host and/or path is wrong. 
Make sure it's of the format nfs://hostname/path"); } } } catch (URISyntaxException e) { - throw new InvalidParameterValueException(newUrl + " is not a valid uri"); + throw new InvalidParameterValueException(newUrl + + " is not a valid uri"); } String oldUrl = secHost.getStorageUrl(); @@ -3941,7 +1689,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C try { oldUri = new URI(UriUtils.encodeURIComponent(oldUrl)); if (!oldUri.getScheme().equalsIgnoreCase(uri.getScheme())) { - throw new InvalidParameterValueException("can not change old scheme:" + oldUri.getScheme() + " to " + uri.getScheme()); + throw new InvalidParameterValueException( + "can not change old scheme:" + oldUri.getScheme() + + " to " + uri.getScheme()); } } catch (URISyntaxException e) { s_logger.debug("Failed to get uri from " + oldUrl); @@ -3954,29 +1704,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return secHost; } - - - @Override - public String getSupportedImageFormatForCluster(Long clusterId) { - ClusterVO cluster = ApiDBUtils.findClusterById(clusterId); - - if (cluster.getHypervisorType() == HypervisorType.XenServer) { - return "vhd"; - } else if (cluster.getHypervisorType() == HypervisorType.KVM) { - return "qcow2"; - } else if (cluster.getHypervisorType() == HypervisorType.VMware) { - return "ova"; - } else if (cluster.getHypervisorType() == HypervisorType.Ovm) { - return "raw"; - } else { - return null; - } - } + @Override public HypervisorType getHypervisorTypeFromFormat(ImageFormat format) { - if(format == null) { + if (format == null) { return HypervisorType.None; } @@ -3993,22 +1726,32 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } - private boolean checkUsagedSpace(StoragePool pool){ + private boolean checkUsagedSpace(StoragePool pool) { StatsCollector sc = StatsCollector.getInstance(); if (sc != null) { long totalSize = pool.getCapacityBytes(); StorageStats stats = 
sc.getStoragePoolStats(pool.getId()); - if(stats == null){ + if (stats == null) { stats = sc.getStorageStats(pool.getId()); } if (stats != null) { - double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize); + double usedPercentage = ((double) stats.getByteUsed() / (double) totalSize); if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + ", disable threshold: " + _storageUsedThreshold); + s_logger.debug("Checking pool " + pool.getId() + + " for storage, totalSize: " + + pool.getCapacityBytes() + ", usedBytes: " + + stats.getByteUsed() + ", usedPct: " + + usedPercentage + ", disable threshold: " + + _storageUsedThreshold); } if (usedPercentage >= _storageUsedThreshold) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " +usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + _storageUsedThreshold); + s_logger.debug("Insufficient space on pool: " + + pool.getId() + + " since its usage percentage: " + + usedPercentage + + " has crossed the pool.storage.capacity.disablethreshold: " + + _storageUsedThreshold); } return false; } @@ -4019,54 +1762,113 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } @Override - public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool) { - if(volumes == null || volumes.isEmpty()) + public boolean storagePoolHasEnoughSpace(List volumes, + StoragePool pool) { + if (volumes == null || volumes.isEmpty()) return false; - if(!checkUsagedSpace(pool)) + if (!checkUsagedSpace(pool)) return false; // allocated space includes template of specified volume StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); - long allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null); + long 
allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity( + poolVO, null); long totalAskingSize = 0; for (Volume volume : volumes) { - if(volume.getTemplateId()!=null){ - VMTemplateVO tmpl = _templateDao.findById(volume.getTemplateId()); - if (tmpl.getFormat() != ImageFormat.ISO){ - allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl); + if (volume.getTemplateId() != null) { + VMTemplateVO tmpl = _templateDao.findById(volume + .getTemplateId()); + if (tmpl.getFormat() != ImageFormat.ISO) { + allocatedSizeWithtemplate = _capacityMgr + .getAllocatedPoolCapacity(poolVO, tmpl); } } - if(volume.getState() != Volume.State.Ready) + if (volume.getState() != Volume.State.Ready) totalAskingSize = totalAskingSize + volume.getSize(); } long totalOverProvCapacity; if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) { - totalOverProvCapacity = _storageOverprovisioningFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();// All this for the inaccuracy of floats for big number multiplication. 
- }else { + totalOverProvCapacity = _storageOverprovisioningFactor.multiply( + new BigDecimal(pool.getCapacityBytes())).longValue(); + } else { totalOverProvCapacity = pool.getCapacityBytes(); } if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + _storageAllocatedThreshold); + s_logger.debug("Checking pool: " + pool.getId() + + " for volume allocation " + volumes.toString() + + ", maxSize : " + totalOverProvCapacity + + ", totalAllocatedSize : " + allocatedSizeWithtemplate + + ", askingSize : " + totalAskingSize + + ", allocated disable threshold: " + + _storageAllocatedThreshold); } - double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity); - if (usedPercentage > _storageAllocatedThreshold){ + double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) + / (double) (totalOverProvCapacity); + if (usedPercentage > _storageAllocatedThreshold) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " +usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + _storageAllocatedThreshold + ", skipping this pool"); + s_logger.debug("Insufficient un-allocated capacity on: " + + pool.getId() + + " for volume allocation: " + + volumes.toString() + + " since its allocated percentage: " + + usedPercentage + + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + + _storageAllocatedThreshold + ", skipping this pool"); } return false; } if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) { if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize); + s_logger.debug("Insufficient un-allocated capacity on: " + + pool.getId() + " for volume allocation: " + + volumes.toString() + + ", not enough storage, maxSize : " + + totalOverProvCapacity + ", totalAllocatedSize : " + + allocatedSizeWithtemplate + ", askingSize : " + + totalAskingSize); } return false; } return true; } + @Override + public void createCapacityEntry(long poolId) { + StoragePoolVO storage = _storagePoolDao.findById(poolId); + createCapacityEntry(storage, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, 0); + } + + + @Override + public synchronized boolean registerHostListener(String providerUuid, + HypervisorHostListener listener) { + hostListeners.put(providerUuid, listener); + return true; + } + + @Override + public Answer sendToPool(long poolId, Command cmd) + throws StorageUnavailableException { + // TODO Auto-generated method stub + return null; + } + + @Override + public Answer[] sendToPool(long poolId, Commands cmd) + throws StorageUnavailableException { + // TODO Auto-generated method stub + return null; + } + + @Override + public String getName() { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/src/com/cloud/storage/TemplateProfile.java b/server/src/com/cloud/storage/TemplateProfile.java index 1d8b6bfc1a3..0b55f1fbea2 100755 --- a/server/src/com/cloud/storage/TemplateProfile.java +++ b/server/src/com/cloud/storage/TemplateProfile.java @@ -20,7 +20,6 @@ import java.util.Map; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.VMTemplateVO; public class TemplateProfile { Long userId; @@ -46,6 +45,7 @@ public class TemplateProfile { Long templateId; VMTemplateVO 
template; String templateTag; + Long imageStoreId; Map details; public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, @@ -83,10 +83,12 @@ public class TemplateProfile { public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, ImageFormat format, Long guestOsId, Long zoneId, - HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled) { + HypervisorType hypervisorType, String accountName, Long domainId, Long accountId, String chksum, Boolean bootable, String templateTag, Map details, Boolean sshKeyEnabled, + Long imageStoreId) { this(templateId, userId, name, displayText, bits, passwordEnabled, requiresHvm, url, isPublic, featured, isExtractable, format, guestOsId, zoneId, hypervisorType, accountName, domainId, accountId, chksum, bootable, details, sshKeyEnabled); this.templateTag = templateTag; + this.imageStoreId = imageStoreId; } public Long getTemplateId() { @@ -252,4 +254,8 @@ public class TemplateProfile { public Boolean getSshKeyEnabled() { return this.sshKeyEnbaled; } + + public Long getImageStoreId() { + return this.imageStoreId; + } } diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java new file mode 100644 index 00000000000..af3cbbfbae5 --- /dev/null +++ b/server/src/com/cloud/storage/VolumeManager.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; + +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Volume.Type; +import com.cloud.user.Account; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +public interface VolumeManager extends VolumeApiService { + + VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, + Long destPoolClusterId, HypervisorType dataDiskHyperType) + throws ConcurrentOperationException; + + VolumeVO uploadVolume(UploadVolumeCmd cmd) + throws 
ResourceAllocationException; + + VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId); + + boolean volumeOnSharedStoragePool(VolumeVO volume); + + boolean volumeInactive(Volume volume); + + String getVmNameOnVolume(Volume volume); + + VolumeVO allocVolume(CreateVolumeCmd cmd) + throws ResourceAllocationException; + + VolumeVO createVolume(CreateVolumeCmd cmd); + + VolumeVO resizeVolume(ResizeVolumeCmd cmd); + + boolean deleteVolume(long volumeId, Account caller) + throws ConcurrentOperationException; + + void destroyVolume(VolumeVO volume); + + DiskProfile allocateRawVolume(Type type, String name, DiskOfferingVO offering, Long size, VMInstanceVO vm, Account owner); + Volume attachVolumeToVM(AttachVolumeCmd command); + + Volume detachVolumeFromVM(DetachVolumeCmd cmmd); + + void release(VirtualMachineProfile profile); + + void cleanupVolumes(long vmId) throws ConcurrentOperationException; + + Volume migrateVolume(MigrateVolumeCmd cmd); + + boolean storageMigration( + VirtualMachineProfile vm, + StoragePool destPool); + + void prepareForMigration( + VirtualMachineProfile vm, + DeployDestination dest); + + void prepare(VirtualMachineProfile vm, + DeployDestination dest) throws StorageUnavailableException, + InsufficientStorageCapacityException, ConcurrentOperationException; + + boolean canVmRestartOnAnotherServer(long vmId); + + DiskProfile allocateTemplatedVolume(Type type, String name, + DiskOfferingVO offering, VMTemplateVO template, VMInstanceVO vm, + Account owner); +} diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java new file mode 100644 index 00000000000..4951975786f --- /dev/null +++ b/server/src/com/cloud/storage/VolumeManagerImpl.java @@ -0,0 +1,2460 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.storage; + +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachVolumeAnswer; +import com.cloud.agent.api.AttachVolumeCommand; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.alert.AlertManager; +import com.cloud.api.ApiDBUtils; +import com.cloud.async.AsyncJobExecutor; +import com.cloud.async.AsyncJobManager; +import com.cloud.async.AsyncJobVO; +import com.cloud.async.BaseAsyncJobExecutor; +import com.cloud.capacity.CapacityManager; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.Config; +import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.Resource.ResourceType; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.consoleproxy.ConsoleProxyManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import 
com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventVO; +import com.cloud.event.dao.EventDao; +import com.cloud.event.dao.UsageEventDao; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.PermissionDeniedException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; +import com.cloud.network.NetworkModel; +import com.cloud.org.Grouping; +import com.cloud.resource.ResourceManager; +import com.cloud.server.ManagementServer; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.Volume.Event; +import com.cloud.storage.Volume.Type; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.StoragePoolWorkDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; +import 
com.cloud.storage.download.DownloadMonitor; +import com.cloud.storage.s3.S3Manager; +import com.cloud.storage.secondary.SecondaryStorageVmManager; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.storage.snapshot.SnapshotScheduler; +import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.UserContext; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.EnumUtils; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.JoinBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachine.State; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.DomainRouterDao; +import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; + +@Component +public class VolumeManagerImpl extends ManagerBase implements VolumeManager { + private static final Logger s_logger = Logger + .getLogger(VolumeManagerImpl.class); + @Inject + protected UserVmManager _userVmMgr; + @Inject + protected AgentManager _agentMgr; + @Inject + protected TemplateManager _tmpltMgr; + @Inject + protected AsyncJobManager _asyncMgr; + @Inject + 
protected SnapshotManager _snapshotMgr; + @Inject + protected SnapshotScheduler _snapshotScheduler; + @Inject + protected AccountManager _accountMgr; + @Inject + protected ConfigurationManager _configMgr; + @Inject + protected ConsoleProxyManager _consoleProxyMgr; + @Inject + protected SecondaryStorageVmManager _secStorageMgr; + @Inject + protected NetworkModel _networkMgr; + @Inject + protected ServiceOfferingDao _serviceOfferingDao; + @Inject + protected VolumeDao _volsDao; + @Inject + protected HostDao _hostDao; + @Inject + protected ConsoleProxyDao _consoleProxyDao; + @Inject + protected SnapshotDao _snapshotDao; + @Inject + protected SnapshotManager _snapMgr; + @Inject + protected SnapshotPolicyDao _snapshotPolicyDao; + @Inject + protected StoragePoolHostDao _storagePoolHostDao; + @Inject + protected AlertManager _alertMgr; + @Inject + protected VMTemplateHostDao _vmTemplateHostDao = null; + @Inject + protected VMTemplatePoolDao _vmTemplatePoolDao = null; + @Inject + protected VMTemplateSwiftDao _vmTemplateSwiftDao = null; + @Inject + protected VMTemplateS3Dao _vmTemplateS3Dao; + @Inject + protected S3Manager _s3Mgr; + @Inject + protected VMTemplateDao _vmTemplateDao = null; + @Inject + protected StoragePoolHostDao _poolHostDao = null; + @Inject + protected UserVmDao _userVmDao; + @Inject + VolumeHostDao _volumeHostDao; + @Inject + protected VMInstanceDao _vmInstanceDao; + @Inject + protected PrimaryDataStoreDao _storagePoolDao = null; + @Inject + protected CapacityDao _capacityDao; + @Inject + protected CapacityManager _capacityMgr; + @Inject + protected DiskOfferingDao _diskOfferingDao; + @Inject + protected AccountDao _accountDao; + @Inject + protected EventDao _eventDao = null; + @Inject + protected DataCenterDao _dcDao = null; + @Inject + protected HostPodDao _podDao = null; + @Inject + protected VMTemplateDao _templateDao; + @Inject + protected VMTemplateHostDao _templateHostDao; + @Inject + protected ServiceOfferingDao _offeringDao; + @Inject + 
protected DomainDao _domainDao; + @Inject + protected UserDao _userDao; + @Inject + protected ClusterDao _clusterDao; + @Inject + protected UsageEventDao _usageEventDao; + @Inject + protected VirtualMachineManager _vmMgr; + @Inject + protected DomainRouterDao _domrDao; + @Inject + protected SecondaryStorageVmDao _secStrgDao; + @Inject + protected StoragePoolWorkDao _storagePoolWorkDao; + @Inject + protected HypervisorGuruManager _hvGuruMgr; + @Inject + protected VolumeDao _volumeDao; + @Inject + protected OCFS2Manager _ocfs2Mgr; + @Inject + protected ResourceLimitService _resourceLimitMgr; + @Inject + protected SecondaryStorageVmManager _ssvmMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject + protected DownloadMonitor _downloadMonitor; + @Inject + protected ResourceTagDao _resourceTagDao; + @Inject + protected List _storagePoolAllocators; + @Inject + ConfigurationDao _configDao; + @Inject + ManagementServer _msServer; + @Inject + DataStoreManager dataStoreMgr; + @Inject + DataStoreProviderManager dataStoreProviderMgr; + @Inject + VolumeService volService; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + private int _copyvolumewait; + @Inject + protected HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; + private final StateMachine2 _volStateMachine; + @Inject + StorageManager storageMgr; + private int _customDiskOfferingMinSize = 1; + private int _customDiskOfferingMaxSize = 1024; + private long _maxVolumeSizeInGb; + private boolean _recreateSystemVmEnabled; + protected SearchBuilder HostTemplateStatesSearch; + + public VolumeManagerImpl() { + _volStateMachine = Volume.State.getStateMachine(); + } + + @Override + public VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, + Long destPoolPodId, Long destPoolClusterId, + HypervisorType dataDiskHyperType) + throws ConcurrentOperationException { + + // Find a destination storage pool with the specified 
criteria + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + DiskProfile dskCh = new DiskProfile(volume.getId(), + volume.getVolumeType(), volume.getName(), diskOffering.getId(), + diskOffering.getDiskSize(), diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), null); + dskCh.setHyperType(dataDiskHyperType); + DataCenterVO destPoolDataCenter = _dcDao.findById(destPoolDcId); + HostPodVO destPoolPod = _podDao.findById(destPoolPodId); + + StoragePool destPool = storageMgr.findStoragePool(dskCh, + destPoolDataCenter, destPoolPod, destPoolClusterId, null, null, + new HashSet()); + + if (destPool == null) { + throw new CloudRuntimeException( + "Failed to find a storage pool with enough capacity to move the volume to."); + } + + Volume newVol = migrateVolume(volume, destPool); + return this.volFactory.getVolume(newVol.getId()); + } + + /* + * Upload the volume to secondary storage. + */ + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPLOAD, eventDescription = "uploading volume", async = true) + public VolumeVO uploadVolume(UploadVolumeCmd cmd) + throws ResourceAllocationException { + Account caller = UserContext.current().getCaller(); + long ownerId = cmd.getEntityOwnerId(); + Long zoneId = cmd.getZoneId(); + String volumeName = cmd.getVolumeName(); + String url = cmd.getUrl(); + String format = cmd.getFormat(); + String imageStoreUuid = cmd.getImageStoreUuid(); + DataStore store = this._tmpltMgr.getImageStore(imageStoreUuid, zoneId); + + validateVolume(caller, ownerId, zoneId, volumeName, url, format); + + VolumeVO volume = persistVolume(caller, ownerId, zoneId, volumeName, + url, cmd.getFormat()); + + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + + RegisterVolumePayload payload = new RegisterVolumePayload(cmd.getUrl(), cmd.getChecksum(), + cmd.getFormat()); + vol.addPayload(payload); + + this.volService.registerVolume(vol, store); + return volume; + 
} + + private boolean validateVolume(Account caller, long ownerId, Long zoneId, + String volumeName, String url, String format) + throws ResourceAllocationException { + + // permission check + _accountMgr.checkAccess(caller, null, true, + _accountMgr.getActiveAccountById(ownerId)); + + // Check that the resource limit for volumes won't be exceeded + _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), + ResourceType.volume); + + // Verify that zone exists + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException( + "Unable to find zone by id " + zoneId); + } + + // Check if zone is disabled + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && !_accountMgr.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Cannot perform this operation, Zone is currently disabled: " + + zoneId); + } + + if (url.toLowerCase().contains("file://")) { + throw new InvalidParameterValueException( + "File:// type urls are currently unsupported"); + } + + ImageFormat imgfmt = ImageFormat.valueOf(format.toUpperCase()); + if (imgfmt == null) { + throw new IllegalArgumentException("Image format is incorrect " + + format + ". 
Supported formats are " + + EnumUtils.listValues(ImageFormat.values())); + } + + String userSpecifiedName = volumeName; + if (userSpecifiedName == null) { + userSpecifiedName = getRandomVolumeName(); + } + if ((!url.toLowerCase().endsWith("vhd")) + && (!url.toLowerCase().endsWith("vhd.zip")) + && (!url.toLowerCase().endsWith("vhd.bz2")) + && (!url.toLowerCase().endsWith("vhd.gz")) + && (!url.toLowerCase().endsWith("qcow2")) + && (!url.toLowerCase().endsWith("qcow2.zip")) + && (!url.toLowerCase().endsWith("qcow2.bz2")) + && (!url.toLowerCase().endsWith("qcow2.gz")) + && (!url.toLowerCase().endsWith("ova")) + && (!url.toLowerCase().endsWith("ova.zip")) + && (!url.toLowerCase().endsWith("ova.bz2")) + && (!url.toLowerCase().endsWith("ova.gz")) + && (!url.toLowerCase().endsWith("img")) + && (!url.toLowerCase().endsWith("raw"))) { + throw new InvalidParameterValueException("Please specify a valid " + + format.toLowerCase()); + } + + if ((format.equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith( + ".vhd") + && !url.toLowerCase().endsWith("vhd.zip") + && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase() + .endsWith("vhd.gz"))) + || (format.equalsIgnoreCase("qcow2") && (!url.toLowerCase() + .endsWith(".qcow2") + && !url.toLowerCase().endsWith("qcow2.zip") + && !url.toLowerCase().endsWith("qcow2.bz2") && !url + .toLowerCase().endsWith("qcow2.gz"))) + || (format.equalsIgnoreCase("ova") && (!url.toLowerCase() + .endsWith(".ova") + && !url.toLowerCase().endsWith("ova.zip") + && !url.toLowerCase().endsWith("ova.bz2") && !url + .toLowerCase().endsWith("ova.gz"))) + || (format.equalsIgnoreCase("raw") && (!url.toLowerCase() + .endsWith(".img") && !url.toLowerCase().endsWith("raw")))) { + throw new InvalidParameterValueException( + "Please specify a valid URL. 
URL:" + url + + " is an invalid for the format " + + format.toLowerCase()); + } + validateUrl(url); + + return false; + } + + @Override + public VolumeVO allocateDuplicateVolume(VolumeVO oldVol, Long templateId) { + VolumeVO newVol = new VolumeVO(oldVol.getVolumeType(), + oldVol.getName(), oldVol.getDataCenterId(), + oldVol.getDomainId(), oldVol.getAccountId(), + oldVol.getDiskOfferingId(), oldVol.getSize()); + if (templateId != null) { + newVol.setTemplateId(templateId); + } else { + newVol.setTemplateId(oldVol.getTemplateId()); + } + newVol.setDeviceId(oldVol.getDeviceId()); + newVol.setInstanceId(oldVol.getInstanceId()); + newVol.setRecreatable(oldVol.isRecreatable()); + return _volsDao.persist(newVol); + } + + @DB + protected VolumeInfo createVolumeFromSnapshot(VolumeVO volume, + SnapshotVO snapshot) { + Account account = _accountDao.findById(volume.getAccountId()); + + final HashSet poolsToAvoid = new HashSet(); + StoragePool pool = null; + + Set podsToAvoid = new HashSet(); + Pair pod = null; + + + DiskOfferingVO diskOffering = _diskOfferingDao + .findByIdIncludingRemoved(volume.getDiskOfferingId()); + DataCenterVO dc = _dcDao.findById(volume.getDataCenterId()); + DiskProfile dskCh = new DiskProfile(volume, diskOffering, + snapshot.getHypervisorType()); + + // Determine what pod to store the volume in + while ((pod = _resourceMgr.findPod(null, null, dc, account.getId(), + podsToAvoid)) != null) { + podsToAvoid.add(pod.first().getId()); + // Determine what storage pool to store the volume in + while ((pool = storageMgr.findStoragePool(dskCh, dc, pod.first(), null, null, + null, poolsToAvoid)) != null) { + break; + + } + } + + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + DataStore store = this.dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + SnapshotInfo snapInfo = this.snapshotFactory.getSnapshot(snapshot.getId()); + AsyncCallFuture future = this.volService.createVolumeFromSnapshot(vol, store, snapInfo); + try { + 
VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("Failed to create volume from snapshot:" + result.getResult()); + throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult()); + } + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.debug("Failed to create volume from snapshot", e); + throw new CloudRuntimeException("Failed to create volume from snapshot", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create volume from snapshot", e); + throw new CloudRuntimeException("Failed to create volume from snapshot", e); + } + + } + + protected DiskProfile createDiskCharacteristics(VolumeInfo volume, + VMTemplateVO template, DataCenterVO dc, DiskOfferingVO diskOffering) { + if (volume.getVolumeType() == Type.ROOT + && Storage.ImageFormat.ISO != template.getFormat()) { + SearchCriteria sc = HostTemplateStatesSearch + .create(); + sc.setParameters("id", template.getId()); + sc.setParameters( + "state", + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sc.setJoinParameters("host", "dcId", dc.getId()); + + List sss = _vmTemplateHostDao.search(sc, null); + if (sss.size() == 0) { + throw new CloudRuntimeException("Template " + + template.getName() + + " has not been completely downloaded to zone " + + dc.getId()); + } + VMTemplateHostVO ss = sss.get(0); + + return new DiskProfile(volume.getId(), volume.getVolumeType(), + volume.getName(), diskOffering.getId(), ss.getSize(), + diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), + Storage.ImageFormat.ISO != template.getFormat() ? 
template + .getId() : null); + } else { + return new DiskProfile(volume.getId(), volume.getVolumeType(), + volume.getName(), diskOffering.getId(), + diskOffering.getDiskSize(), diskOffering.getTagsArray(), + diskOffering.getUseLocalStorage(), + diskOffering.isRecreatable(), null); + } + } + + protected VolumeVO createVolumeFromSnapshot(VolumeVO volume, long snapshotId) { + VolumeInfo createdVolume = null; + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + createdVolume = createVolumeFromSnapshot(volume, + snapshot); + + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, + createdVolume.getAccountId(), + createdVolume.getDataCenterId(), createdVolume.getId(), + createdVolume.getName(), createdVolume.getDiskOfferingId(), + null, createdVolume.getSize()); + _usageEventDao.persist(usageEvent); + + return this._volsDao.findById(createdVolume.getId()); + } + + @DB + public VolumeInfo copyVolumeFromSecToPrimary(VolumeInfo volume, + VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, + HostPodVO pod, Long clusterId, ServiceOfferingVO offering, + DiskOfferingVO diskOffering, List avoids, + long size, HypervisorType hyperType) throws NoTransitionException { + + final HashSet avoidPools = new HashSet( + avoids); + DiskProfile dskCh = createDiskCharacteristics(volume, template, dc, + diskOffering); + dskCh.setHyperType(vm.getHypervisorType()); + // Find a suitable storage to create volume on + StoragePool destPool = storageMgr.findStoragePool(dskCh, dc, pod, + clusterId, null, vm, avoidPools); + DataStore destStore = this.dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary); + AsyncCallFuture future = this.volService.copyVolume(volume, destStore); + + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("copy volume failed: " + result.getResult()); + throw new CloudRuntimeException("copy volume failed: " + result.getResult()); + } + return result.getVolume(); + } catch 
(InterruptedException e) { + s_logger.debug("Failed to copy volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to copy volume", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to copy volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to copy volume", e); + } + } + + @DB + public VolumeInfo createVolume(VolumeInfo volume, VMInstanceVO vm, + VMTemplateVO template, DataCenterVO dc, HostPodVO pod, + Long clusterId, ServiceOfferingVO offering, + DiskOfferingVO diskOffering, List avoids, + long size, HypervisorType hyperType) { + StoragePool pool = null; + + if (diskOffering != null && diskOffering.isCustomized()) { + diskOffering.setDiskSize(size); + } + + DiskProfile dskCh = null; + if (volume.getVolumeType() == Type.ROOT + && Storage.ImageFormat.ISO != template.getFormat()) { + dskCh = createDiskCharacteristics(volume, template, dc, offering); + } else { + dskCh = createDiskCharacteristics(volume, template, dc, + diskOffering); + } + + dskCh.setHyperType(hyperType); + + final HashSet avoidPools = new HashSet( + avoids); + + pool = storageMgr.findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), + vm, avoidPools); + if (pool == null) { + s_logger.warn("Unable to find storage pool when creating volume " + + volume.getName()); + throw new CloudRuntimeException("Unable to find storage pool when creating volume " + volume.getName()); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to create " + volume + " on " + pool); + } + DataStore store = this.dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary); + AsyncCallFuture future = null; + boolean isNotCreatedFromTemplate = volume.getTemplateId() == null ? 
true : false; + if (isNotCreatedFromTemplate) { + future = this.volService.createVolumeAsync(volume, store); + } else { + TemplateInfo templ = this.tmplFactory.getTemplate(template.getId()); + future = this.volService.createVolumeFromTemplateAsync(volume, store.getId(), templ); + } + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("create volume failed: " + result.getResult()); + throw new CloudRuntimeException("create volume failed:" + result.getResult()); + } + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), volume.getName(), + volume.getDiskOfferingId(), null, volume.getSize()); + _usageEventDao.persist(usageEvent); + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.error("create volume failed", e); + throw new CloudRuntimeException("create volume failed", e); + } catch (ExecutionException e) { + s_logger.error("create volume failed", e); + throw new CloudRuntimeException("create volume failed", e); + } + + } + + public String getRandomVolumeName() { + return UUID.randomUUID().toString(); + } + + private VolumeVO persistVolume(Account caller, long ownerId, Long zoneId, + String volumeName, String url, String format) { + + Transaction txn = Transaction.currentTxn(); + txn.start(); + + VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, + new Long(-1), null, null, 0, Volume.Type.DATADISK); + volume.setPoolId(null); + volume.setDataCenterId(zoneId); + volume.setPodId(null); + volume.setAccountId(ownerId); + volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller + .getDomainId())); + long diskOfferingId = _diskOfferingDao.findByUniqueName( + "Cloud.com-Custom").getId(); + volume.setDiskOfferingId(diskOfferingId); + // volume.setSize(size); + volume.setInstanceId(null); + volume.setUpdated(new Date()); + volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller + .getDomainId()); + + volume = _volsDao.persist(volume); + try { + stateTransitTo(volume, Event.UploadRequested); + } catch (NoTransitionException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + UserContext.current().setEventDetails("Volume Id: " + volume.getId()); + + // Increment resource count during allocation; if actual creation fails, + // decrement it + _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), + ResourceType.volume); + + txn.commit(); + return volume; + } + + @Override + public boolean volumeOnSharedStoragePool(VolumeVO volume) { + Long poolId = volume.getPoolId(); + if (poolId == null) { + return false; + } else { + StoragePoolVO pool = _storagePoolDao.findById(poolId); + + if (pool == null) { + return false; + } else { + return (pool.getScope() == ScopeType.HOST) ? false : true; + } + } + } + + @Override + public boolean volumeInactive(Volume volume) { + Long vmId = volume.getInstanceId(); + if (vmId != null) { + UserVm vm = _userVmDao.findById(vmId); + if (vm == null) { + return true; + } + State state = vm.getState(); + if (state.equals(State.Stopped) || state.equals(State.Destroyed)) { + return true; + } + } + return false; + } + + @Override + public String getVmNameOnVolume(Volume volume) { + Long vmId = volume.getInstanceId(); + if (vmId != null) { + VMInstanceVO vm = _vmInstanceDao.findById(vmId); + + if (vm == null) { + return null; + } + return vm.getInstanceName(); + } + return null; + } + + /* + * Just allocate a volume in the database, don't send the createvolume cmd + * to hypervisor. The volume will be finally created only when it's attached + * to a VM. + */ + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", create = true) + public VolumeVO allocVolume(CreateVolumeCmd cmd) + throws ResourceAllocationException { + // FIXME: some of the scheduled event stuff might be missing here... 
+ Account caller = UserContext.current().getCaller(); + + long ownerId = cmd.getEntityOwnerId(); + + // permission check + _accountMgr.checkAccess(caller, null, true, + _accountMgr.getActiveAccountById(ownerId)); + + // Check that the resource limit for volumes won't be exceeded + _resourceLimitMgr.checkResourceLimit(_accountMgr.getAccount(ownerId), + ResourceType.volume); + + Long zoneId = cmd.getZoneId(); + Long diskOfferingId = null; + DiskOfferingVO diskOffering = null; + Long size = null; + // Volume VO used for extracting the source template id + VolumeVO parentVolume = null; + + // validate input parameters before creating the volume + if ((cmd.getSnapshotId() == null && cmd.getDiskOfferingId() == null) + || (cmd.getSnapshotId() != null && cmd.getDiskOfferingId() != null)) { + throw new InvalidParameterValueException( + "Either disk Offering Id or snapshot Id must be passed whilst creating volume"); + } + + if (cmd.getSnapshotId() == null) {// create a new volume + + diskOfferingId = cmd.getDiskOfferingId(); + size = cmd.getSize(); + Long sizeInGB = size; + if (size != null) { + if (size > 0) { + size = size * 1024 * 1024 * 1024; // user specify size in GB + } else { + throw new InvalidParameterValueException( + "Disk size must be larger than 0"); + } + } + + // Check that the the disk offering is specified + diskOffering = _diskOfferingDao.findById(diskOfferingId); + if ((diskOffering == null) || diskOffering.getRemoved() != null + || !DiskOfferingVO.Type.Disk.equals(diskOffering.getType())) { + throw new InvalidParameterValueException( + "Please specify a valid disk offering."); + } + + if (diskOffering.isCustomized()) { + if (size == null) { + throw new InvalidParameterValueException( + "This disk offering requires a custom size specified"); + } + if ((sizeInGB < _customDiskOfferingMinSize) + || (sizeInGB > _customDiskOfferingMaxSize)) { + throw new InvalidParameterValueException("Volume size: " + + sizeInGB + "GB is out of allowed range. 
Max: " + + _customDiskOfferingMaxSize + " Min:" + + _customDiskOfferingMinSize); + } + } + + if (!diskOffering.isCustomized() && size != null) { + throw new InvalidParameterValueException( + "This disk offering does not allow custom size"); + } + + if (diskOffering.getDomainId() == null) { + // do nothing as offering is public + } else { + _configMgr.checkDiskOfferingAccess(caller, diskOffering); + } + + if (diskOffering.getDiskSize() > 0) { + size = diskOffering.getDiskSize(); + } + + if (!validateVolumeSizeRange(size)) {// convert size from mb to gb + // for validation + throw new InvalidParameterValueException( + "Invalid size for custom volume creation: " + size + + " ,max volume size is:" + _maxVolumeSizeInGb); + } + } else { // create volume from snapshot + Long snapshotId = cmd.getSnapshotId(); + SnapshotVO snapshotCheck = _snapshotDao.findById(snapshotId); + if (snapshotCheck == null) { + throw new InvalidParameterValueException( + "unable to find a snapshot with id " + snapshotId); + } + + if (snapshotCheck.getState() != Snapshot.State.BackedUp) { + throw new InvalidParameterValueException("Snapshot id=" + + snapshotId + " is not in " + Snapshot.State.BackedUp + + " state yet and can't be used for volume creation"); + } + parentVolume = _volsDao.findByIdIncludingRemoved(snapshotCheck.getVolumeId()); + + diskOfferingId = snapshotCheck.getDiskOfferingId(); + diskOffering = _diskOfferingDao.findById(diskOfferingId); + zoneId = snapshotCheck.getDataCenterId(); + size = snapshotCheck.getSize(); // ; disk offering is used for tags + // purposes + + // check snapshot permissions + _accountMgr.checkAccess(caller, null, true, snapshotCheck); + } + + // Verify that zone exists + DataCenterVO zone = _dcDao.findById(zoneId); + if (zone == null) { + throw new InvalidParameterValueException( + "Unable to find zone by id " + zoneId); + } + + // Check if zone is disabled + if (Grouping.AllocationState.Disabled == zone.getAllocationState() + && 
!_accountMgr.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Cannot perform this operation, Zone is currently disabled: " + + zoneId); + } + + // If local storage is disabled then creation of volume with local disk + // offering not allowed + if (!zone.isLocalStorageEnabled() && diskOffering.getUseLocalStorage()) { + throw new InvalidParameterValueException( + "Zone is not configured to use local storage but volume's disk offering " + + diskOffering.getName() + " uses it"); + } + + String userSpecifiedName = cmd.getVolumeName(); + if (userSpecifiedName == null) { + userSpecifiedName = getRandomVolumeName(); + } + + Transaction txn = Transaction.currentTxn(); + txn.start(); + + VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, + new Long(-1), null, null, 0, Volume.Type.DATADISK); + volume.setPoolId(null); + volume.setDataCenterId(zoneId); + volume.setPodId(null); + volume.setAccountId(ownerId); + volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller + .getDomainId())); + volume.setDiskOfferingId(diskOfferingId); + volume.setSize(size); + volume.setInstanceId(null); + volume.setUpdated(new Date()); + volume.setDomainId((caller == null) ? 
Domain.ROOT_DOMAIN : caller + .getDomainId()); + if (parentVolume != null) { + volume.setTemplateId(parentVolume.getTemplateId()); + } else { + volume.setTemplateId(null); + } + + volume = _volsDao.persist(volume); + if (cmd.getSnapshotId() == null) { + // for volume created from snapshot, create usage event after volume + // creation + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), volume.getName(), + diskOfferingId, null, size); + _usageEventDao.persist(usageEvent); + } + + UserContext.current().setEventDetails("Volume Id: " + volume.getId()); + + // Increment resource count during allocation; if actual creation fails, + // decrement it + _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), + ResourceType.volume); + + txn.commit(); + + return volume; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CREATE, eventDescription = "creating volume", async = true) + public VolumeVO createVolume(CreateVolumeCmd cmd) { + VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + boolean created = true; + + try { + if (cmd.getSnapshotId() != null) { + volume = createVolumeFromSnapshot(volume, cmd.getSnapshotId()); + if (volume.getState() != Volume.State.Ready) { + created = false; + } + } + return volume; + } catch(Exception e) { + created = false; + s_logger.debug("Failed to create volume: " + volume.getId(), e); + return null; + } finally { + if (!created) { + s_logger.trace("Decrementing volume resource count for account id=" + + volume.getAccountId() + + " as volume failed to create on the backend"); + _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), + ResourceType.volume); + } + } + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_RESIZE, eventDescription = "resizing volume", async = true) + public VolumeVO resizeVolume(ResizeVolumeCmd cmd) { + Long newSize = null; + boolean shrinkOk = 
cmd.getShrinkOk(); + + VolumeVO volume = _volsDao.findById(cmd.getEntityId()); + if (volume == null) { + throw new InvalidParameterValueException("No such volume"); + } + + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + DiskOfferingVO newDiskOffering = null; + + newDiskOffering = _diskOfferingDao.findById(cmd.getNewDiskOfferingId()); + + /* + * Volumes with no hypervisor have never been assigned, and can be + * resized by recreating. perhaps in the future we can just update the + * db entry for the volume + */ + if (_volsDao.getHypervisorType(volume.getId()) == HypervisorType.None) { + throw new InvalidParameterValueException( + "Can't resize a volume that has never been attached, not sure which hypervisor type. Recreate volume to resize."); + } + + /* Only works for KVM/Xen for now */ + if (_volsDao.getHypervisorType(volume.getId()) != HypervisorType.KVM + && _volsDao.getHypervisorType(volume.getId()) != HypervisorType.XenServer) { + throw new InvalidParameterValueException( + "Cloudstack currently only supports volumes marked as KVM or XenServer hypervisor for resize"); + } + + + if (volume.getState() != Volume.State.Ready) { + throw new InvalidParameterValueException( + "Volume should be in ready state before attempting a resize"); + } + + if (!volume.getVolumeType().equals(Volume.Type.DATADISK)) { + throw new InvalidParameterValueException( + "Can only resize DATA volumes"); + } + + /* + * figure out whether or not a new disk offering or size parameter is + * required, get the correct size value + */ + if (newDiskOffering == null) { + if (diskOffering.isCustomized()) { + newSize = cmd.getSize(); + + if (newSize == null) { + throw new InvalidParameterValueException( + "new offering is of custom size, need to specify a size"); + } + + newSize = (newSize << 30); + } else { + throw new InvalidParameterValueException("current offering" + + volume.getDiskOfferingId() + + " cannot be resized, need to specify a disk 
offering"); + } + } else { + + if (newDiskOffering.getRemoved() != null + || !DiskOfferingVO.Type.Disk.equals(newDiskOffering + .getType())) { + throw new InvalidParameterValueException( + "Disk offering ID is missing or invalid"); + } + + if (diskOffering.getTags() != null) { + if (!newDiskOffering.getTags().equals(diskOffering.getTags())) { + throw new InvalidParameterValueException( + "Tags on new and old disk offerings must match"); + } + } else if (newDiskOffering.getTags() != null) { + throw new InvalidParameterValueException( + "There are no tags on current disk offering, new disk offering needs to have no tags"); + } + + if (newDiskOffering.getDomainId() == null) { + // do nothing as offering is public + } else { + _configMgr.checkDiskOfferingAccess(UserContext.current() + .getCaller(), newDiskOffering); + } + + if (newDiskOffering.isCustomized()) { + newSize = cmd.getSize(); + + if (newSize == null) { + throw new InvalidParameterValueException( + "new offering is of custom size, need to specify a size"); + } + + newSize = (newSize << 30); + } else { + newSize = newDiskOffering.getDiskSize(); + } + } + + if (newSize == null) { + throw new InvalidParameterValueException( + "could not detect a size parameter or fetch one from the diskofferingid parameter"); + } + + if (!validateVolumeSizeRange(newSize)) { + throw new InvalidParameterValueException( + "Requested size out of range"); + } + + /* does the caller have the authority to act on this volume? */ + _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, + volume); + + UserVmVO userVm = _userVmDao.findById(volume.getInstanceId()); + + PrimaryDataStoreInfo pool = (PrimaryDataStoreInfo)this.dataStoreMgr.getDataStore(volume.getPoolId(), DataStoreRole.Primary); + long currentSize = volume.getSize(); + + /* + * lets make certain they (think they) know what they're doing if they + * want to shrink, by forcing them to provide the shrinkok parameter. 
+ * This will be checked again at the hypervisor level where we can see + * the actual disk size + */ + if (currentSize > newSize && !shrinkOk) { + throw new InvalidParameterValueException( + "Going from existing size of " + + currentSize + + " to size of " + + newSize + + " would shrink the volume, need to sign off by supplying the shrinkok parameter with value of true"); + } + + /* + * get a list of hosts to send the commands to, try the system the + * associated vm is running on first, then the last known place it ran. + * If not attached to a userVm, we pass 'none' and resizevolume.sh is ok + * with that since it only needs the vm name to live resize + */ + long[] hosts = null; + String instanceName = "none"; + if (userVm != null) { + instanceName = userVm.getInstanceName(); + if (userVm.getHostId() != null) { + hosts = new long[] { userVm.getHostId() }; + } else if (userVm.getLastHostId() != null) { + hosts = new long[] { userVm.getLastHostId() }; + } + + /* Xen only works offline, SR does not support VDI.resizeOnline */ + if (_volsDao.getHypervisorType(volume.getId()) == HypervisorType.XenServer + && !userVm.getState().equals(State.Stopped)) { + throw new InvalidParameterValueException( + "VM must be stopped or disk detached in order to resize with the Xen HV"); + } + } + + ResizeVolumePayload payload = new ResizeVolumePayload(newSize, shrinkOk, instanceName, hosts); + + try { + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + vol.addPayload(payload); + + AsyncCallFuture future = this.volService.resize(vol); + future.get(); + volume = _volsDao.findById(volume.getId()); + + if (newDiskOffering != null) { + volume.setDiskOfferingId(cmd.getNewDiskOfferingId()); + } + _volsDao.update(volume.getId(), volume); + + return volume; + } catch (InterruptedException e) { + s_logger.debug("failed get resize volume result", e); + } catch (ExecutionException e) { + s_logger.debug("failed get resize volume result", e); + } catch (Exception e) { + 
s_logger.debug("failed get resize volume result", e); + } + + return null; + } + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DELETE, eventDescription = "deleting volume") + public boolean deleteVolume(long volumeId, Account caller) + throws ConcurrentOperationException { + + VolumeVO volume = _volsDao.findById(volumeId); + if (volume == null) { + throw new InvalidParameterValueException( + "Unable to acquire volume with ID: " + volumeId); + } + + if (!_snapshotMgr.canOperateOnVolume(volume)) { + throw new InvalidParameterValueException( + "There are snapshots being created on this volume, unable to delete it"); + } + + _accountMgr.checkAccess(caller, null, true, volume); + + if (volume.getInstanceId() != null) { + throw new InvalidParameterValueException( + "Please specify a volume that is not attached to any VM."); + } + + if (volume.getState() == Volume.State.UploadOp) { + VolumeHostVO volumeHost = _volumeHostDao.findByVolumeId(volume + .getId()); + if (volumeHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { + throw new InvalidParameterValueException( + "Please specify a volume that is not uploading"); + } + } + + try { + if (volume.getState() != Volume.State.Destroy && volume.getState() != Volume.State.Expunging && volume.getState() != Volume.State.Expunging) { + Long instanceId = volume.getInstanceId(); + if (!this.volService.destroyVolume(volume.getId())) { + return false; + } + + VMInstanceVO vmInstance = this._vmInstanceDao.findById(instanceId); + if (instanceId == null + || (vmInstance.getType().equals(VirtualMachine.Type.User))) { + // Decrement the resource count for volumes belonging user VM's only + _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), + ResourceType.volume); + // Log usage event for volumes belonging user VM's only + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_DELETE, volume.getAccountId(), + volume.getDataCenterId(), volume.getId(), 
volume.getName()); + _usageEventDao.persist(usageEvent); + } + } + AsyncCallFuture future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(volume.getId())); + future.get(); + + } catch (Exception e) { + s_logger.warn("Failed to expunge volume:", e); + return false; + } + + return true; + } + + private boolean validateVolumeSizeRange(long size) { + if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { + throw new InvalidParameterValueException( + "Please specify a size of at least 1 Gb."); + } else if (size > (_maxVolumeSizeInGb * 1024 * 1024 * 1024)) { + throw new InvalidParameterValueException("volume size " + size + + ", but the maximum size allowed is " + _maxVolumeSizeInGb + + " Gb."); + } + + return true; + } + + protected DiskProfile toDiskProfile(VolumeVO vol, DiskOfferingVO offering) { + return new DiskProfile(vol.getId(), vol.getVolumeType(), vol.getName(), + offering.getId(), vol.getSize(), offering.getTagsArray(), + offering.getUseLocalStorage(), offering.isRecreatable(), + vol.getTemplateId()); + } + + @Override + public DiskProfile allocateRawVolume(Type type, + String name, DiskOfferingVO offering, Long size, VMInstanceVO vm, Account owner) { + if (size == null) { + size = offering.getDiskSize(); + } else { + size = (size * 1024 * 1024 * 1024); + } + VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(), + owner.getDomainId(), owner.getId(), offering.getId(), size); + if (vm != null) { + vol.setInstanceId(vm.getId()); + } + + if (type.equals(Type.ROOT)) { + vol.setDeviceId(0l); + } else { + vol.setDeviceId(1l); + } + + vol = _volsDao.persist(vol); + + // Save usage event and update resource count for user vm volumes + if (vm instanceof UserVm) { + + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), + vol.getDataCenterId(), vol.getId(), vol.getName(), + offering.getId(), null, size); + _usageEventDao.persist(usageEvent); + + 
    /**
     * Allocates (persists in Allocated state) a volume whose content will come
     * from the given template; ISO templates are not accepted. ROOT volumes get
     * device id 0 and are marked recreatable for non-user (system) VMs; all
     * other volumes get device id 1. For user VMs a creation usage event is
     * recorded and the volume resource count is incremented.
     *
     * @return a DiskProfile describing the newly persisted volume
     */
    @Override
    public DiskProfile allocateTemplatedVolume(
            Type type, String name, DiskOfferingVO offering,
            VMTemplateVO template, VMInstanceVO vm, Account owner) {
        assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template really....";

        // The size comes from the template copy registered in the VM's zone.
        Long size = this._tmpltMgr.getTemplateSize(template.getId(), vm.getDataCenterId());

        VolumeVO vol = new VolumeVO(type, name, vm.getDataCenterId(),
                owner.getDomainId(), owner.getId(), offering.getId(), size);
        // NOTE(review): vm was already dereferenced above (vm.getDataCenterId()),
        // so this null check can never fail — confirm whether a null vm is
        // actually supported by callers.
        if (vm != null) {
            vol.setInstanceId(vm.getId());
        }
        vol.setTemplateId(template.getId());

        if (type.equals(Type.ROOT)) {
            vol.setDeviceId(0l);
            // System-VM root disks can be recreated from the template at will.
            if (!vm.getType().equals(VirtualMachine.Type.User)) {
                vol.setRecreatable(true);
            }
        } else {
            vol.setDeviceId(1l);
        }

        vol = _volsDao.persist(vol);

        // Create event and update resource count for volumes if vm is a user vm
        if (vm instanceof UserVm) {

            // Only report a disk-offering id for true Disk offerings.
            Long offeringId = null;

            if (offering.getType() == DiskOfferingVO.Type.Disk) {
                offeringId = offering.getId();
            }

            UsageEventVO usageEvent = new UsageEventVO(
                    EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(),
                    vol.getDataCenterId(), vol.getId(), vol.getName(),
                    offeringId, template.getId(), vol.getSize());
            _usageEventDao.persist(usageEvent);

            _resourceLimitMgr.incrementResourceCount(vm.getAccountId(),
                    ResourceType.volume);
        }
        return toDiskProfile(vol, offering);
    }
    /**
     * Copies an uploaded (secondary-storage) volume onto primary storage in the
     * cluster of the given root-disk pool so it can be attached to the VM.
     * Rejects the copy when the uploaded image format does not match what the
     * target cluster's hypervisor supports (vhd/qcow2/ova/raw).
     *
     * @return the volume now residing on primary storage
     * @throws InvalidParameterValueException on a format mismatch
     */
    private VolumeInfo copyVolume(StoragePoolVO rootDiskPool
            , VolumeInfo volume, VMInstanceVO vm, VMTemplateVO rootDiskTmplt, DataCenterVO dcVO,
            HostPodVO pod, DiskOfferingVO diskVO, ServiceOfferingVO svo, HypervisorType rootDiskHyperType) throws NoTransitionException {
        // Secondary-storage record for this volume; carries the uploaded format.
        VolumeHostVO volHostVO = _volumeHostDao.findByHostVolume(volume.getDataStore().getId(), volume.getId());
        if (!volHostVO
                .getFormat()
                .getFileExtension()
                .equals(
                        getSupportedImageFormatForCluster(rootDiskPool
                                .getClusterId()))) {
            throw new InvalidParameterValueException(
                    "Failed to attach volume to VM since volumes format "
                            + volHostVO.getFormat()
                                    .getFileExtension()
                            + " is not compatible with the vm hypervisor type");
        }

        // Copy from secondary storage to a primary pool in the root disk's cluster.
        VolumeInfo volumeOnPrimary = copyVolumeFromSecToPrimary(volume,
                vm, rootDiskTmplt, dcVO, pod,
                rootDiskPool.getClusterId(), svo, diskVO,
                new ArrayList(),
                volume.getSize(), rootDiskHyperType);

        return volumeOnPrimary;
    }
null : rootDiskPool + .getClusterId()); + + VolumeInfo vol = null; + if (volume.getState() == Volume.State.Allocated) { + vol = createVolume(volume, vm, + rootDiskTmplt, dcVO, pod, clusterId, svo, diskVO, + new ArrayList(), volume.getSize(), + rootDiskHyperType); + } else if (volume.getState() == Volume.State.Uploaded) { + vol = copyVolume(rootDiskPool + , volume, vm, rootDiskTmplt, dcVO, + pod, diskVO, svo, rootDiskHyperType); + } + return vol; + } + + private boolean needMoveVolume(VolumeVO rootVolumeOfVm, VolumeInfo volume) { + StoragePoolVO vmRootVolumePool = _storagePoolDao + .findById(rootVolumeOfVm.getPoolId()); + DiskOfferingVO volumeDiskOffering = _diskOfferingDao + .findById(volume.getDiskOfferingId()); + String[] volumeTags = volumeDiskOffering.getTagsArray(); + + boolean isVolumeOnSharedPool = !volumeDiskOffering + .getUseLocalStorage(); + StoragePoolVO sourcePool = _storagePoolDao.findById(volume + .getPoolId()); + List matchingVMPools = _storagePoolDao + .findPoolsByTags(vmRootVolumePool.getDataCenterId(), + vmRootVolumePool.getPodId(), + vmRootVolumePool.getClusterId(), volumeTags + ); + + boolean moveVolumeNeeded = true; + if (matchingVMPools.size() == 0) { + String poolType; + if (vmRootVolumePool.getClusterId() != null) { + poolType = "cluster"; + } else if (vmRootVolumePool.getPodId() != null) { + poolType = "pod"; + } else { + poolType = "zone"; + } + throw new CloudRuntimeException( + "There are no storage pools in the VM's " + poolType + + " with all of the volume's tags (" + + volumeDiskOffering.getTags() + ")."); + } else { + long sourcePoolId = sourcePool.getId(); + Long sourcePoolDcId = sourcePool.getDataCenterId(); + Long sourcePoolPodId = sourcePool.getPodId(); + Long sourcePoolClusterId = sourcePool.getClusterId(); + for (StoragePoolVO vmPool : matchingVMPools) { + long vmPoolId = vmPool.getId(); + Long vmPoolDcId = vmPool.getDataCenterId(); + Long vmPoolPodId = vmPool.getPodId(); + Long vmPoolClusterId = vmPool.getClusterId(); + + // 
Moving a volume is not required if storage pools belongs + // to same cluster in case of shared volume or + // identical storage pool in case of local + if (sourcePoolDcId == vmPoolDcId + && sourcePoolPodId == vmPoolPodId + && sourcePoolClusterId == vmPoolClusterId + && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { + moveVolumeNeeded = false; + break; + } + } + } + + return moveVolumeNeeded; + } + + + private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volume, Long deviceId) { + String errorMsg = "Failed to attach volume: " + volume.getName() + + " to VM: " + vm.getHostName(); + boolean sendCommand = (vm.getState() == State.Running); + AttachVolumeAnswer answer = null; + Long hostId = vm.getHostId(); + if (hostId == null) { + hostId = vm.getLastHostId(); + HostVO host = _hostDao.findById(hostId); + if (host != null + && host.getHypervisorType() == HypervisorType.VMware) { + sendCommand = true; + } + } + + if (sendCommand) { + StoragePoolVO volumePool = _storagePoolDao.findById(volume + .getPoolId()); + AttachVolumeCommand cmd = new AttachVolumeCommand(true, + vm.getInstanceName(), volume.getPoolType(), + volume.getFolder(), volume.getPath(), volume.getName(), + deviceId, volume.getChainInfo()); + cmd.setPoolUuid(volumePool.getUuid()); + + try { + answer = (AttachVolumeAnswer) _agentMgr.send(hostId, cmd); + } catch (Exception e) { + throw new CloudRuntimeException(errorMsg + " due to: " + + e.getMessage()); + } + } + + if (!sendCommand || (answer != null && answer.getResult())) { + // Mark the volume as attached + if (sendCommand) { + _volsDao.attachVolume(volume.getId(), vm.getId(), + answer.getDeviceId()); + } else { + _volsDao.attachVolume(volume.getId(), vm.getId(), deviceId); + } + return _volsDao.findById(volume.getId()); + } else { + if (answer != null) { + String details = answer.getDetails(); + if (details != null && !details.isEmpty()) { + errorMsg += "; " + details; + } + } + throw new CloudRuntimeException(errorMsg); + } + } + + 
private int getMaxDataVolumesSupported(UserVmVO vm) { + Long hostId = vm.getHostId(); + if (hostId == null) { + hostId = vm.getLastHostId(); + } + HostVO host = _hostDao.findById(hostId); + Integer maxDataVolumesSupported = null; + if (host != null) { + _hostDao.loadDetails(host); + maxDataVolumesSupported = _hypervisorCapabilitiesDao + .getMaxDataVolumesLimit(host.getHypervisorType(), + host.getDetail("product_version")); + } + if (maxDataVolumesSupported == null) { + maxDataVolumesSupported = 6; // 6 data disks by default if nothing + // is specified in + // 'hypervisor_capabilities' table + } + + return maxDataVolumesSupported.intValue(); + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) + public Volume attachVolumeToVM(AttachVolumeCmd command) { + Long vmId = command.getVirtualMachineId(); + Long volumeId = command.getId(); + Long deviceId = command.getDeviceId(); + Account caller = UserContext.current().getCaller(); + + // Check that the volume ID is valid + VolumeInfo volume = volFactory.getVolume(volumeId); + // Check that the volume is a data volume + if (volume == null || volume.getVolumeType() != Volume.Type.DATADISK) { + throw new InvalidParameterValueException( + "Please specify a valid data volume."); + } + + // Check that the volume is not currently attached to any VM + if (volume.getInstanceId() != null) { + throw new InvalidParameterValueException( + "Please specify a volume that is not attached to any VM."); + } + + // Check that the volume is not destroyed + if (volume.getState() == Volume.State.Destroy) { + throw new InvalidParameterValueException( + "Please specify a volume that is not destroyed."); + } + + // Check that the virtual machine ID is valid and it's a user vm + UserVmVO vm = _userVmDao.findById(vmId); + if (vm == null || vm.getType() != VirtualMachine.Type.User) { + throw new InvalidParameterValueException( + "Please specify a valid User VM."); + } + + // 
Check that the VM is in the correct state + if (vm.getState() != State.Running && vm.getState() != State.Stopped) { + throw new InvalidParameterValueException( + "Please specify a VM that is either running or stopped."); + } + + // Check that the device ID is valid + if (deviceId != null) { + if (deviceId.longValue() == 0) { + throw new InvalidParameterValueException( + "deviceId can't be 0, which is used by Root device"); + } + } + + // Check that the number of data volumes attached to VM is less than + // that supported by hypervisor + List existingDataVolumes = _volsDao.findByInstanceAndType( + vmId, Volume.Type.DATADISK); + int maxDataVolumesSupported = getMaxDataVolumesSupported(vm); + if (existingDataVolumes.size() >= maxDataVolumesSupported) { + throw new InvalidParameterValueException( + "The specified VM already has the maximum number of data disks (" + + maxDataVolumesSupported + + "). Please specify another VM."); + } + + // Check that the VM and the volume are in the same zone + if (vm.getDataCenterId() != volume.getDataCenterId()) { + throw new InvalidParameterValueException( + "Please specify a VM that is in the same zone as the volume."); + } + + // If local storage is disabled then attaching a volume with local disk + // offering not allowed + DataCenterVO dataCenter = _dcDao.findById(volume.getDataCenterId()); + if (!dataCenter.isLocalStorageEnabled()) { + DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume + .getDiskOfferingId()); + if (diskOffering.getUseLocalStorage()) { + throw new InvalidParameterValueException( + "Zone is not configured to use local storage but volume's disk offering " + + diskOffering.getName() + " uses it"); + } + } + + // permission check + _accountMgr.checkAccess(caller, null, true, volume, vm); + + if (!(Volume.State.Allocated.equals(volume.getState()) + || Volume.State.Ready.equals(volume.getState()) || Volume.State.Uploaded + .equals(volume.getState()))) { + throw new InvalidParameterValueException( + 
"Volume state must be in Allocated, Ready or in Uploaded state"); + } + + VolumeVO rootVolumeOfVm = null; + List rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, + Volume.Type.ROOT); + if (rootVolumesOfVm.size() != 1) { + throw new CloudRuntimeException( + "The VM " + + vm.getHostName() + + " has more than one ROOT volume and is in an invalid state."); + } else { + rootVolumeOfVm = rootVolumesOfVm.get(0); + } + + HypervisorType rootDiskHyperType = vm.getHypervisorType(); + + HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume + .getId()); + if (dataDiskHyperType != HypervisorType.None + && rootDiskHyperType != dataDiskHyperType) { + throw new InvalidParameterValueException( + "Can't attach a volume created by: " + dataDiskHyperType + + " to a " + rootDiskHyperType + " vm"); + } + + + deviceId = getDeviceId(vmId, deviceId); + VolumeInfo volumeOnPrimaryStorage = volume; + if (volume.getState().equals(Volume.State.Allocated) + || volume.getState() == Volume.State.Uploaded) { + try { + volumeOnPrimaryStorage = createVolumeOnPrimaryStorage(vm, rootVolumeOfVm, volume, rootDiskHyperType); + } catch (NoTransitionException e) { + s_logger.debug("Failed to create volume on primary storage", e); + throw new CloudRuntimeException("Failed to create volume on primary storage", e); + } + } + + boolean moveVolumeNeeded = needMoveVolume(rootVolumeOfVm, volumeOnPrimaryStorage); + + if (moveVolumeNeeded) { + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)volumeOnPrimaryStorage.getDataStore(); + if (primaryStore.isLocal()) { + throw new CloudRuntimeException( + "Failed to attach local data volume " + + volume.getName() + + " to VM " + + vm.getDisplayName() + + " as migration of local data volume is not allowed"); + } + StoragePoolVO vmRootVolumePool = _storagePoolDao + .findById(rootVolumeOfVm.getPoolId()); + + try { + volumeOnPrimaryStorage = moveVolume(volumeOnPrimaryStorage, + vmRootVolumePool.getDataCenterId(), + vmRootVolumePool.getPodId(), + 
vmRootVolumePool.getClusterId(), + dataDiskHyperType); + } catch (ConcurrentOperationException e) { + s_logger.debug("move volume failed", e); + throw new CloudRuntimeException("move volume failed", e); + } + } + + + AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor + .getCurrentExecutor(); + if (asyncExecutor != null) { + AsyncJobVO job = asyncExecutor.getJob(); + + if (s_logger.isInfoEnabled()) { + s_logger.info("Trying to attaching volume " + volumeId + + " to vm instance:" + vm.getId() + + ", update async job-" + job.getId() + + " progress status"); + } + + _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); + _asyncMgr.updateAsyncJobStatus(job.getId(), + BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); + } + + VolumeVO newVol = _volumeDao.findById(volumeOnPrimaryStorage.getId()); + newVol = sendAttachVolumeCommand(vm, newVol, deviceId); + return newVol; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "detaching volume", async = true) + public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { + Account caller = UserContext.current().getCaller(); + if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd + .getVirtualMachineId() == null) + || (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd + .getVirtualMachineId() != null)) + || (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd + .getVirtualMachineId() == null))) { + throw new InvalidParameterValueException( + "Please provide either a volume id, or a tuple(device id, instance id)"); + } + + Long volumeId = cmmd.getId(); + VolumeVO volume = null; + + if (volumeId != null) { + volume = _volsDao.findById(volumeId); + } else { + volume = _volsDao.findByInstanceAndDeviceId( + cmmd.getVirtualMachineId(), cmmd.getDeviceId()).get(0); + } + + Long vmId = null; + + if (cmmd.getVirtualMachineId() == null) { + vmId = volume.getInstanceId(); + } else { + vmId = cmmd.getVirtualMachineId(); + } + + // Check that the volume ID is 
valid + if (volume == null) { + throw new InvalidParameterValueException( + "Unable to find volume with ID: " + volumeId); + } + + // Permissions check + _accountMgr.checkAccess(caller, null, true, volume); + + // Check that the volume is a data volume + if (volume.getVolumeType() != Volume.Type.DATADISK) { + throw new InvalidParameterValueException( + "Please specify a data volume."); + } + + // Check that the volume is currently attached to a VM + if (vmId == null) { + throw new InvalidParameterValueException( + "The specified volume is not attached to a VM."); + } + + // Check that the VM is in the correct state + UserVmVO vm = this._userVmDao.findById(vmId); + if (vm.getState() != State.Running && vm.getState() != State.Stopped + && vm.getState() != State.Destroyed) { + throw new InvalidParameterValueException( + "Please specify a VM that is either running or stopped."); + } + + AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor + .getCurrentExecutor(); + if (asyncExecutor != null) { + AsyncJobVO job = asyncExecutor.getJob(); + + if (s_logger.isInfoEnabled()) { + s_logger.info("Trying to attaching volume " + volumeId + + "to vm instance:" + vm.getId() + + ", update async job-" + job.getId() + + " progress status"); + } + + _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); + _asyncMgr.updateAsyncJobStatus(job.getId(), + BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); + } + + String errorMsg = "Failed to detach volume: " + volume.getName() + + " from VM: " + vm.getHostName(); + boolean sendCommand = (vm.getState() == State.Running); + Answer answer = null; + + if (sendCommand) { + AttachVolumeCommand cmd = new AttachVolumeCommand(false, + vm.getInstanceName(), volume.getPoolType(), + volume.getFolder(), volume.getPath(), volume.getName(), + cmmd.getDeviceId() != null ? 
cmmd.getDeviceId() : volume + .getDeviceId(), volume.getChainInfo()); + + StoragePoolVO volumePool = _storagePoolDao.findById(volume + .getPoolId()); + cmd.setPoolUuid(volumePool.getUuid()); + + try { + answer = _agentMgr.send(vm.getHostId(), cmd); + } catch (Exception e) { + throw new CloudRuntimeException(errorMsg + " due to: " + + e.getMessage()); + } + } + + if (!sendCommand || (answer != null && answer.getResult())) { + // Mark the volume as detached + _volsDao.detachVolume(volume.getId()); + if (answer != null && answer instanceof AttachVolumeAnswer) { + volume.setChainInfo(((AttachVolumeAnswer) answer) + .getChainInfo()); + _volsDao.update(volume.getId(), volume); + } + + return _volsDao.findById(volumeId); + } else { + + if (answer != null) { + String details = answer.getDetails(); + if (details != null && !details.isEmpty()) { + errorMsg += "; " + details; + } + } + + throw new CloudRuntimeException(errorMsg); + } + } + + + + + + + @DB + protected VolumeVO switchVolume(VolumeVO existingVolume, + VirtualMachineProfile vm) + throws StorageUnavailableException { + Transaction txn = Transaction.currentTxn(); + + Long templateIdToUse = null; + Long volTemplateId = existingVolume.getTemplateId(); + long vmTemplateId = vm.getTemplateId(); + if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("switchVolume: Old Volume's templateId: " + + volTemplateId + + " does not match the VM's templateId: " + + vmTemplateId + + ", updating templateId in the new Volume"); + } + templateIdToUse = vmTemplateId; + } + + txn.start(); + VolumeVO newVolume = allocateDuplicateVolume(existingVolume, + templateIdToUse); + // In case of Vmware if vm reference is not removed then during root + // disk cleanup + // the vm also gets deleted, so remove the reference + if (vm.getHypervisorType() == HypervisorType.VMware) { + _volsDao.detachVolume(existingVolume.getId()); + } + try { + stateTransitTo(existingVolume, 
Volume.Event.DestroyRequested); + } catch (NoTransitionException e) { + s_logger.debug("Unable to destroy existing volume: " + e.toString()); + } + txn.commit(); + return newVolume; + + } + + + @Override + public void release(VirtualMachineProfile profile) { + // add code here + } + + + @Override + @DB + public void cleanupVolumes(long vmId) throws ConcurrentOperationException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Cleaning storage for vm: " + vmId); + } + List volumesForVm = _volsDao.findByInstance(vmId); + List toBeExpunged = new ArrayList(); + Transaction txn = Transaction.currentTxn(); + txn.start(); + for (VolumeVO vol : volumesForVm) { + if (vol.getVolumeType().equals(Type.ROOT)) { + // This check is for VM in Error state (volume is already + // destroyed) + if (!vol.getState().equals(Volume.State.Destroy)) { + this.volService.destroyVolume(vol.getId()); + } + toBeExpunged.add(vol); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Detaching " + vol); + } + _volsDao.detachVolume(vol.getId()); + } + } + txn.commit(); + AsyncCallFuture future = null; + for (VolumeVO expunge : toBeExpunged) { + future = this.volService.expungeVolumeAsync(this.volFactory.getVolume(expunge.getId())); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("failed expunge volume" + expunge.getId(), e); + } catch (ExecutionException e) { + s_logger.debug("failed expunge volume" + expunge.getId(), e); + } + } + } + + @DB + @Override + public Volume migrateVolume(MigrateVolumeCmd cmd) { + Long volumeId = cmd.getVolumeId(); + Long storagePoolId = cmd.getStoragePoolId(); + + VolumeVO vol = _volsDao.findById(volumeId); + if (vol == null) { + throw new InvalidParameterValueException( + "Failed to find the volume id: " + volumeId); + } + + if (vol.getState() != Volume.State.Ready) { + throw new InvalidParameterValueException( + "Volume must be in ready state"); + } + + if (vol.getInstanceId() != null) { + throw new 
InvalidParameterValueException( + "Volume needs to be dettached from VM"); + } + + StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + if (destPool == null) { + throw new InvalidParameterValueException( + "Failed to find the destination storage pool: " + + storagePoolId); + } + + if (!volumeOnSharedStoragePool(vol)) { + throw new InvalidParameterValueException( + "Migration of volume from local storage pool is not supported"); + } + + Volume newVol = migrateVolume(vol, destPool); + return newVol; + } + + + + @DB + protected Volume migrateVolume(Volume volume, StoragePool destPool) { + VolumeInfo vol = this.volFactory.getVolume(volume.getId()); + AsyncCallFuture future = this.volService.copyVolume(vol, (DataStore)destPool); + try { + VolumeApiResult result = future.get(); + if (result.isFailed()) { + s_logger.debug("migrate volume failed:" + result.getResult()); + return null; + } + return result.getVolume(); + } catch (InterruptedException e) { + s_logger.debug("migrate volume failed", e); + return null; + } catch (ExecutionException e) { + s_logger.debug("migrate volume failed", e); + return null; + } + } + + @Override + public boolean storageMigration( + VirtualMachineProfile vm, + StoragePool destPool) { + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + List volumesNeedToMigrate = new ArrayList(); + + for (VolumeVO volume : vols) { + if (volume.getState() != Volume.State.Ready) { + s_logger.debug("volume: " + volume.getId() + " is in " + + volume.getState() + " state"); + throw new CloudRuntimeException("volume: " + volume.getId() + + " is in " + volume.getState() + " state"); + } + + if (volume.getPoolId() == destPool.getId()) { + s_logger.debug("volume: " + volume.getId() + + " is on the same storage pool: " + destPool.getId()); + continue; + } + + volumesNeedToMigrate.add(volume); + } + + if (volumesNeedToMigrate.isEmpty()) { + s_logger.debug("No volume need to be migrated"); + return 
    /**
     * Populates the VM profile with disk descriptors before a migration: one
     * VolumeTO per usable volume, plus the attached ISO (as a read-only ISO
     * "disk") for user VMs that have one mounted.
     */
    @Override
    public void prepareForMigration(
            VirtualMachineProfile vm,
            DeployDestination dest) {
        List vols = _volsDao.findUsableVolumesForInstance(vm.getId());
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Preparing " + vols.size() + " volumes for " + vm);
        }

        // Describe each volume together with its current primary pool.
        for (VolumeVO vol : vols) {
            PrimaryDataStoreInfo pool = (PrimaryDataStoreInfo)this.dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
            vm.addDisk(new VolumeTO(vol, pool));
        }

        // User VMs may have an ISO attached; pass it along as an ISO disk.
        if (vm.getType() == VirtualMachine.Type.User) {
            UserVmVO userVM = (UserVmVO) vm.getVirtualMachine();
            if (userVM.getIsoId() != null) {
                Pair isoPathPair = this._tmpltMgr.getAbsoluteIsoPath(
                        userVM.getIsoId(), userVM.getDataCenterId());
                if (isoPathPair != null) {
                    String isoPath = isoPathPair.first();
                    VolumeTO iso = new VolumeTO(vm.getId(), Volume.Type.ISO,
                            StoragePoolType.ISO, null, null, null, isoPath, 0,
                            null, null);
                    vm.addDisk(iso);
                }
            }
        }
    }
{ + Volume.State state = vol.getState(); + if (state == Volume.State.Allocated + || state == Volume.State.Creating) { + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if (vol.isRecreatable()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Volume " + vol + + " will be recreated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if (assignedPool.getId() != vol.getPoolId()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Mismatch in storage pool " + + assignedPool + + " assigned by deploymentPlanner and the one associated with volume " + + vol); + } + DiskOfferingVO diskOffering = _diskOfferingDao + .findById(vol.getDiskOfferingId()); + if (diskOffering.getUseLocalStorage()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Local volume " + + vol + + " will be recreated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null); + tasks.add(task); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Shared volume " + + vol + + " will be migrated on storage pool " + + assignedPool + + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, null); + tasks.add(task); + } + } else { + StoragePoolVO pool = _storagePoolDao + .findById(vol.getPoolId()); + VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool); + tasks.add(task); + } + + } + } + } else { + if (vol.getPoolId() == null) { + throw new StorageUnavailableException( + "Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + + vol, Volume.class, vol.getId()); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("No need to recreate the volume: " + vol + + ", since it already has a pool assigned: " + + 
    /**
     * Ensures a volume exists on primary storage for VM start. Volumes still in
     * Allocated/Creating are created in place; otherwise a fresh VolumeVO is
     * switched in (the old one is marked for destruction) and then created
     * either blank or from its template. Blocks until the async create finishes.
     *
     * @return pair of (persisted volume row, data store it was created on)
     * @throws StorageUnavailableException when the create fails or is interrupted
     */
    private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm,
            DeployDestination dest) throws StorageUnavailableException {
        VolumeVO newVol;
        boolean recreate = _recreateSystemVmEnabled;
        DataStore destPool = null;
        // Prefer the pool picked by the deployment planner; when recreating
        // system VMs without a planner pick, reuse the volume's current pool.
        if (recreate
                && (dest.getStorageForDisks() == null || dest
                        .getStorageForDisks().get(vol) == null)) {
            destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
            s_logger.debug("existing pool: " + destPool.getId());
        } else {
            StoragePool pool = dest.getStorageForDisks().get(vol);
            destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
        }

        if (vol.getState() == Volume.State.Allocated
                || vol.getState() == Volume.State.Creating) {
            newVol = vol;
        } else {
            // Existing volume: allocate a duplicate row and request destruction
            // of the old one.
            newVol = switchVolume(vol, vm);
            // update the volume->PrimaryDataStoreVO map since volumeId has
            // changed
            if (dest.getStorageForDisks() != null
                    && dest.getStorageForDisks().containsKey(vol)) {
                StoragePool poolWithOldVol = dest
                        .getStorageForDisks().get(vol);
                dest.getStorageForDisks().put(newVol, poolWithOldVol);
                dest.getStorageForDisks().remove(vol);
            }
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Created new volume " + newVol
                        + " for old volume " + vol);
            }
        }
        VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
        Long templateId = newVol.getTemplateId();
        AsyncCallFuture future = null;
        // Blank volumes are created empty; templated ones are created from the
        // template on the destination pool.
        if (templateId == null) {
            future = this.volService.createVolumeAsync(volume, destPool);
        } else {
            TemplateInfo templ = this.tmplFactory.getTemplate(templateId);
            future = this.volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
        }
        VolumeApiResult result = null;
        try {
            result = future.get();
            if (result.isFailed()) {
                s_logger.debug("Unable to create "
                        + newVol + ":" + result.getResult());
                throw new StorageUnavailableException("Unable to create "
                        + newVol + ":" + result.getResult(), destPool.getId());
            }
            // Re-read the row so state changes made by the service are visible.
            newVol = this._volsDao.findById(newVol.getId());
        } catch (InterruptedException e) {
            s_logger.error("Unable to create " + newVol, e);
            throw new StorageUnavailableException("Unable to create "
                    + newVol + ":" + e.toString(), destPool.getId());
        } catch (ExecutionException e) {
            s_logger.error("Unable to create " + newVol, e);
            throw new StorageUnavailableException("Unable to create "
                    + newVol + ":" + e.toString(), destPool.getId());
        }

        return new Pair(newVol, destPool);
    }
(StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary); + vol = result.first(); + } + vm.addDisk(new VolumeTO(vol, pool)); + } + } + + private Long getDeviceId(long vmId, Long deviceId) { + // allocate deviceId + List vols = _volsDao.findByInstance(vmId); + if (deviceId != null) { + if (deviceId.longValue() > 15 || deviceId.longValue() == 0 + || deviceId.longValue() == 3) { + throw new RuntimeException("deviceId should be 1,2,4-15"); + } + for (VolumeVO vol : vols) { + if (vol.getDeviceId().equals(deviceId)) { + throw new RuntimeException("deviceId " + deviceId + + " is used by vm" + vmId); + } + } + } else { + // allocate deviceId here + List devIds = new ArrayList(); + for (int i = 1; i < 15; i++) { + devIds.add(String.valueOf(i)); + } + devIds.remove("3"); + for (VolumeVO vol : vols) { + devIds.remove(vol.getDeviceId().toString().trim()); + } + deviceId = Long.parseLong(devIds.iterator().next()); + } + + return deviceId; + } + + private boolean stateTransitTo(Volume vol, Volume.Event event) + throws NoTransitionException { + return _volStateMachine.transitTo(vol, event, null, _volsDao); + } + + + private String validateUrl(String url) { + try { + URI uri = new URI(url); + if ((uri.getScheme() == null) + || (!uri.getScheme().equalsIgnoreCase("http") + && !uri.getScheme().equalsIgnoreCase("https") && !uri + .getScheme().equalsIgnoreCase("file"))) { + throw new IllegalArgumentException( + "Unsupported scheme for url: " + url); + } + + int port = uri.getPort(); + if (!(port == 80 || port == 443 || port == -1)) { + throw new IllegalArgumentException( + "Only ports 80 and 443 are allowed"); + } + String host = uri.getHost(); + try { + InetAddress hostAddr = InetAddress.getByName(host); + if (hostAddr.isAnyLocalAddress() + || hostAddr.isLinkLocalAddress() + || hostAddr.isLoopbackAddress() + || hostAddr.isMulticastAddress()) { + throw new IllegalArgumentException( + "Illegal host specified in url"); + } + if (hostAddr instanceof 
Inet6Address) { + throw new IllegalArgumentException( + "IPV6 addresses not supported (" + + hostAddr.getHostAddress() + ")"); + } + } catch (UnknownHostException uhe) { + throw new IllegalArgumentException("Unable to resolve " + host); + } + + return uri.toString(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid URL " + url); + } + + } + + @Override + public boolean canVmRestartOnAnotherServer(long vmId) { + List vols = _volsDao.findCreatedByInstance(vmId); + for (VolumeVO vol : vols) { + if (!vol.isRecreatable() && !vol.getPoolType().isShared()) { + return false; + } + } + return true; + } + + @Override + public boolean configure(String name, Map params) + throws ConfigurationException { + String _customDiskOfferingMinSizeStr = _configDao + .getValue(Config.CustomDiskOfferingMinSize.toString()); + _customDiskOfferingMinSize = NumbersUtil.parseInt( + _customDiskOfferingMinSizeStr, Integer + .parseInt(Config.CustomDiskOfferingMinSize + .getDefaultValue())); + + String maxVolumeSizeInGbString = _configDao + .getValue("storage.max.volume.size"); + _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, + 2000); + + String value = _configDao.getValue(Config.RecreateSystemVmEnabled.key()); + _recreateSystemVmEnabled = Boolean.parseBoolean(value); + _copyvolumewait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + HostTemplateStatesSearch = _vmTemplateHostDao.createSearchBuilder(); + HostTemplateStatesSearch.and("id", HostTemplateStatesSearch.entity() + .getTemplateId(), SearchCriteria.Op.EQ); + HostTemplateStatesSearch.and("state", HostTemplateStatesSearch.entity() + .getDownloadState(), SearchCriteria.Op.EQ); + + SearchBuilder HostSearch = _hostDao.createSearchBuilder(); + HostSearch.and("dcId", HostSearch.entity().getDataCenterId(), + SearchCriteria.Op.EQ); + + HostTemplateStatesSearch.join("host", HostSearch, HostSearch.entity() + .getId(), 
HostTemplateStatesSearch.entity().getHostId(), + JoinBuilder.JoinType.INNER); + HostSearch.done(); + HostTemplateStatesSearch.done(); + return true; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public String getName() { + return "Volume Manager"; + } + + @Override + public void destroyVolume(VolumeVO volume) { + try { + this.volService.destroyVolume(volume.getId()); + } catch (ConcurrentOperationException e) { + s_logger.debug("Failed to destroy volume" + volume.getId(), e); + throw new CloudRuntimeException("Failed to destroy volume" + volume.getId(), e); + } + } + +} diff --git a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java deleted file mode 100644 index 13a010729e0..00000000000 --- a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java +++ /dev/null @@ -1,174 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package com.cloud.storage.allocator; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.deploy.DeploymentPlan; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.offering.ServiceOffering; -import com.cloud.server.StatsCollector; -import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.dao.DiskOfferingDao; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.user.Account; -import com.cloud.vm.DiskProfile; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineProfile; - -@Local(value=StoragePoolAllocator.class) -public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class); - protected String _allocationAlgorithm = "random"; - - @Inject - DiskOfferingDao _diskOfferingDao; - - @Override - public boolean allocatorIsCorrectType(DiskProfile dskCh) { - return !localStorageAllocationNeeded(dskCh); - } - - @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - - - VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate(); - Account account = null; - if(vmProfile.getVirtualMachine() != null){ - account = vmProfile.getOwner(); - } - - List suitablePools = new ArrayList(); - - // Check that the allocator type is correct - if (!allocatorIsCorrectType(dskCh)) { - return suitablePools; - } - long dcId = plan.getDataCenterId(); - Long podId = plan.getPodId(); - Long clusterId = plan.getClusterId(); - - if(dskCh.getTags() != 
null && dskCh.getTags().length != 0){ - s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags())); - }else{ - s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId); - } - - List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), null); - if (pools.size() == 0) { - if (s_logger.isDebugEnabled()) { - String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString(); - s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning"); - } - return suitablePools; - } - - StatsCollector sc = StatsCollector.getInstance(); - - //FixMe: We are ignoring userdispersing algorithm when account is null. Find a way to get account ID when VMprofile is null - if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) { - // Shuffle this so that we don't check the pools in the same order. - Collections.shuffle(pools); - }else if(_allocationAlgorithm.equals("userdispersing")){ - pools = reorderPoolsByNumberOfVolumes(plan, pools, account); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation"); - } - - DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId()); - for (StoragePoolVO pool: pools) { - if(suitablePools.size() == returnUpTo){ - break; - } - if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) { - s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. 
RBD is not supported for System VM's"); - continue; - } - - if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) { - suitablePools.add(pool); - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools"); - } - - return suitablePools; - } - - private List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) { - if(account == null){ - return pools; - } - long dcId = plan.getDataCenterId(); - Long podId = plan.getPodId(); - Long clusterId = plan.getClusterId(); - - List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId()); - if (s_logger.isDebugEnabled()) { - s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount); - } - - //now filter the given list of Pools by this ordered list - Map poolMap = new HashMap(); - for (StoragePoolVO pool : pools) { - poolMap.put(pool.getId(), pool); - } - List matchingPoolIds = new ArrayList(poolMap.keySet()); - - poolIdsByVolCount.retainAll(matchingPoolIds); - - List reorderedPools = new ArrayList(); - for(Long id: poolIdsByVolCount){ - reorderedPools.add(poolMap.get(id)); - } - - return reorderedPools; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - - if (_configDao != null) { - Map configs = _configDao.getConfiguration(params); - String allocationAlgorithm = configs.get("vm.allocation.algorithm"); - if (allocationAlgorithm != null) { - _allocationAlgorithm = allocationAlgorithm; - } - } - return true; - } -} diff --git a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java deleted file mode 100644 index b6b8e8e98ff..00000000000 --- 
a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java +++ /dev/null @@ -1,287 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.storage.allocator; - -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.log4j.Logger; - -import com.cloud.capacity.CapacityVO; -import com.cloud.capacity.dao.CapacityDao; -import com.cloud.configuration.dao.ConfigurationDao; -import com.cloud.deploy.DeploymentPlan; -import com.cloud.deploy.DeploymentPlanner.ExcludeList; -import com.cloud.offering.ServiceOffering; -import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolHostVO; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.utils.DateUtil; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.JoinBuilder; -import com.cloud.utils.db.SearchBuilder; -import 
com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.vm.DiskProfile; -import com.cloud.vm.UserVmVO; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachineProfile; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; - -// -// TODO -// Rush to make LocalStoragePoolAllocator use static allocation status, we should revisit the overall -// allocation process to make it more reliable in next release. The code put in here is pretty ugly -// -@Local(value = StoragePoolAllocator.class) -public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator { - private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class); - - @Inject - StoragePoolHostDao _poolHostDao; - @Inject - VMInstanceDao _vmInstanceDao; - @Inject - UserVmDao _vmDao; - @Inject - ServiceOfferingDao _offeringDao; - @Inject - CapacityDao _capacityDao; - @Inject - ConfigurationDao _configDao; - - protected GenericSearchBuilder VmsOnPoolSearch; - - private int _secondsToSkipStoppedVMs = 86400; - - @Override - public boolean allocatorIsCorrectType(DiskProfile dskCh) { - return localStorageAllocationNeeded(dskCh); - } - - @Override - public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - - List suitablePools = new ArrayList(); - - // Check that the allocator type is correct - if (!allocatorIsCorrectType(dskCh)) { - return suitablePools; - } - - ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid()); - - if (s_logger.isDebugEnabled()) { - s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm"); - } - - // data disk and host identified from deploying vm (attach volume case) - if 
(dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) { - List hostPools = _poolHostDao.listByHostId(plan.getHostId()); - for (StoragePoolHostVO hostPool: hostPools) { - StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId()); - if (pool != null && pool.isLocal()) { - s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); - suitablePools.add(pool); - } - - if (suitablePools.size() == returnUpTo) { - break; - } - } - } else { - List availablePool; - while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) { - StoragePool pool = availablePool.get(0); - myAvoids.addPool(pool.getId()); - List hostsInSPool = _poolHostDao.listByPoolId(pool.getId()); - assert (hostsInSPool.size() == 1) : "Local storage pool should be one host per pool"; - - s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); - suitablePools.add(pool); - - if (suitablePools.size() == returnUpTo) { - break; - } - } - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); - } - - if (suitablePools.isEmpty()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Unable to find storage pool to fit the vm"); - } - } - return suitablePools; - } - - // we don't need to check host capacity now, since hostAllocators will do that anyway - private boolean hostHasCpuMemoryCapacity(long hostId, List vmOnHost, VMInstanceVO vm) { - - ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId()); - - long usedMemory = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_MEMORY); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Calculated static-allocated memory for VMs on host " + hostId + ": " + usedMemory + " bytes, requesting memory: " + (so != null ? 
so.getRamSize() * 1024L * 1024L : "") - + " bytes"); - } - - SearchCriteria sc = _capacityDao.createSearchCriteria(); - sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId); - sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_MEMORY); - List capacities = _capacityDao.search(sc, null); - if (capacities.size() > 0) { - if (capacities.get(0).getTotalCapacity() < usedMemory + (so != null ? so.getRamSize() * 1024L * 1024L : 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + hostId + " runs out of memory capacity"); - } - return false; - } - } else { - s_logger.warn("Host " + hostId + " has not reported memory capacity yet"); - return false; - } - - long usedCpu = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_CPU); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Calculated static-allocated CPU for VMs on host " + hostId + ": " + usedCpu + " GHz, requesting cpu: " + (so != null ? so.getCpu() * so.getSpeed() : "") + " GHz"); - } - - sc = _capacityDao.createSearchCriteria(); - sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId); - sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU); - capacities = _capacityDao.search(sc, null); - if (capacities.size() > 0) { - if (capacities.get(0).getTotalCapacity() < usedCpu + (so != null ? 
so.getCpu() * so.getSpeed() : 0)) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Host " + hostId + " runs out of CPU capacity"); - } - return false; - } - } else { - s_logger.warn("Host " + hostId + " has not reported CPU capacity yet"); - return false; - } - - return true; - } - - private boolean skipCalculation(VMInstanceVO vm) { - if (vm == null) { - return true; - } - - if (vm.getState() == State.Expunging) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName()); - } - return true; - } - - if (vm.getState() == State.Destroyed && vm.getType() != VirtualMachine.Type.User) { - return true; - } - - if (vm.getState() == State.Stopped || vm.getState() == State.Destroyed) { - // for stopped/Destroyed VMs, we will skip counting it if it hasn't been used for a while - - long millisecondsSinceLastUpdate = DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime(); - if (millisecondsSinceLastUpdate > _secondsToSkipStoppedVMs * 1000L) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Skip counting vm " + vm.getInstanceName() + " in capacity allocation as it has been stopped for " + millisecondsSinceLastUpdate / 60000 + " minutes"); - } - return true; - } - } - return false; - } - - private long calcHostAllocatedCpuMemoryCapacity(List vmOnHost, short capacityType) { - assert (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY || capacityType == CapacityVO.CAPACITY_TYPE_CPU) : "Invalid capacity type passed in calcHostAllocatedCpuCapacity()"; - - long usedCapacity = 0; - for (Long vmId : vmOnHost) { - VMInstanceVO vm = _vmInstanceDao.findById(vmId); - if (skipCalculation(vm)) { - continue; - } - - ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId()); - if (vm.getType() == VirtualMachine.Type.User) { - UserVmVO userVm = _vmDao.findById(vm.getId()); - if (userVm == null) { - continue; - } - } - - if (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) { - usedCapacity += 
so.getRamSize() * 1024L * 1024L; - } else if (capacityType == CapacityVO.CAPACITY_TYPE_CPU) { - usedCapacity += so.getCpu() * so.getSpeed(); - } - } - - return usedCapacity; - } - - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - super.configure(name, params); - - _storageOverprovisioningFactor = new BigDecimal(1); - _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L); - - Map configs = _configDao.getConfiguration("management-server", params); - String value = configs.get("vm.resource.release.interval"); - _secondsToSkipStoppedVMs = NumbersUtil.parseInt(value, 86400); - - VmsOnPoolSearch = _vmInstanceDao.createSearchBuilder(Long.class); - VmsOnPoolSearch.select(null, Func.DISTINCT, VmsOnPoolSearch.entity().getId()); - VmsOnPoolSearch.and("removed", VmsOnPoolSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - VmsOnPoolSearch.and("state", VmsOnPoolSearch.entity().getState(), SearchCriteria.Op.NIN); - - SearchBuilder sbVolume = _volumeDao.createSearchBuilder(); - sbVolume.and("poolId", sbVolume.entity().getPoolId(), SearchCriteria.Op.EQ); - - VmsOnPoolSearch.join("volumeJoin", sbVolume, VmsOnPoolSearch.entity().getId(), sbVolume.entity().getInstanceId(), JoinBuilder.JoinType.INNER); - - sbVolume.done(); - VmsOnPoolSearch.done(); - - return true; - } - - public LocalStoragePoolAllocator() { - } -} diff --git a/server/src/com/cloud/storage/dao/LaunchPermissionDao.java b/server/src/com/cloud/storage/dao/LaunchPermissionDao.java index 86e5a9bf827..0ad60b50ee8 100644 --- a/server/src/com/cloud/storage/dao/LaunchPermissionDao.java +++ b/server/src/com/cloud/storage/dao/LaunchPermissionDao.java @@ -18,6 +18,7 @@ package com.cloud.storage.dao; import java.util.List; + import com.cloud.storage.LaunchPermissionVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.db.GenericDao; diff --git a/server/src/com/cloud/storage/dao/SnapshotDao.java 
b/server/src/com/cloud/storage/dao/SnapshotDao.java index 3b961f6fa89..0e378a724b4 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDao.java +++ b/server/src/com/cloud/storage/dao/SnapshotDao.java @@ -25,7 +25,7 @@ import com.cloud.utils.fsm.StateDao; import java.util.List; -public interface SnapshotDao extends GenericDao, StateDao { +public interface SnapshotDao extends GenericDao, StateDao { List listByVolumeId(long volumeId); List listByVolumeId(Filter filter, long volumeId); SnapshotVO findNextSnapshot(long parentSnapId); diff --git a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java index a8a07dcc3a6..825b6d56e00 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -35,16 +35,14 @@ import com.cloud.storage.Snapshot.Type; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; -import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.*; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.dao.VMInstanceDaoImpl; @Component @@ -66,10 +64,10 @@ public class SnapshotDaoImpl extends GenericDaoBase implements private SearchBuilder InstanceIdSearch; private SearchBuilder StatusSearch; private GenericSearchBuilder CountSnapshotsByAccount; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; - @Inject protected VMInstanceDaoImpl _instanceDao; - @Inject protected VolumeDaoImpl _volumeDao; + @Inject protected VMInstanceDao _instanceDao; + @Inject protected VolumeDao _volumeDao; @Override public SnapshotVO 
findNextSnapshot(long snapshotId) { @@ -324,7 +322,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements } @Override - public boolean updateState(State currentState, Event event, State nextState, Snapshot snapshot, Object data) { + public boolean updateState(State currentState, Event event, State nextState, SnapshotVO snapshot, Object data) { Transaction txn = Transaction.currentTxn(); txn.start(); SnapshotVO snapshotVO = (SnapshotVO)snapshot; diff --git a/server/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java b/server/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java index 8cc5d7be9d4..0d797ed3545 100644 --- a/server/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java +++ b/server/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java @@ -22,9 +22,10 @@ import java.util.Map; import javax.ejb.Local; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.springframework.stereotype.Component; -import com.cloud.storage.StoragePoolDetailVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; diff --git a/server/src/com/cloud/storage/dao/VMTemplateDao.java b/server/src/com/cloud/storage/dao/VMTemplateDao.java index a043a2c6079..c39626f54dd 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplateDao.java @@ -20,6 +20,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; + import com.cloud.domain.DomainVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project.ListProjectResourcesCriteria; @@ -28,11 +31,12 @@ import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import 
com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; /* * Data Access Object for vm_templates table */ -public interface VMTemplateDao extends GenericDao { +public interface VMTemplateDao extends GenericDao, StateDao { public List listByPublic(); diff --git a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 42f10d34c1b..b51b216717b 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -16,8 +16,8 @@ // under the License. package com.cloud.storage.dao; -import static com.cloud.utils.StringUtils.*; -import static com.cloud.utils.db.DbUtil.*; +import static com.cloud.utils.StringUtils.join; +import static com.cloud.utils.db.DbUtil.closeResources; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -34,10 +34,12 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateEvent; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.api.BaseCmd; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.DomainVO; @@ -55,11 +57,10 @@ import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.tags.ResourceTagVO; -import com.cloud.tags.dao.ResourceTagsDaoImpl; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.utils.Pair; - import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; @@ 
-68,19 +69,21 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value={VMTemplateDao.class}) public class VMTemplateDaoImpl extends GenericDaoBase implements VMTemplateDao { private static final Logger s_logger = Logger.getLogger(VMTemplateDaoImpl.class); - + @Inject VMTemplateZoneDao _templateZoneDao; @Inject VMTemplateDetailsDao _templateDetailsDao; - + @Inject ConfigurationDao _configDao; @Inject @@ -91,10 +94,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem DataCenterDao _dcDao; private final String SELECT_TEMPLATE_HOST_REF = "SELECT t.id, h.data_center_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; - + private final String SELECT_TEMPLATE_ZONE_REF = "SELECT t.id, tzr.zone_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t INNER JOIN template_zone_ref tzr on (t.id = tzr.template_id) "; - + private final String SELECT_TEMPLATE_SWIFT_REF = "SELECT t.id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " + "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t"; @@ -122,23 +125,24 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private SearchBuilder 
PublicIsoSearch; private SearchBuilder UserIsoSearch; private GenericSearchBuilder CountTemplatesByAccount; + private SearchBuilder updateStateSearch; - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; private String routerTmpltName; private String consoleProxyTmpltName; - - protected VMTemplateDaoImpl() { + + public VMTemplateDaoImpl() { } - + @Override public List listByPublic() { SearchCriteria sc = PublicSearch.create(); sc.setParameters("public", 1); return listBy(sc); } - + @Override public VMTemplateVO findByName(String templateName) { SearchCriteria sc = UniqueNameSearch.create(); @@ -155,7 +159,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override public List publicIsoSearch(Boolean bootable, boolean listRemoved, Map tags){ - + SearchBuilder sb = null; if (tags == null || tags.isEmpty()) { sb = PublicIsoSearch; @@ -166,7 +170,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sb.and("type", sb.entity().getTemplateType(), SearchCriteria.Op.EQ); sb.and("bootable", sb.entity().isBootable(), SearchCriteria.Op.EQ); sb.and("removed", sb.entity().getRemoved(), SearchCriteria.Op.EQ); - + SearchBuilder tagSearch = _tagsDao.createSearchBuilder(); for (int count=0; count < tags.size(); count++) { tagSearch.or().op("key" + String.valueOf(count), tagSearch.entity().getKey(), SearchCriteria.Op.EQ); @@ -177,20 +181,20 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sb.groupBy(sb.entity().getId()); sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); } - + SearchCriteria sc = sb.create(); - + sc.setParameters("public", 1); sc.setParameters("format", "ISO"); sc.setParameters("type", TemplateType.PERHOST.toString()); if (bootable != null) { sc.setParameters("bootable", bootable); } - + if (!listRemoved) { sc.setParameters("removed", (Object)null); } - + if (tags != null && !tags.isEmpty()) { int count = 0; 
sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.ISO.toString()); @@ -200,10 +204,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem count++; } } - + return listBy(sc); } - + @Override public List userIsoSearch(boolean listRemoved){ @@ -224,7 +228,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem public List listAllSystemVMTemplates() { SearchCriteria sc = tmpltTypeSearch.create(); sc.setParameters("templateType", Storage.TemplateType.SYSTEM); - + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, null); return listBy(sc, filter); } @@ -232,7 +236,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override public List listPrivateTemplatesByHost(Long hostId) { - String sql = "select * from template_host_ref as thr INNER JOIN vm_template as t ON t.id=thr.template_id " + String sql = "select * from template_host_ref as thr INNER JOIN vm_template as t ON t.id=thr.template_id " + "where thr.host_id=? and t.public=0 and t.featured=0 and t.type='USER' and t.removed is NULL"; List l = new ArrayList(); @@ -252,7 +256,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } return l; } - + @Override public List listReadyTemplates() { SearchCriteria sc = createSearchCriteria(); @@ -260,7 +264,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sc.addAnd("format", SearchCriteria.Op.NEQ, Storage.ImageFormat.ISO); return listIncludingRemovedBy(sc); } - + @Override public List findIsosByIdAndPath(Long domainId, Long accountId, String path) { SearchCriteria sc = createSearchCriteria(); @@ -283,7 +287,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sc.setParameters("accountId", accountId); return listBy(sc); } - + @Override public List listByHypervisorType(List hyperTypes) { SearchCriteria sc = createSearchCriteria(); @@ -295,17 +299,17 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override public boolean configure(String name, Map params) 
throws ConfigurationException { boolean result = super.configure(name, params); - + PublicSearch = createSearchBuilder(); PublicSearch.and("public", PublicSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); routerTmpltName = (String)params.get("routing.uniquename"); - + s_logger.debug("Found parameter routing unique name " + routerTmpltName); if (routerTmpltName==null) { routerTmpltName="routing"; } - + consoleProxyTmpltName = (String)params.get("consoleproxy.uniquename"); if(consoleProxyTmpltName == null) { consoleProxyTmpltName = "routing"; @@ -341,16 +345,16 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem hostHyperSearch.and("type", hostHyperSearch.entity().getType(), SearchCriteria.Op.EQ); hostHyperSearch.and("zoneId", hostHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); hostHyperSearch.groupBy(hostHyperSearch.entity().getHypervisorType()); - + tmpltTypeHyperSearch.join("tmplHyper", hostHyperSearch, hostHyperSearch.entity().getHypervisorType(), tmpltTypeHyperSearch.entity().getHypervisorType(), JoinBuilder.JoinType.INNER); hostHyperSearch.done(); tmpltTypeHyperSearch.done(); - + tmpltTypeHyperSearch2 = createSearchBuilder(); tmpltTypeHyperSearch2.and("templateType", tmpltTypeHyperSearch2.entity().getTemplateType(), SearchCriteria.Op.EQ); tmpltTypeHyperSearch2.and("hypervisorType", tmpltTypeHyperSearch2.entity().getHypervisorType(), SearchCriteria.Op.EQ); - + tmpltTypeSearch = createSearchBuilder(); tmpltTypeSearch.and("removed", tmpltTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); tmpltTypeSearch.and("templateType", tmpltTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); @@ -359,11 +363,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); AccountIdSearch.and("publicTemplate", AccountIdSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); AccountIdSearch.done(); - + SearchBuilder 
tmpltZoneSearch = _templateZoneDao.createSearchBuilder(); tmpltZoneSearch.and("removed", tmpltZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); tmpltZoneSearch.and("zoneId", tmpltZoneSearch.entity().getZoneId(), SearchCriteria.Op.EQ); - + TmpltsInZoneSearch = createSearchBuilder(); TmpltsInZoneSearch.and("removed", TmpltsInZoneSearch.entity().getRemoved(), SearchCriteria.Op.NULL); TmpltsInZoneSearch.and().op("avoidtype", TmpltsInZoneSearch.entity().getTemplateType(), SearchCriteria.Op.NEQ); @@ -374,11 +378,17 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem TmpltsInZoneSearch.done(); CountTemplatesByAccount = createSearchBuilder(Long.class); - CountTemplatesByAccount.select(null, Func.COUNT, null); + CountTemplatesByAccount.select(null, Func.COUNT, null); CountTemplatesByAccount.and("account", CountTemplatesByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); CountTemplatesByAccount.and("removed", CountTemplatesByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); CountTemplatesByAccount.done(); + updateStateSearch = this.createSearchBuilder(); + updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); + updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); + updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); + updateStateSearch.done(); + return result; } @@ -462,7 +472,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } else { whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; } - } else if (templateFilter == TemplateFilter.sharedexecutable && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { + } else if ((templateFilter == TemplateFilter.shared || templateFilter == TemplateFilter.sharedexecutable) && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) { joinClause += " LEFT JOIN launch_permission lp ON t.id = lp.template_id WHERE" + " (t.account_id IN (" + 
permittedAccountsStr + ") OR" + " lp.account_id IN (" + permittedAccountsStr + "))"; @@ -507,7 +517,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override - public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, + public Set> searchTemplates(String name, String keyword, TemplateFilter templateFilter, boolean isIso, List hypers, Boolean bootable, DomainVO domain, Long pageSize, Long startIndex, Long zoneId, HypervisorType hyperType, boolean onlyReady, boolean showDomr,List permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags) { @@ -517,17 +527,17 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem builder.append(permittedAccount.getAccountId() + ","); } } - + String permittedAccountsStr = builder.toString(); - + if (permittedAccountsStr.length() > 0) { //chop the "," off permittedAccountsStr = permittedAccountsStr.substring(0, permittedAccountsStr.length()-1); } - + Transaction txn = Transaction.currentTxn(); txn.start(); - + /* Use LinkedHashSet here to guarantee iteration order */ Set> templateZonePairList = new LinkedHashSet>(); PreparedStatement pstmt = null; @@ -535,15 +545,15 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem StringBuilder relatedDomainIds = new StringBuilder(); String sql = SELECT_TEMPLATE_ZONE_REF; String groupByClause = ""; - try { + try { //short accountType; //String accountId = null; - String guestOSJoin = ""; + String guestOSJoin = ""; StringBuilder templateHostRefJoin = new StringBuilder(); String dataCenterJoin = "", lpjoin = ""; String tagsJoin = ""; - if (isIso && !hyperType.equals(HypervisorType.None)) { + if (isIso && !hyperType.equals(HypervisorType.None)) { guestOSJoin = " INNER JOIN guest_os guestOS on (guestOS.id = t.guest_os_id) INNER JOIN guest_os_hypervisor goh on ( goh.guest_os_id = guestOS.id) "; } if (onlyReady){ @@ -554,34 +564,34 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem 
if ((templateFilter == TemplateFilter.featured) || (templateFilter == TemplateFilter.community)) { dataCenterJoin = " INNER JOIN data_center dc on (h.data_center_id = dc.id)"; } - - if (templateFilter == TemplateFilter.sharedexecutable){ + + if (templateFilter == TemplateFilter.sharedexecutable || templateFilter == TemplateFilter.shared ){ lpjoin = " INNER JOIN launch_permission lp ON t.id = lp.template_id "; } - + if (tags != null && !tags.isEmpty()) { tagsJoin = " INNER JOIN resource_tags r ON t.id = r.resource_id "; } - + sql += guestOSJoin + templateHostRefJoin + dataCenterJoin + lpjoin + tagsJoin; String whereClause = ""; - + //All joins have to be made before we start setting the condition settings - if ((listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources - || (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.featured))) && + if ((listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources + || (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.featured))) && !(caller.getType() != Account.ACCOUNT_TYPE_NORMAL && templateFilter == TemplateFilter.all)) { whereClause += " INNER JOIN account a on (t.account_id = a.id)"; - if ((templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) && (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { - whereClause += " INNER JOIN domain d on (a.domain_id = d.id) WHERE d.path LIKE '" + domain.getPath() + "%'"; + if ((templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) && (caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { + whereClause += " INNER JOIN domain d on (a.domain_id = d.id) WHERE d.path LIKE '" + domain.getPath() + "%'"; if 
(listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { whereClause += " AND a.type != " + Account.ACCOUNT_TYPE_PROJECT; } - } else + } else if (listProjectResourcesCriteria == ListProjectResourcesCriteria.SkipProjectResources) { whereClause += " WHERE a.type != " + Account.ACCOUNT_TYPE_PROJECT; } } - + if (!permittedAccounts.isEmpty()) { for (Account account : permittedAccounts) { //accountType = account.getType(); @@ -611,12 +621,12 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem relatedDomainIds.setLength(relatedDomainIds.length()-1); } } - + String attr = " AND "; if (whereClause.endsWith(" WHERE ")) { attr += " WHERE "; } - + if (!isIso) { if ( hypers.isEmpty() ) { return templateZonePairList; @@ -632,9 +642,10 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem whereClause += attr + " t.hypervisor_type IN (" + relatedHypers + ")"; } } - - if (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.featured || - templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.executable) && !isAdmin(caller.getType()) ) { + + if (!permittedAccounts.isEmpty() && !(templateFilter == TemplateFilter.featured || + templateFilter == TemplateFilter.community || templateFilter == TemplateFilter.executable + || templateFilter == TemplateFilter.shared || templateFilter == TemplateFilter.sharedexecutable) && !isAdmin(caller.getType()) ) { whereClause += attr + "t.account_id IN (" + permittedAccountsStr + ")"; } @@ -642,13 +653,13 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem whereClause += attr + "t.public = 1 AND t.featured = 1"; if (!permittedAccounts.isEmpty()) { whereClause += attr + "(dc.domain_id IN (" + relatedDomainIds + ") OR dc.domain_id is NULL)"; - } + } } else if (templateFilter == TemplateFilter.self || templateFilter == TemplateFilter.selfexecutable) { whereClause += " AND t.account_id IN (" + permittedAccountsStr + ")"; - } else if (templateFilter == 
TemplateFilter.sharedexecutable) { + } else if (templateFilter == TemplateFilter.sharedexecutable || templateFilter == TemplateFilter.shared ) { whereClause += " AND " + " (t.account_id IN (" + permittedAccountsStr + ") OR" + - " lp.account_id IN (" + permittedAccountsStr + "))"; + " lp.account_id IN (" + permittedAccountsStr + "))"; } else if (templateFilter == TemplateFilter.executable && !permittedAccounts.isEmpty()) { whereClause += attr + "(t.public = 1 OR t.account_id IN (" + permittedAccountsStr + "))"; } else if (templateFilter == TemplateFilter.community) { @@ -659,7 +670,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } else if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN && !isIso) { return templateZonePairList; } - + if (tags != null && !tags.isEmpty()) { whereClause += " AND ("; boolean first = true; @@ -672,13 +683,13 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } whereClause += ")"; } - + if (whereClause.equals("")) { whereClause += " WHERE "; } else if (!whereClause.equals(" WHERE ")) { whereClause += " AND "; } - + sql += whereClause + getExtrasWhere(templateFilter, name, keyword, isIso, bootable, hyperType, zoneId, onlyReady, showDomr) + groupByClause + getOrderByLimit(pageSize, startIndex); @@ -689,8 +700,8 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem Pair templateZonePair = new Pair(rs.getLong(1), rs.getLong(2)); templateZonePairList.add(templateZonePair); } - //for now, defaulting pageSize to a large val if null; may need to revisit post 2.2RC2 - if(isIso && templateZonePairList.size() < (pageSize != null ? pageSize : 500) + //for now, defaulting pageSize to a large val if null; may need to revisit post 2.2RC2 + if(isIso && templateZonePairList.size() < (pageSize != null ? 
pageSize : 500) && templateFilter != TemplateFilter.community && !(templateFilter == TemplateFilter.self && !BaseCmd.isRootAdmin(caller.getType())) ){ //evaluates to true If root admin and filter=self @@ -737,7 +748,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem s_logger.warn("Error in cleaning up", sqle); } } - + return templateZonePairList; } @@ -760,7 +771,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sql += " AND t.hypervisor_type = '" + hyperType.toString() + "'"; } } - + if (bootable != null) { sql += " AND t.bootable = " + bootable; } @@ -777,7 +788,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem } if (!showDomr){ sql += " AND t.type != '" +Storage.TemplateType.SYSTEM.toString() + "'"; - } + } sql += " AND t.removed IS NULL"; @@ -787,14 +798,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private String getOrderByLimit(Long pageSize, Long startIndex) { Boolean isAscending = Boolean.parseBoolean(_configDao.getValue("sortkey.algorithm")); isAscending = (isAscending == null ? 
true : isAscending); - + String sql; if (isAscending) { sql = " ORDER BY t.sort_key ASC"; } else { sql = " ORDER BY t.sort_key DESC"; } - + if ((pageSize != null) && (startIndex != null)) { sql += " LIMIT " + startIndex.toString() + "," + pageSize.toString(); } @@ -825,7 +836,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem _templateZoneDao.update(tmpltZoneVO.getId(), tmpltZoneVO); } txn.commit(); - + return tmplt.getId(); } @@ -844,7 +855,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sc.setParameters("templateType", Storage.TemplateType.BUILTIN); return listBy(sc); } - + @Override public VMTemplateVO findSystemVMTemplate(long zoneId) { SearchCriteria sc = tmpltTypeHyperSearch.create(); @@ -854,14 +865,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem //order by descending order of id and select the first (this is going to be the latest) List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, 1l)); - + if (tmplts.size() > 0) { return tmplts.get(0); } else { return null; } } - + public VMTemplateVO findSystemVMTemplate(long zoneId, HypervisorType hType) { SearchCriteria sc = tmpltTypeHyperSearch.create(); sc.setParameters("templateType", Storage.TemplateType.SYSTEM); @@ -890,7 +901,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem //order by descending order of id and select the first (this is going to be the latest) List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, 1l)); - + if (tmplts.size() > 0) { return tmplts.get(0); } else { @@ -904,7 +915,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem sc.setParameters("account", accountId); return customSearch(sc, null).get(0); } - + @Override @DB public boolean remove(Long id) { @@ -912,7 +923,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem txn.start(); VMTemplateVO template = createForUpdate(); template.setRemoved(new Date()); - + VMTemplateVO vo = findById(id); if (vo != 
null) { if (vo.getFormat() == ImageFormat.ISO) { @@ -926,14 +937,14 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem txn.commit(); return result; } - + private boolean isAdmin(short accountType) { return ((accountType == Account.ACCOUNT_TYPE_ADMIN) || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); } - + @Override public List findTemplatesToSyncToS3() { return executeList(SELECT_S3_CANDIDATE_TEMPLATES, new Object[] {}); @@ -1027,7 +1038,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem joinClause.append(" LEFT JOIN launch_permission lp ON t.id = lp.template_id WHERE (t.account_id IN ("); joinClause.append(permittedAccountsStr); joinClause.append(") OR lp.account_id IN ("); - joinClause.append(permittedAccountsStr); + joinClause.append(permittedAccountsStr); joinClause.append("))"); } else { joinClause.append(" INNER JOIN account a on (t.account_id = a.id) "); @@ -1073,4 +1084,39 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem return templateZonePairList; } + @Override + public boolean updateState(TemplateState currentState, TemplateEvent event, + TemplateState nextState, VMTemplateVO vo, Object data) { + Long oldUpdated = vo.getUpdatedCount(); + Date oldUpdatedTime = vo.getUpdated(); + + + SearchCriteria sc = updateStateSearch.create(); + sc.setParameters("id", vo.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", vo.getUpdatedCount()); + + vo.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateVO dbVol = findByIdIncludingRemoved(vo.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + 
str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(vo.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(vo.getUpdatedCount()) + .append("; updatedTime=").append(vo.getUpdated()); + str.append(": stale Data={id=").append(vo.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + vo.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } } diff --git a/server/src/com/cloud/storage/dao/VMTemplateHostDao.java b/server/src/com/cloud/storage/dao/VMTemplateHostDao.java index 5625e568ef0..23241cd17da 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateHostDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplateHostDao.java @@ -18,11 +18,15 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VMTemplateHostDao extends GenericDao { +public interface VMTemplateHostDao extends GenericDao, StateDao { List listByHostId(long id); List listByTemplateId(long templateId); @@ -30,6 +34,8 @@ public interface VMTemplateHostDao extends GenericDao { List listByOnlyTemplateId(long templateId); VMTemplateHostVO findByHostTemplate(long hostId, long templateId); + + VMTemplateHostVO findByTemplateId(long templateId); 
VMTemplateHostVO findByHostTemplate(long hostId, long templateId, boolean lock); diff --git a/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java b/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java index 4d1ac0208ac..7f35eabfaa7 100755 --- a/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java @@ -29,6 +29,9 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -42,7 +45,9 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.UpdateBuilder; @Component @Local(value={VMTemplateHostDao.class}) @@ -57,6 +62,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase HostDestroyedSearch; protected final SearchBuilder TemplateStatusSearch; protected final SearchBuilder TemplateStatesSearch; + protected final SearchBuilder updateStateSearch; protected SearchBuilder ZONE_TEMPLATE_SEARCH; protected SearchBuilder LOCAL_SECONDARY_STORAGE_SEARCH; @@ -120,6 +126,12 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase sc = HostTemplateSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("destroyed", false); + return findOneIncludingRemovedBy(sc); + } @Override public List listByTemplateStatus(long templateId, VMTemplateHostVO.Status downloadState) { @@ -238,7 +258,6 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase sc = 
updateStateSearch.create(); + sc.setParameters("id", templateHost.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", templateHost.getUpdatedCount()); + + templateHost.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateHostVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateHostVO dbVol = findByIdIncludingRemoved(templateHost.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(templateHost.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(templateHost.getUpdatedCount()) + .append("; updatedTime=").append(templateHost.getUpdated()); + str.append(": stale Data={id=").append(templateHost.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + templateHost.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } } diff --git a/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java b/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java index f485be7f05c..501c3ca5cc8 100644 --- a/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java +++ b/server/src/com/cloud/storage/dao/VMTemplatePoolDao.java @@ -18,10 +18,14 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VMTemplatePoolDao extends GenericDao { +public interface VMTemplatePoolDao extends GenericDao, StateDao { public List listByPoolId(long id); public List listByTemplateId(long templateId); @@ -42,5 +46,4 @@ public interface VMTemplatePoolDao extends GenericDao TemplateStatusSearch; protected final SearchBuilder TemplatePoolStatusSearch; protected final SearchBuilder TemplateStatesSearch; + protected final SearchBuilder updateStateSearch; protected static final String UPDATE_TEMPLATE_HOST_REF = "UPDATE template_spool_ref SET download_state = ?, download_pct= ?, last_updated = ? " @@ -94,6 +101,12 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase sc = updateStateSearch.create(); + sc.setParameters("id", templatePool.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", templatePool.getUpdatedCount()); + + templatePool.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VMTemplateStoragePoolVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VMTemplateStoragePoolVO dbVol = findByIdIncludingRemoved(templatePool.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(templatePool.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(templatePool.getUpdatedCount()) + .append("; 
updatedTime=").append(templatePool.getUpdated()); + str.append(": stale Data={id=").append(templatePool.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + templatePool.getId() + ", as there is no such object exists in the database anymore"); + } + } + return rows > 0; + } + } diff --git a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java index a189d00fead..40ed875caab 100755 --- a/server/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -37,9 +37,9 @@ import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.State; import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeVO; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -62,8 +62,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol protected final SearchBuilder InstanceStatesSearch; protected final SearchBuilder AllFieldsSearch; protected GenericSearchBuilder CountByAccount; - // ResourceTagsDaoImpl _tagsDao = ComponentLocator.inject(ResourceTagsDaoImpl.class); - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? 
and v.mirror_state = ?"; protected static final String SELECT_HYPERTYPE_FROM_VOLUME = "SELECT c.hypervisor_type from volumes v, storage_pool s, cluster c where v.pool_id = s.id and s.cluster_id = c.id and v.id = ?"; @@ -250,7 +249,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } } - protected VolumeDaoImpl() { + public VolumeDaoImpl() { AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), Op.EQ); AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), Op.EQ); diff --git a/server/src/com/cloud/storage/dao/VolumeHostDao.java b/server/src/com/cloud/storage/dao/VolumeHostDao.java index 6ba82370608..39dda12345b 100755 --- a/server/src/com/cloud/storage/dao/VolumeHostDao.java +++ b/server/src/com/cloud/storage/dao/VolumeHostDao.java @@ -18,10 +18,14 @@ package com.cloud.storage.dao; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; + import com.cloud.storage.VolumeHostVO; import com.cloud.utils.db.GenericDao; +import com.cloud.utils.fsm.StateDao; -public interface VolumeHostDao extends GenericDao { +public interface VolumeHostDao extends GenericDao, StateDao{ VolumeHostVO findByHostVolume(long hostId, long volumeId); diff --git a/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java b/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java index 57f2153f10b..2fd39e6eeca 100755 --- a/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java +++ b/server/src/com/cloud/storage/dao/VolumeHostDaoImpl.java @@ -16,28 +16,35 @@ // under the License. 
package com.cloud.storage.dao; +import java.util.Date; import java.util.List; import javax.ejb.Local; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.storage.VolumeHostVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.UpdateBuilder; @Component @Local(value={VolumeHostDao.class}) public class VolumeHostDaoImpl extends GenericDaoBase implements VolumeHostDao { - + private static final Logger s_logger = Logger.getLogger(VolumeHostDaoImpl.class); protected final SearchBuilder HostVolumeSearch; protected final SearchBuilder ZoneVolumeSearch; protected final SearchBuilder VolumeSearch; protected final SearchBuilder HostSearch; protected final SearchBuilder HostDestroyedSearch; - - VolumeHostDaoImpl(){ + protected final SearchBuilder updateStateSearch; + public VolumeHostDaoImpl(){ HostVolumeSearch = createSearchBuilder(); HostVolumeSearch.and("host_id", HostVolumeSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostVolumeSearch.and("volume_id", HostVolumeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); @@ -64,6 +71,12 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem HostDestroyedSearch.and("host_id", HostDestroyedSearch.entity().getHostId(), SearchCriteria.Op.EQ); HostDestroyedSearch.and("destroyed", HostDestroyedSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); HostDestroyedSearch.done(); + + updateStateSearch = this.createSearchBuilder(); + updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); + updateStateSearch.and("state", 
updateStateSearch.entity().getState(), Op.EQ); + updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); + updateStateSearch.done(); } @@ -112,4 +125,41 @@ public class VolumeHostDaoImpl extends GenericDaoBase implem return listIncludingRemovedBy(sc); } + @Override + public boolean updateState(State currentState, Event event, + State nextState, DataObjectInStore vo, Object data) { + VolumeHostVO volHost = (VolumeHostVO) vo; + Long oldUpdated = volHost.getUpdatedCount(); + Date oldUpdatedTime = volHost.getUpdated(); + + + SearchCriteria sc = updateStateSearch.create(); + sc.setParameters("id", volHost.getId()); + sc.setParameters("state", currentState); + sc.setParameters("updatedCount", volHost.getUpdatedCount()); + + volHost.incrUpdatedCount(); + + UpdateBuilder builder = getUpdateBuilder(vo); + builder.set(vo, "state", nextState); + builder.set(vo, "updated", new Date()); + + int rows = update((VolumeHostVO) vo, sc); + if (rows == 0 && s_logger.isDebugEnabled()) { + VolumeHostVO dbVol = findByIdIncludingRemoved(volHost.getId()); + if (dbVol != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={id=").append(dbVol.getId()).append("; state=").append(dbVol.getState()).append("; updatecount=").append(dbVol.getUpdatedCount()).append(";updatedTime=") + .append(dbVol.getUpdated()); + str.append(": New Data={id=").append(volHost.getId()).append("; state=").append(nextState).append("; event=").append(event).append("; updatecount=").append(volHost.getUpdatedCount()) + .append("; updatedTime=").append(volHost.getUpdated()); + str.append(": stale Data={id=").append(volHost.getId()).append("; state=").append(currentState).append("; event=").append(event).append("; updatecount=").append(oldUpdated) + .append("; updatedTime=").append(oldUpdatedTime); + } else { + s_logger.debug("Unable to update objectIndatastore: id=" + volHost.getId() + ", as there is no such object exists in 
the database anymore"); + } + } + return rows > 0; + } + } diff --git a/server/src/com/cloud/storage/download/DownloadListener.java b/server/src/com/cloud/storage/download/DownloadListener.java index 036d40ad015..d4c20cf4989 100755 --- a/server/src/com/cloud/storage/download/DownloadListener.java +++ b/server/src/com/cloud/storage/download/DownloadListener.java @@ -46,11 +46,11 @@ import com.cloud.host.HostVO; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeHostVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.Volume.Event; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; import com.cloud.storage.dao.VolumeDao; @@ -343,8 +343,14 @@ public class DownloadListener implements Listener { updateBuilder.setInstallPath(answer.getInstallPath()); updateBuilder.setSize(answer.getTemplateSize()); updateBuilder.setPhysicalSize(answer.getTemplatePhySicalSize()); - + volumeHostDao.update(getVolumeHostId(), updateBuilder); + + // Update volume size in Volume table. 
+ VolumeVO updateVolume = _volumeDao.createForUpdate(); + updateVolume.setSize(answer.getTemplateSize()); + _volumeDao.update(volume.getId(), updateVolume); + /*if (answer.getCheckSum() != null) { VMTemplateVO templateDaoBuilder = _vmTemplateDao.createForUpdate(); templateDaoBuilder.setChecksum(answer.getCheckSum()); diff --git a/server/src/com/cloud/storage/download/DownloadMonitor.java b/server/src/com/cloud/storage/download/DownloadMonitor.java index 30ec3b1623b..897befa250b 100644 --- a/server/src/com/cloud/storage/download/DownloadMonitor.java +++ b/server/src/com/cloud/storage/download/DownloadMonitor.java @@ -18,6 +18,7 @@ package com.cloud.storage.download; import java.util.Map; + import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.storage.VMTemplateVO; diff --git a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java index 6d3cf2a101b..0bc89e31f66 100755 --- a/server/src/com/cloud/storage/download/DownloadMonitorImpl.java +++ b/server/src/com/cloud/storage/download/DownloadMonitorImpl.java @@ -37,10 +37,21 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; -import com.cloud.agent.api.storage.*; + +import com.cloud.agent.api.storage.DeleteTemplateCommand; +import com.cloud.agent.api.storage.DeleteVolumeCommand; +import com.cloud.agent.api.storage.DownloadCommand; + import com.cloud.agent.api.storage.DownloadCommand.Proxy; import com.cloud.agent.api.storage.DownloadCommand.ResourceType; import com.cloud.agent.api.storage.DownloadProgressCommand.RequestType; + +import com.cloud.agent.api.storage.DownloadProgressCommand; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeAnswer; +import com.cloud.agent.api.storage.ListVolumeCommand; + 
import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; @@ -50,6 +61,7 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.StorageUnavailableException; @@ -59,13 +71,31 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceManager; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.*; + +import com.cloud.storage.StorageManager; +import com.cloud.storage.SwiftVO; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.dao.*; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.SwiftDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeHostDao; + import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.template.TemplateConstants; import com.cloud.storage.template.TemplateInfo; +import com.cloud.template.TemplateManager; import com.cloud.user.Account; import com.cloud.user.ResourceLimitService; import com.cloud.utils.component.ManagerBase; @@ -78,8 +108,11 @@ import 
com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.SecondaryStorageVm; import com.cloud.vm.SecondaryStorageVmVO; import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDao; + import edu.emory.mathcs.backport.java.util.Collections; @@ -124,6 +157,14 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor ConfigurationDao _configDao; @Inject UserVmManager _vmMgr; + + @Inject TemplateManager templateMgr; + + + @Inject + private UsageEventDao _usageEventDao; + + @Inject private ClusterDao _clusterDao; @Inject @@ -134,6 +175,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor private SwiftDao _swiftDao; @Inject protected ResourceLimitService _resourceLimitMgr; + @Inject + protected UserVmDao _userVmDao; private Boolean _sslCopy = new Boolean(false); private String _copyAuthPasswd; @@ -233,7 +276,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor if(destTmpltHost != null) { start(); - String sourceChecksum = _vmMgr.getChecksum(srcTmpltHost.getHostId(), srcTmpltHost.getInstallPath()); + String sourceChecksum = this.templateMgr.getChecksum(srcTmpltHost.getHostId(), srcTmpltHost.getInstallPath()); DownloadCommand dcmd = new DownloadCommand(destServer.getStorageUrl(), url, template, TemplateConstants.DEFAULT_HTTP_AUTH_USER, _copyAuthPasswd, maxTemplateSizeInBytes); dcmd.setProxy(getHttpProxy()); @@ -473,6 +516,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor long size = -1; if(vmTemplateHost!=null){ size = vmTemplateHost.getPhysicalSize(); + template.setSize(size); + this._templateDao.update(template.getId(), template); } else{ s_logger.warn("Failed to get size for template" + template.getName()); @@ -510,6 +555,8 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor long 
size = -1; if(volumeHost!=null){ size = volumeHost.getPhysicalSize(); + volume.setSize(size); + this._volumeDao.update(volume.getId(), volume); } else{ s_logger.warn("Failed to get size for volume" + volume.getName()); @@ -670,7 +717,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor //Exists then don't download if (volumeInfos.containsKey(volume.getId())){ TemplateInfo volInfo = volumeInfos.remove(volume.getId()); - toBeDownloaded.remove(volumeHost); + toBeDownloaded.remove(volumeHost); s_logger.info("Volume Sync found " + volume.getUuid() + " already in the volume host table"); if (volumeHost.getDownloadState() != Status.DOWNLOADED) { volumeHost.setErrorString(""); @@ -688,13 +735,19 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor } } else { // Put them in right status - volumeHost.setDownloadPercent(100); - volumeHost.setDownloadState(Status.DOWNLOADED); - volumeHost.setInstallPath(volInfo.getInstallPath()); - volumeHost.setSize(volInfo.getSize()); - volumeHost.setPhysicalSize(volInfo.getPhysicalSize()); - volumeHost.setLastUpdated(new Date()); - _volumeHostDao.update(volumeHost.getId(), volumeHost); + volumeHost.setDownloadPercent(100); + volumeHost.setDownloadState(Status.DOWNLOADED); + volumeHost.setInstallPath(volInfo.getInstallPath()); + volumeHost.setSize(volInfo.getSize()); + volumeHost.setPhysicalSize(volInfo.getPhysicalSize()); + volumeHost.setLastUpdated(new Date()); + _volumeHostDao.update(volumeHost.getId(), volumeHost); + + if (volume.getSize() == 0) { + // Set volume size in volumes table + volume.setSize(volInfo.getSize()); + _volumeDao.update(volumeHost.getVolumeId(), volume); + } } continue; } @@ -884,17 +937,21 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor for (String uniqueName : templateInfos.keySet()) { TemplateInfo tInfo = templateInfos.get(uniqueName); - DeleteTemplateCommand dtCommand = new 
DeleteTemplateCommand(ssHost.getStorageUrl(), tInfo.getInstallPath()); - try { - _agentMgr.sendToSecStorage(ssHost, dtCommand, null); - } catch (AgentUnavailableException e) { - String err = "Failed to delete " + tInfo.getTemplateName() + " on secondary storage " + sserverId + " which isn't in the database"; - s_logger.error(err); - return; - } + List userVmUsingIso = _userVmDao.listByIsoId(tInfo.getId()); + //check if there is any Vm using this ISO. + if (userVmUsingIso == null || userVmUsingIso.isEmpty()) { + DeleteTemplateCommand dtCommand = new DeleteTemplateCommand(ssHost.getStorageUrl(), tInfo.getInstallPath()); + try { + _agentMgr.sendToSecStorage(ssHost, dtCommand, null); + } catch (AgentUnavailableException e) { + String err = "Failed to delete " + tInfo.getTemplateName() + " on secondary storage " + sserverId + " which isn't in the database"; + s_logger.error(err); + return; + } - String description = "Deleted template " + tInfo.getTemplateName() + " on secondary storage " + sserverId + " since it isn't in the database"; - s_logger.info(description); + String description = "Deleted template " + tInfo.getTemplateName() + " on secondary storage " + sserverId + " since it isn't in the database"; + s_logger.info(description); + } } } @@ -925,7 +982,7 @@ public class DownloadMonitorImpl extends ManagerBase implements DownloadMonitor s_logger.debug("Found " +templateHostRefList.size()+ " templates with no checksum. 
Will ask for computation"); for(VMTemplateHostVO templateHostRef : templateHostRefList){ s_logger.debug("Getting checksum for template - " + templateHostRef.getTemplateId()); - String checksum = _vmMgr.getChecksum(hostId, templateHostRef.getInstallPath()); + String checksum = this.templateMgr.getChecksum(hostId, templateHostRef.getInstallPath()); VMTemplateVO template = _templateDao.findById(templateHostRef.getTemplateId()); s_logger.debug("Setting checksum " +checksum+ " for template - " + template.getName()); template.setChecksum(checksum); diff --git a/server/src/com/cloud/storage/listener/SnapshotStateListener.java b/server/src/com/cloud/storage/listener/SnapshotStateListener.java index 17ccce54c82..21fcf11930b 100644 --- a/server/src/com/cloud/storage/listener/SnapshotStateListener.java +++ b/server/src/com/cloud/storage/listener/SnapshotStateListener.java @@ -17,27 +17,29 @@ package com.cloud.storage.listener; -import com.cloud.event.EventCategory; -import com.cloud.storage.Snapshot; -import com.cloud.storage.Snapshot.Event; -import com.cloud.storage.Snapshot.State; -import com.cloud.server.ManagementServer; -import com.cloud.utils.fsm.StateListener; +import javax.inject.Inject; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; + +import com.cloud.event.EventCategory; +import com.cloud.server.ManagementServer; +import com.cloud.storage.Snapshot; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.Snapshot.Event; +import com.cloud.storage.Snapshot.State; +import com.cloud.storage.SnapshotVO; +import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.component.ComponentContext; -import java.util.Enumeration; import java.util.HashMap; import java.util.Map; -import javax.inject.Inject; +public class SnapshotStateListener implements StateListener { -public class 
SnapshotStateListener implements StateListener { - - // get the event bus provider if configured - @Inject protected EventBus _eventBus; + protected static EventBus _eventBus = null; private static final Logger s_logger = Logger.getLogger(VolumeStateListener.class); @@ -46,21 +48,23 @@ public class SnapshotStateListener implements StateListener pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId()); + List pools = _poolDao.listBy(host.getDataCenterId(), host.getPodId(), host.getClusterId(), ScopeType.CLUSTER); + pools.addAll(_poolDao.findZoneWideStoragePoolsByTags(host.getDataCenterId(), null)); for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { continue; } - if (!pool.getPoolType().isShared()) { + if (!pool.isShared()) { continue; } @@ -91,8 +93,8 @@ public class StoragePoolMonitor implements Listener { Long hostId = host.getId(); s_logger.debug("Host " + hostId + " connected, sending down storage pool information ..."); try { - _storageManager.connectHostToSharedPool(hostId, pool); - _storageManager.createCapacityEntry(pool); + _storageManager.connectHostToSharedPool(hostId, pool.getId()); + _storageManager.createCapacityEntry(pool.getId()); } catch (Exception e) { s_logger.warn("Unable to connect host " + hostId + " to pool " + pool + " due to " + e.toString(), e); } diff --git a/server/src/com/cloud/storage/listener/VolumeStateListener.java b/server/src/com/cloud/storage/listener/VolumeStateListener.java index ee715e0131d..6fa56a5009d 100644 --- a/server/src/com/cloud/storage/listener/VolumeStateListener.java +++ b/server/src/com/cloud/storage/listener/VolumeStateListener.java @@ -18,24 +18,23 @@ package com.cloud.storage.listener; import com.cloud.event.EventCategory; +import com.cloud.server.ManagementServer; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Event; import com.cloud.storage.Volume.State; -import com.cloud.server.ManagementServer; +import 
com.cloud.utils.component.ComponentContext; import com.cloud.utils.fsm.StateListener; import org.apache.cloudstack.framework.events.EventBus; import org.apache.cloudstack.framework.events.EventBusException; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; import java.util.HashMap; import java.util.Map; -import javax.inject.Inject; - public class VolumeStateListener implements StateListener { - // get the event bus provider if configured - @Inject protected EventBus _eventBus = null; + protected static EventBus _eventBus = null; private static final Logger s_logger = Logger.getLogger(VolumeStateListener.class); @@ -57,8 +56,10 @@ public class VolumeStateListener implements StateListener private void pubishOnEventBus(String event, String status, Volume vo, State oldState, State newState) { - if (_eventBus == null) { - return; // no provider is configured to provide events bus, so just return + try { + _eventBus = ComponentContext.getComponent(EventBus.class); + } catch(NoSuchBeanDefinitionException nbe) { + return; // no provider is configured to provide events bus, so just return } String resourceName = getEntityFromClassName(Volume.class.getName()); diff --git a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java index 877b97c185d..8f25514180c 100644 --- a/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java +++ b/server/src/com/cloud/storage/resource/DummySecondaryStorageResource.java @@ -46,8 +46,8 @@ import com.cloud.host.Host.Type; import com.cloud.resource.ServerResource; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.Storage; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.template.TemplateConstants; import 
com.cloud.storage.template.TemplateInfo; diff --git a/server/src/com/cloud/storage/s3/S3Manager.java b/server/src/com/cloud/storage/s3/S3Manager.java index 0e47d7273d6..0f74e431376 100644 --- a/server/src/com/cloud/storage/s3/S3Manager.java +++ b/server/src/com/cloud/storage/s3/S3Manager.java @@ -23,6 +23,7 @@ import java.util.List; import com.cloud.agent.api.to.S3TO; import org.apache.cloudstack.api.command.admin.storage.AddS3Cmd; import org.apache.cloudstack.api.command.admin.storage.ListS3sCmd; + import com.cloud.dc.DataCenterVO; import com.cloud.exception.DiscoveryException; import com.cloud.storage.S3; diff --git a/server/src/com/cloud/storage/s3/S3ManagerImpl.java b/server/src/com/cloud/storage/s3/S3ManagerImpl.java index 13fe2b76ed1..61e5573394d 100644 --- a/server/src/com/cloud/storage/s3/S3ManagerImpl.java +++ b/server/src/com/cloud/storage/s3/S3ManagerImpl.java @@ -68,8 +68,8 @@ import com.cloud.storage.S3; import com.cloud.storage.S3VO; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateS3VO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.S3Dao; import com.cloud.storage.dao.VMTemplateDao; diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index fca89dcb1cb..46ac7af59f8 100755 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -98,8 +98,8 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; +import 
com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/com/cloud/storage/snapshot/SnapshotManager.java index a7692de7107..818133002c9 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManager.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManager.java @@ -18,12 +18,19 @@ package com.cloud.storage.snapshot; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; import com.cloud.exception.ResourceAllocationException; import com.cloud.host.HostVO; +import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.utils.db.Filter; +import com.cloud.utils.fsm.NoTransitionException; /** * @@ -37,66 +44,6 @@ public interface SnapshotManager { public static final int MONTHLYMAX = 12; public static final int DELTAMAX = 16; - /** - * After successfully creating a snapshot of a volume, copy the snapshot to the secondary storage for 1) reliability 2) So - * that storage space on Primary is conserved. - * - * @param snapshot - * Info about the created snapshot on primary storage. - * @param startEventId - * event id of the scheduled event for this snapshot - * @return True if the snapshot was successfully backed up. - */ - public boolean backupSnapshotToSecondaryStorage(SnapshotVO snapshot); - - /** - * Once a snapshot has completed, 1) If success, update the database entries 2) If success and there are excess snapshots - * for any of the policies given, delete the oldest one. 3) Schedule the next recurring snapshot. 
- * - * @param volumeId - * The volume for which the snapshot is being taken - * @param snapshotId - * The snapshot which has just completed - * @param policyIds - * The list of policyIds to which this snapshot belongs to - * @param backedUp - * If true, the snapshot has been successfully created. - */ - void postCreateSnapshot(Long volumeId, Long snapshotId, Long policyId, boolean backedUp); - - /** - * Destroys the specified snapshot from secondary storage - */ - boolean destroySnapshot(long userId, long snapshotId, long policyId); - - /** - * Deletes snapshot scheduling policy. Delete will fail if this policy is assigned to one or more volumes - */ - boolean deletePolicy(long userId, Long policyId); - - /** - * Lists all snapshots for the volume which are created using schedule of the specified policy - */ - /* - * List listSnapsforPolicy(long policyId, Filter filter); - */ - /** - * List all policies which are assigned to the specified volume - */ - List listPoliciesforVolume(long volumeId); - - /** - * List all policies to which a specified snapshot belongs. For ex: A snapshot may belong to a hourly snapshot and a daily - * snapshot run at the same time - */ - /* - * List listPoliciesforSnapshot(long snapshotId); - */ - /** - * List all snapshots for a specified volume irrespective of the policy which created the snapshot - */ - List listSnapsforVolume(long volumeId); - void deletePoliciesForVolume(Long volumeId); /** @@ -108,35 +55,20 @@ public interface SnapshotManager { * The account which is to be deleted. 
*/ boolean deleteSnapshotDirsForAccount(long accountId); - - SnapshotPolicyVO getPolicyForVolume(long volumeId); - - boolean destroySnapshotBackUp(long snapshotId); - - /** - * Create a snapshot of a volume - * - * @param cmd - * the API command wrapping the parameters for creating the snapshot (mainly volumeId) - * @return the Snapshot that was created - */ - SnapshotVO createSnapshotOnPrimary(VolumeVO volume, Long polocyId, Long snapshotId) throws ResourceAllocationException; - - List listPoliciesforSnapshot(long snapshotId); - - List listSnapsforPolicy(long policyId, Filter filter); - + void downloadSnapshotsFromSwift(SnapshotVO ss); void downloadSnapshotsFromS3(SnapshotVO snapshot); - HostVO getSecondaryStorageHost(SnapshotVO snapshot); - String getSecondaryStorageURL(SnapshotVO snapshot); - void deleteSnapshotsForVolume (String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId ); - void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId); - boolean canOperateOnVolume(VolumeVO volume); + boolean canOperateOnVolume(Volume volume); + + Answer sendToPool(Volume vol, Command cmd); + + SnapshotVO getParentSnapshot(VolumeInfo volume, Snapshot snapshot); + + Snapshot backupSnapshot(Long snapshotId); } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index e06da75580c..bacca019294 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -28,12 +28,27 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; +import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; +import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import 
org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.*; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.DeleteSnapshotBackupCommand; +import com.cloud.agent.api.DeleteSnapshotsDirCommand; +import com.cloud.agent.api.DownloadSnapshotFromS3Command; +import com.cloud.agent.api.downloadSnapshotFromSwiftCommand; import com.cloud.agent.api.to.S3TO; import com.cloud.agent.api.to.SwiftTO; import com.cloud.alert.AlertManager; @@ -46,9 +61,12 @@ import com.cloud.dc.DataCenter; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.dao.DomainDao; -import com.cloud.event.*; +import com.cloud.event.ActionEvent; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.event.EventVO; +import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.EventDao; -import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; @@ -58,43 +76,54 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; 
import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.resource.ResourceManager; import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.storage.*; +import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Type; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.storage.dao.*; -import com.cloud.storage.listener.SnapshotStateListener; +import com.cloud.storage.SnapshotPolicyVO; +import com.cloud.storage.SnapshotScheduleVO; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.SnapshotScheduleDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.user.*; +import com.cloud.template.TemplateManager; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.DomainManager; +import com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.DateUtil.IntervalType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; - -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; 
import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.*; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.UserVmDao; + import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotPolicyCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; @@ -105,9 +134,9 @@ import javax.ejb.Local; import javax.naming.ConfigurationException; import java.util.*; import com.cloud.vm.snapshot.VMSnapshot; -import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; + @Component @Local(value = { SnapshotManager.class, SnapshotService.class }) public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotService { @@ -129,7 +158,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject protected SnapshotDao _snapshotDao; @Inject - protected StoragePoolDao _storagePoolDao; + protected PrimaryDataStoreDao _storagePoolDao; @Inject protected EventDao _eventDao; @Inject @@ -151,8 +180,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject protected ClusterDao _clusterDao; @Inject - private UsageEventDao _usageEventDao; - @Inject private ResourceLimitService _resourceLimitMgr; @Inject private SwiftManager _swiftMgr; @@ -161,33 +188,34 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject private SecondaryStorageVmManager _ssvmMgr; @Inject - private ResourceManager _resourceMgr; - @Inject private DomainManager 
_domainMgr; @Inject - private VolumeDao _volumeDao; - @Inject private ResourceTagDao _resourceTagDao; @Inject private ConfigurationDao _configDao; + @Inject private VMSnapshotDao _vmSnapshotDao; String _name; + + @Inject TemplateManager templateMgr; + @Inject VolumeManager volumeMgr; + @Inject DataStoreManager dataStoreMgr; + @Inject List snapshotStrategies; + @Inject VolumeDataFactory volFactory; + @Inject SnapshotDataFactory snapshotFactory; + + private int _totalRetries; private int _pauseInterval; - private int _deltaSnapshotMax; private int _backupsnapshotwait; - private StateMachine2 _snapshotFsm; - protected SearchBuilder PolicySnapshotSearch; protected SearchBuilder PoliciesForSnapSearch; - - - protected Answer sendToPool(Volume vol, Command cmd) { - StoragePool pool = _storagePoolDao.findById(vol.getPoolId()); - + @Override + public Answer sendToPool(Volume vol, Command cmd) { + StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); long[] hostIdsToTryFirst = null; Long vmHostId = getHostIdForSnapshotOperation(vol); @@ -238,128 +266,11 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return null; } - @Override - public SnapshotVO createSnapshotOnPrimary(VolumeVO volume, Long policyId, Long snapshotId) { - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Can not find snapshot " + snapshotId); - } - - try { - stateTransitTo(snapshot, Snapshot.Event.CreateRequested); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); - } - - // Send a ManageSnapshotCommand to the agent - String vmName = _storageMgr.getVmNameOnVolume(volume); - long volumeId = volume.getId(); - long preId = _snapshotDao.getLastSnapshot(volumeId, snapshotId); - - String preSnapshotPath = null; - SnapshotVO preSnapshotVO = null; - if (preId != 0 && !(volume.getLastPoolId() != null && 
!volume.getLastPoolId().equals(volume.getPoolId()))) { - preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); - if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { - preSnapshotPath = preSnapshotVO.getPath(); - } - } - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); - - // RBD volumes do not support snapshotting in the way CloudStack does it. - // For now we leave the snapshot feature disabled for RBD volumes - if (srcPool.getPoolType() == StoragePoolType.RBD) { - throw new CloudRuntimeException("RBD volumes do not support snapshotting"); - } - - ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshotId, volume.getPath(), srcPool, preSnapshotPath, snapshot.getName(), vmName); - - ManageSnapshotAnswer answer = (ManageSnapshotAnswer) sendToPool(volume, cmd); - // Update the snapshot in the database - if ((answer != null) && answer.getResult()) { - // The snapshot was successfully created - if (preSnapshotPath != null && preSnapshotPath.equals(answer.getSnapshotPath())) { - // empty snapshot - s_logger.debug("CreateSnapshot: this is empty snapshot "); - try { - snapshot.setPath(preSnapshotPath); - snapshot.setBackupSnapshotId(preSnapshotVO.getBackupSnapshotId()); - snapshot.setSwiftId(preSnapshotVO.getSwiftId()); - snapshot.setPrevSnapshotId(preId); - snapshot.setSecHostId(preSnapshotVO.getSecHostId()); - stateTransitTo(snapshot, Snapshot.Event.OperationNotPerformed); - } catch (NoTransitionException nte) { - s_logger.debug("CreateSnapshot: failed to update state of snapshot due to " + nte.getMessage()); - } - } else { - long preSnapshotId = 0; - - if (preSnapshotVO != null && preSnapshotVO.getBackupSnapshotId() != null) { - preSnapshotId = preId; - // default delta snap number is 16 - int deltaSnap = _deltaSnapshotMax; - - int i; - for (i = 1; i < deltaSnap; i++) { - String prevBackupUuid = preSnapshotVO.getBackupSnapshotId(); - // previous snapshot doesn't have backup, create a full snapshot - if 
(prevBackupUuid == null) { - preSnapshotId = 0; - break; - } - long preSSId = preSnapshotVO.getPrevSnapshotId(); - if (preSSId == 0) { - break; - } - preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preSSId); - } - if (i >= deltaSnap) { - preSnapshotId = 0; - } - } - - //If the volume is moved around, backup a full snapshot to secondary storage - if (volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId())) { - preSnapshotId = 0; - volume.setLastPoolId(volume.getPoolId()); - _volumeDao.update(volume.getId(), volume); - } - snapshot = updateDBOnCreate(snapshotId, answer.getSnapshotPath(), preSnapshotId); - } - // Get the snapshot_schedule table entry for this snapshot and - // policy id. - // Set the snapshotId to retrieve it back later. - if (policyId != Snapshot.MANUAL_POLICY_ID) { - SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, true); - assert snapshotSchedule != null; - snapshotSchedule.setSnapshotId(snapshotId); - _snapshotScheduleDao.update(snapshotSchedule.getId(), snapshotSchedule); - } - - } else { - if (answer != null) { - s_logger.error(answer.getDetails()); - } - try { - stateTransitTo(snapshot, Snapshot.Event.OperationFailed); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update snapshot state due to " + nte.getMessage()); - } - throw new CloudRuntimeException("Creating snapshot for volume " + volumeId + " on primary storage failed."); - } - - return snapshot; - } - - public SnapshotVO createSnapshotImpl(long volumeId, long policyId) throws ResourceAllocationException { - return null; - } - @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "creating snapshot", async = true) - public SnapshotVO createSnapshot(Long volumeId, Long policyId, Long snapshotId, Account snapshotOwner) { - VolumeVO volume = _volsDao.findById(volumeId); + public Snapshot createSnapshot(Long volumeId, Long policyId, Long snapshotId, 
Account snapshotOwner) { + VolumeInfo volume = this.volFactory.getVolume(volumeId); if (volume == null) { throw new InvalidParameterValueException("No such volume exist"); } @@ -368,120 +279,50 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new InvalidParameterValueException("Volume is not in ready state"); } - SnapshotVO snapshot = null; + SnapshotInfo snapshot = null; boolean backedUp = false; - UserVmVO uservm = null; // does the caller have the authority to act on this volume _accountMgr.checkAccess(UserContext.current().getCaller(), null, true, volume); + + SnapshotInfo snap = this.snapshotFactory.getSnapshot(snapshotId); + SnapshotStrategy strategy = null; + for (SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snap)) { + strategy = st; + break; + } + } + try { - - Long poolId = volume.getPoolId(); - if (poolId == null) { - throw new CloudRuntimeException("You cannot take a snapshot of a volume until it has been attached to an instance"); - } - - if (_volsDao.getHypervisorType(volume.getId()).equals(HypervisorType.KVM)) { - uservm = _vmDao.findById(volume.getInstanceId()); - if (uservm != null && uservm.getType() != VirtualMachine.Type.User) { - throw new CloudRuntimeException("Can't take a snapshot on system vm "); - } - - StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId()); - ClusterVO cluster = _clusterDao.findById(storagePool.getClusterId()); - List hosts = _resourceMgr.listAllHostsInCluster(cluster.getId()); - if (hosts != null && !hosts.isEmpty()) { - HostVO host = hosts.get(0); - if (!hostSupportSnapsthot(host)) { - throw new CloudRuntimeException("KVM Snapshot is not supported on cluster: " + host.getId()); - } - } - } - - // if volume is attached to a vm in destroyed or expunging state; disallow - // if volume is attached to a vm in taking vm snapshot; disallow - if (volume.getInstanceId() != null) { - UserVmVO userVm = _vmDao.findById(volume.getInstanceId()); - if (userVm 
!= null) { - if (userVm.getState().equals(State.Destroyed) || userVm.getState().equals(State.Expunging)) { - throw new CloudRuntimeException("Creating snapshot failed due to volume:" + volumeId + " is associated with vm:" + userVm.getInstanceName() + " is in " - + userVm.getState().toString() + " state"); - } - - if(userVm.getHypervisorType() == HypervisorType.VMware || userVm.getHypervisorType() == HypervisorType.KVM) { - List activeSnapshots = _snapshotDao.listByInstanceId(volume.getInstanceId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp); - if(activeSnapshots.size() > 1) - throw new CloudRuntimeException("There is other active snapshot tasks on the instance to which the volume is attached, please try again later"); - } - List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(userVm.getId(), - VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); - if (activeVMSnapshots.size() > 0) { - throw new CloudRuntimeException( - "There is other active vm snapshot tasks on the instance to which the volume is attached, please try again later"); - } - } - } - - snapshot = createSnapshotOnPrimary(volume, policyId, snapshotId); - if (snapshot != null) { - if (snapshot.getState() == Snapshot.State.CreatedOnPrimary) { - backedUp = backupSnapshotToSecondaryStorage(snapshot); - } else if (snapshot.getState() == Snapshot.State.BackedUp) { - // For empty snapshot we set status to BackedUp in createSnapshotOnPrimary - backedUp = true; - } else { - throw new CloudRuntimeException("Failed to create snapshot: " + snapshot + " on primary storage"); - } - if (!backedUp) { - throw new CloudRuntimeException("Created snapshot: " + snapshot + " on primary but failed to backup on secondary"); - } - } else { - throw new CloudRuntimeException("Failed to create snapshot: " + snapshot + " on primary storage"); - } - } finally { - // Cleanup jobs to do after the snapshot has been created; decrement resource count - if 
(snapshot != null) { - postCreateSnapshot(volumeId, snapshot.getId(), policyId, backedUp); - //Check if the snapshot was removed while backingUp. If yes, do not log snapshot create usage event - SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); - if ((freshSnapshot != null) && backedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), - snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, - volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); - } - if( !backedUp ) { - - } else { - _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); - } - } - - /* - try { - _storageMgr.stateTransitTo(volume, Volume.Event.OperationSucceeded); - } catch (NoTransitionException e) { - s_logger.debug("Failed to transit volume state: " + e.toString()); - }*/ + snapshot = strategy.takeSnapshot(volume, snapshotId); + if (snapshot != null) { + postCreateSnapshot(volumeId, snapshot.getId(), policyId); + //Check if the snapshot was removed while backingUp. 
If yes, do not log snapshot create usage event + SnapshotVO freshSnapshot = _snapshotDao.findById(snapshot.getId()); + if ((freshSnapshot != null) && backedUp) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), + snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, + volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); + } + _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); + } + + Boolean backup = Boolean.parseBoolean(this._configDao.getValue(Config.BackupSnapshotAferTakingSnapshot.toString())); + if (backup) { + this.backupSnapshot(snapshotId); + } + } catch(Exception e) { + s_logger.debug("Failed to create snapshot", e); + throw new CloudRuntimeException("Failed to create snapshot", e); } return snapshot; } - private SnapshotVO updateDBOnCreate(Long id, String snapshotPath, long preSnapshotId) { - SnapshotVO createdSnapshot = _snapshotDao.findByIdIncludingRemoved(id); - createdSnapshot.setPath(snapshotPath); - createdSnapshot.setPrevSnapshotId(preSnapshotId); - try { - stateTransitTo(createdSnapshot, Snapshot.Event.OperationSucceeded); - } catch (NoTransitionException nte) { - s_logger.debug("Faile to update state of snapshot due to " + nte.getMessage()); - } - return createdSnapshot; - } - - private static void checkObjectStorageConfiguration(SwiftTO swift, S3TO s3) { + private void checkObjectStorageConfiguration(SwiftTO swift, S3TO s3) { if (swift != null && s3 != null) { throw new CloudRuntimeException( @@ -490,26 +331,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } - @Override - public void deleteSnapshotsForVolume (String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId ){ - SwiftTO swift = _swiftMgr.getSwiftTO(); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand( - swift, s3, 
secondaryStoragePoolUrl, dcId, accountId, volumeId, - null, true); - try { - Answer ans = _agentMgr.sendToSSVM(dcId, cmd); - if ( ans == null || !ans.getResult() ) { - s_logger.warn("DeleteSnapshotBackupCommand failed due to " + ans.getDetails() + " volume id: " + volumeId); - } - } catch (Exception e) { - s_logger.warn("DeleteSnapshotBackupCommand failed due to" + e.toString() + " volume id: " + volumeId); - } - } - @Override public void deleteSnapshotsDirForVolume(String secondaryStoragePoolUrl, Long dcId, Long accountId, Long volumeId) { DeleteSnapshotsDirCommand cmd = new DeleteSnapshotsDirCommand(secondaryStoragePoolUrl, dcId, accountId, volumeId); @@ -523,6 +344,27 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } + @Override + public Snapshot backupSnapshot(Long snapshotId) { + SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotId); + if (snapshot == null) { + throw new CloudRuntimeException("Can't find snapshot:" + snapshotId); + } + + if (snapshot.getState() == Snapshot.State.BackedUp) { + return snapshot; + } + + SnapshotStrategy strategy = null; + for (SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snapshot)) { + strategy = st; + break; + } + } + + return strategy.backupSnapshot(snapshot); + } @Override public void downloadSnapshotsFromSwift(SnapshotVO ss) { @@ -530,7 +372,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, VolumeVO volume = _volsDao.findById(volumeId); Long dcId = volume.getDataCenterId(); Long accountId = volume.getAccountId(); - HostVO secHost = _storageMgr.getSecondaryStorageHost(dcId); + HostVO secHost = this.templateMgr.getSecondaryStorageHost(dcId); String secondaryStoragePoolUrl = secHost.getStorageUrl(); Long swiftId = ss.getSwiftId(); @@ -581,7 +423,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, final VolumeVO volume = _volsDao.findById(snapshot.getVolumeId()); final Long zoneId = 
volume.getDataCenterId(); - final HostVO secHost = _storageMgr.getSecondaryStorageHost(zoneId); + final HostVO secHost = this.templateMgr.getSecondaryStorageHost(zoneId); final S3TO s3 = _s3Mgr.getS3TO(snapshot.getS3Id()); final List backupUuids = determineBackupUuids(snapshot); @@ -609,133 +451,17 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - + @Override - @DB - public boolean backupSnapshotToSecondaryStorage(SnapshotVO ss) { - long snapshotId = ss.getId(); - SnapshotVO snapshot = _snapshotDao.acquireInLockTable(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Can not acquire lock for snapshot: " + ss); - } - try { - try { - stateTransitTo(snapshot, Snapshot.Event.BackupToSecondary); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } + public SnapshotVO getParentSnapshot(VolumeInfo volume, Snapshot snapshot) { + long preId = _snapshotDao.getLastSnapshot(volume.getId(), snapshot.getId()); - long volumeId = snapshot.getVolumeId(); - VolumeVO volume = _volsDao.lockRow(volumeId, true); - - Long dcId = volume.getDataCenterId(); - Long accountId = volume.getAccountId(); - - HostVO secHost = getSecHost(volumeId, volume.getDataCenterId()); - - String secondaryStoragePoolUrl = secHost.getStorageUrl(); - String snapshotUuid = snapshot.getPath(); - // In order to verify that the snapshot is not empty, - // we check if the parent of the snapshot is not the same as the parent of the previous snapshot. - // We pass the uuid of the previous snapshot to the plugin to verify this. 
- SnapshotVO prevSnapshot = null; - String prevSnapshotUuid = null; - String prevBackupUuid = null; - - - SwiftTO swift = _swiftMgr.getSwiftTO(); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - long prevSnapshotId = snapshot.getPrevSnapshotId(); - if (prevSnapshotId > 0) { - prevSnapshot = _snapshotDao.findByIdIncludingRemoved(prevSnapshotId); - if ( prevSnapshot.getBackupSnapshotId() != null && swift == null) { - if (prevSnapshot.getVersion() != null && prevSnapshot.getVersion().equals("2.2")) { - prevBackupUuid = prevSnapshot.getBackupSnapshotId(); - prevSnapshotUuid = prevSnapshot.getPath(); - } - } else if ((prevSnapshot.getSwiftId() != null && swift != null) - || (prevSnapshot.getS3Id() != null && s3 != null)) { - prevBackupUuid = prevSnapshot.getBackupSnapshotId(); - prevSnapshotUuid = prevSnapshot.getPath(); - } - } - boolean isVolumeInactive = _storageMgr.volumeInactive(volume); - String vmName = _storageMgr.getVmNameOnVolume(volume); - StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); - BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(secondaryStoragePoolUrl, dcId, accountId, volumeId, snapshot.getId(), volume.getPath(), srcPool, snapshotUuid, - snapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); - - if ( swift != null ) { - backupSnapshotCommand.setSwift(swift); - } else if (s3 != null) { - backupSnapshotCommand.setS3(s3); - } - - String backedUpSnapshotUuid = null; - // By default, assume failed. - boolean backedUp = false; - BackupSnapshotAnswer answer = (BackupSnapshotAnswer) sendToPool(volume, backupSnapshotCommand); - if (answer != null && answer.getResult()) { - backedUpSnapshotUuid = answer.getBackupSnapshotName(); - if (backedUpSnapshotUuid != null) { - backedUp = true; - } - } else if (answer != null) { - s_logger.error(answer.getDetails()); - } - // Update the status in all cases. 
- Transaction txn = Transaction.currentTxn(); - txn.start(); - - if (backedUp) { - if (backupSnapshotCommand.getSwift() != null ) { - snapshot.setSwiftId(swift.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } else if (backupSnapshotCommand.getS3() != null) { - snapshot.setS3Id(s3.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } else { - snapshot.setSecHostId(secHost.getId()); - snapshot.setBackupSnapshotId(backedUpSnapshotUuid); - } - if (answer.isFull()) { - snapshot.setPrevSnapshotId(0); - } - try { - stateTransitTo(snapshot, Snapshot.Event.OperationSucceeded); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } - - } else { - try { - stateTransitTo(snapshot, Snapshot.Event.OperationFailed); - } catch (NoTransitionException nte) { - s_logger.debug("Failed to update the state of snapshot while backing up snapshot"); - } - s_logger.warn("Failed to back up snapshot on secondary storage, deleting the record from the DB"); - _snapshotDao.remove(snapshotId); - } - txn.commit(); - - return backedUp; - } finally { - if (snapshot != null) { - _snapshotDao.releaseFromLockTable(snapshotId); - } - } - - } - - private HostVO getSecHost(long volumeId, long dcId) { - Long id = _snapshotDao.getSecHostId(volumeId); - if ( id != null) { - return _hostDao.findById(id); - } - return _storageMgr.getSecondaryStorageHost(dcId); + SnapshotVO preSnapshotVO = null; + if (preId != 0 && !(volume.getLastPoolId() != null && !volume.getLastPoolId().equals(volume.getPoolId()))) { + preSnapshotVO = _snapshotDao.findByIdIncludingRemoved(preId); + } + + return preSnapshotVO; } private Long getSnapshotUserId() { @@ -746,11 +472,15 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return userId; } - @Override - @DB - public void postCreateSnapshot(Long volumeId, Long snapshotId, Long policyId, boolean backedUp) { + private void postCreateSnapshot(Long 
volumeId, Long snapshotId, Long policyId) { Long userId = getSnapshotUserId(); SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + if (policyId != Snapshot.MANUAL_POLICY_ID) { + SnapshotScheduleVO snapshotSchedule = _snapshotScheduleDao.getCurrentSchedule(volumeId, policyId, true); + assert snapshotSchedule != null; + snapshotSchedule.setSnapshotId(snapshotId); + _snapshotScheduleDao.update(snapshotSchedule.getId(), snapshotSchedule); + } if (snapshot != null && snapshot.isRecursive()) { postCreateRecurringSnapshotForPolicy(userId, volumeId, snapshotId, policyId); @@ -760,7 +490,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, private void postCreateRecurringSnapshotForPolicy(long userId, long volumeId, long snapshotId, long policyId) { // Use count query SnapshotVO spstVO = _snapshotDao.findById(snapshotId); - Type type = spstVO.getType(); + Type type = spstVO.getRecurringType(); int maxSnaps = type.getMax(); List snaps = listSnapsforVolumeType(volumeId, type); @@ -772,7 +502,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); - if(deleteSnapshotInternal(oldSnapId)){ + if(deleteSnapshot(oldSnapId)){ //log Snapshot delete event ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE, "Successfully deleted oldest snapshot: " + oldSnapId, 0); } @@ -787,104 +517,44 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, Account caller = UserContext.current().getCaller(); // Verify parameters - Snapshot snapshotCheck = _snapshotDao.findById(snapshotId); + SnapshotInfo snapshotCheck = this.snapshotFactory.getSnapshot(snapshotId); if (snapshotCheck == null) { throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); } _accountMgr.checkAccess(caller, null, true, snapshotCheck); - if( !Snapshot.State.BackedUp.equals(snapshotCheck.getState() ) ) { - throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId + " due to it is not in BackedUp Status"); + SnapshotStrategy strategy = null; + for (SnapshotStrategy st : snapshotStrategies) { + if (st.canHandle(snapshotCheck)) { + strategy = st; + break; + } + } + try { + boolean result = strategy.deleteSnapshot(snapshotCheck); + if (result) { + if (snapshotCheck.getState() == Snapshot.State.BackedUp) { + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshotCheck.getAccountId(), + snapshotCheck.getDataCenterId(), snapshotId, snapshotCheck.getName(), null, null, 0L, + snapshotCheck.getClass().getName(), snapshotCheck.getUuid()); + } + _resourceLimitMgr.decrementResourceCount(snapshotCheck.getAccountId(), ResourceType.snapshot); + } + return result; + } catch (Exception e) { + s_logger.debug("Failed to delete snapshot: " + snapshotCheck.getId() + ":" + e.toString()); + throw new CloudRuntimeException("Failed to delete snapshot:" + e.toString()); } - - return deleteSnapshotInternal(snapshotId); } - @DB - private boolean 
deleteSnapshotInternal(Long snapshotId) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Calling deleteSnapshot for snapshotId: " + snapshotId); - } - SnapshotVO lastSnapshot = null; - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot.getBackupSnapshotId() != null) { - List snaps = _snapshotDao.listByBackupUuid(snapshot.getVolumeId(), snapshot.getBackupSnapshotId()); - if (snaps != null && snaps.size() > 1) { - snapshot.setBackupSnapshotId(null); - _snapshotDao.update(snapshot.getId(), snapshot); - } - } - - Transaction txn = Transaction.currentTxn(); - txn.start(); - _snapshotDao.remove(snapshotId); - if (snapshot.getState() == Snapshot.State.BackedUp) { - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_DELETE, snapshot.getAccountId(), - snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, 0L, - snapshot.getClass().getName(), snapshot.getUuid()); - } - _resourceLimitMgr.decrementResourceCount(snapshot.getAccountId(), ResourceType.snapshot); - txn.commit(); - - long lastId = snapshotId; - boolean destroy = false; - while (true) { - lastSnapshot = _snapshotDao.findNextSnapshot(lastId); - if (lastSnapshot == null) { - // if all snapshots after this snapshot in this chain are removed, remove those snapshots. - destroy = true; - break; - } - if (lastSnapshot.getRemoved() == null) { - // if there is one child not removed, then can not remove back up snapshot. 
- break; - } - lastId = lastSnapshot.getId(); - } - if (destroy) { - lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); - while (lastSnapshot.getRemoved() != null) { - String BackupSnapshotId = lastSnapshot.getBackupSnapshotId(); - if (BackupSnapshotId != null) { - List snaps = _snapshotDao.listByBackupUuid(lastSnapshot.getVolumeId(), BackupSnapshotId); - if (snaps != null && snaps.size() > 1) { - lastSnapshot.setBackupSnapshotId(null); - _snapshotDao.update(lastSnapshot.getId(), lastSnapshot); - } else { - if (destroySnapshotBackUp(lastId)) { - - } else { - s_logger.debug("Destroying snapshot backup failed " + lastSnapshot); - break; - } - } - } - lastId = lastSnapshot.getPrevSnapshotId(); - if (lastId == 0) { - break; - } - lastSnapshot = _snapshotDao.findByIdIncludingRemoved(lastId); - } - } - return true; - } - - @Override - @DB - public boolean destroySnapshot(long userId, long snapshotId, long policyId) { - return true; - } - - - @Override - public HostVO getSecondaryStorageHost(SnapshotVO snapshot) { + private HostVO getSecondaryStorageHost(SnapshotVO snapshot) { HostVO secHost = null; if( snapshot.getSwiftId() == null || snapshot.getSwiftId() == 0) { secHost = _hostDao.findById(snapshot.getSecHostId()); } else { Long dcId = snapshot.getDataCenterId(); - secHost = _storageMgr.getSecondaryStorageHost(dcId); + secHost = this.templateMgr.getSecondaryStorageHost(dcId); } return secHost; } @@ -898,51 +568,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, throw new CloudRuntimeException("Can not find secondary storage"); } - @Override - @DB - public boolean destroySnapshotBackUp(long snapshotId) { - boolean success = false; - String details; - SnapshotVO snapshot = _snapshotDao.findByIdIncludingRemoved(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException("Destroying snapshot " + snapshotId + " backup failed due to unable to find snapshot "); - } - String secondaryStoragePoolUrl = 
getSecondaryStorageURL(snapshot); - Long dcId = snapshot.getDataCenterId(); - Long accountId = snapshot.getAccountId(); - Long volumeId = snapshot.getVolumeId(); - - String backupOfSnapshot = snapshot.getBackupSnapshotId(); - if (backupOfSnapshot == null) { - return true; - } - SwiftTO swift = _swiftMgr.getSwiftTO(snapshot.getSwiftId()); - S3TO s3 = _s3Mgr.getS3TO(); - - checkObjectStorageConfiguration(swift, s3); - - DeleteSnapshotBackupCommand cmd = new DeleteSnapshotBackupCommand( - swift, s3, secondaryStoragePoolUrl, dcId, accountId, volumeId, - backupOfSnapshot, false); - Answer answer = _agentMgr.sendToSSVM(dcId, cmd); - - if ((answer != null) && answer.getResult()) { - snapshot.setBackupSnapshotId(null); - _snapshotDao.update(snapshotId, snapshot); - success = true; - details = "Successfully deleted snapshot " + snapshotId + " for volumeId: " + volumeId; - s_logger.debug(details); - } else if (answer != null) { - details = "Failed to destroy snapshot id:" + snapshotId + " for volume: " + volumeId + " due to "; - if (answer.getDetails() != null) { - details += answer.getDetails(); - } - s_logger.error(details); - } - return success; - - } - @Override public Pair, Integer> listSnapshots(ListSnapshotsCmd cmd) { Long volumeId = cmd.getVolumeId(); @@ -1118,7 +743,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, List snapshots = listSnapsforVolume(volumeId); for (SnapshotVO snapshot : snapshots) { if (_snapshotDao.expunge(snapshot.getId())) { - if (snapshot.getType() == Type.MANUAL) { + if (snapshot.getRecurringType() == Type.MANUAL) { _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.snapshot); } @@ -1228,8 +853,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return policy; } - @Override - public boolean deletePolicy(long userId, Long policyId) { + protected boolean deletePolicy(long userId, Long policyId) { SnapshotPolicyVO snapshotPolicy = 
_snapshotPolicyDao.findById(policyId); _snapSchedMgr.removeSchedule(snapshotPolicy.getVolumeId(), snapshotPolicy.getId()); return _snapshotPolicyDao.remove(policyId); @@ -1247,31 +871,16 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return new Pair, Integer>(result.first(), result.second()); } - @Override - public List listPoliciesforVolume(long volumeId) { + + private List listPoliciesforVolume(long volumeId) { return _snapshotPolicyDao.listByVolumeId(volumeId); } - - @Override - public List listPoliciesforSnapshot(long snapshotId) { - SearchCriteria sc = PoliciesForSnapSearch.create(); - sc.setJoinParameters("policyRef", "snapshotId", snapshotId); - return _snapshotPolicyDao.search(sc, null); - } - - @Override - public List listSnapsforPolicy(long policyId, Filter filter) { - SearchCriteria sc = PolicySnapshotSearch.create(); - sc.setJoinParameters("policy", "policyId", policyId); - return _snapshotDao.search(sc, filter); - } - - @Override - public List listSnapsforVolume(long volumeId) { + + private List listSnapsforVolume(long volumeId) { return _snapshotDao.listByVolumeId(volumeId); } - public List listSnapsforVolumeType(long volumeId, Type type) { + private List listSnapsforVolumeType(long volumeId, Type type) { return _snapshotDao.listByVolumeIdType(volumeId, type); } @@ -1290,9 +899,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - /** - * {@inheritDoc} - */ @Override public List findRecurringSnapshotSchedule(ListRecurringSnapshotScheduleCmd cmd) { Long volumeId = cmd.getVolumeId(); @@ -1331,12 +937,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return snapshotSchedules; } - @Override - public SnapshotPolicyVO getPolicyForVolume(long volumeId) { - return _snapshotPolicyDao.findOneByVolume(volumeId); - } - - public Type getSnapshotType(Long policyId) { + private Type getSnapshotType(Long policyId) { if 
(policyId.equals(Snapshot.MANUAL_POLICY_ID)) { return Type.MANUAL; } else { @@ -1346,7 +947,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - public Type getSnapshotType(IntervalType intvType) { + private Type getSnapshotType(IntervalType intvType) { if (intvType.equals(IntervalType.HOURLY)) { return Type.HOURLY; } else if (intvType.equals(IntervalType.DAILY)) { @@ -1446,15 +1047,11 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, Type.DAILY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.daily"), DAILYMAX)); Type.WEEKLY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.weekly"), WEEKLYMAX)); Type.MONTHLY.setMax(NumbersUtil.parseInt(_configDao.getValue("snapshot.max.monthly"), MONTHLYMAX)); - _deltaSnapshotMax = NumbersUtil.parseInt(_configDao.getValue("snapshot.delta.max"), DELTAMAX); _totalRetries = NumbersUtil.parseInt(_configDao.getValue("total.retries"), 4); _pauseInterval = 2 * NumbersUtil.parseInt(_configDao.getValue("ping.interval"), 60); s_logger.info("Snapshot Manager is configured."); - _snapshotFsm = Snapshot.State.getStateMachine(); - _snapshotFsm.registerListener(new SnapshotStateListener()); - return true; } @@ -1515,27 +1112,9 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return success; } - - private boolean hostSupportSnapsthot(HostVO host) { - if (host.getHypervisorType() != HypervisorType.KVM) { - return true; - } - // Determine host capabilities - String caps = host.getCapabilities(); - - if (caps != null) { - String[] tokens = caps.split(","); - for (String token : tokens) { - if (token.contains("snapshot")) { - return true; - } - } - } - return false; - } @Override - public boolean canOperateOnVolume(VolumeVO volume) { + public boolean canOperateOnVolume(Volume volume) { List snapshots = _snapshotDao.listByStatus(volume.getId(), Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, 
Snapshot.State.BackingUp); if (snapshots.size() > 0) { @@ -1543,8 +1122,4 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } return true; } - - protected boolean stateTransitTo(Snapshot snapshot, Snapshot.Event e) throws NoTransitionException { - return _snapshotFsm.transitTo(snapshot, e, null, _snapshotDao); - } } diff --git a/server/src/com/cloud/storage/upload/UploadMonitor.java b/server/src/com/cloud/storage/upload/UploadMonitor.java index aada1f43c41..1c3590e91e2 100755 --- a/server/src/com/cloud/storage/upload/UploadMonitor.java +++ b/server/src/com/cloud/storage/upload/UploadMonitor.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.storage.upload; + import com.cloud.async.AsyncJobManager; import com.cloud.host.HostVO; import com.cloud.storage.Upload.Mode; diff --git a/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java b/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java index 97639564967..a8e1393d6da 100644 --- a/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java +++ b/server/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java @@ -17,6 +17,7 @@ package com.cloud.tags.dao; import java.util.List; + import javax.ejb.Local; import org.springframework.stereotype.Component; diff --git a/server/src/com/cloud/template/HyervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java similarity index 86% rename from server/src/com/cloud/template/HyervisorTemplateAdapter.java rename to server/src/com/cloud/template/HypervisorTemplateAdapter.java index fe6bc2a86f0..142642193b6 100755 --- a/server/src/com/cloud/template/HyervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -22,12 +22,22 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; import java.util.List; +import java.util.concurrent.ExecutionException; import javax.ejb.Local; import javax.inject.Inject; import 
org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -43,9 +53,9 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.host.HostVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.download.DownloadMonitor; @@ -53,6 +63,8 @@ import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.user.Account; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmVO; + import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; @@ -63,14 +75,18 @@ import javax.ejb.Local; import java.net.*; import java.util.List; -@Component @Local(value=TemplateAdapter.class) -public class HyervisorTemplateAdapter 
extends TemplateAdapterBase implements TemplateAdapter { - private final static Logger s_logger = Logger.getLogger(HyervisorTemplateAdapter.class); +public class HypervisorTemplateAdapter extends TemplateAdapterBase implements TemplateAdapter { + private final static Logger s_logger = Logger.getLogger(HypervisorTemplateAdapter.class); @Inject DownloadMonitor _downloadMonitor; @Inject SecondaryStorageVmManager _ssvmMgr; @Inject AgentManager _agentMgr; + @Inject DataStoreManager storeMgr; + @Inject ImageService imageService; + @Inject ImageDataFactory imageFactory; + @Inject TemplateManager templateMgr; + @Override public String getName() { return TemplateAdapterType.Hypervisor.getName(); @@ -79,7 +95,7 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem private String validateUrl(String url) { try { URI uri = new URI(url); - if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") + if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("http") && !uri.getScheme().equalsIgnoreCase("https") && !uri.getScheme().equalsIgnoreCase("file"))) { throw new IllegalArgumentException("Unsupported scheme for url: " + url); } @@ -100,34 +116,34 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } catch (UnknownHostException uhe) { throw new IllegalArgumentException("Unable to resolve " + host); } - + return uri.toString(); } catch (URISyntaxException e) { throw new IllegalArgumentException("Invalid URL " + url); } } - + @Override public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - + if((!url.toLowerCase().endsWith("iso"))&&(!url.toLowerCase().endsWith("iso.zip"))&&(!url.toLowerCase().endsWith("iso.bz2")) &&(!url.toLowerCase().endsWith("iso.gz"))){ throw new InvalidParameterValueException("Please specify a valid iso"); } - + profile.setUrl(validateUrl(url)); return 
profile; } - + @Override public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocationException { TemplateProfile profile = super.prepare(cmd); String url = profile.getUrl(); - + if((!url.toLowerCase().endsWith("vhd"))&&(!url.toLowerCase().endsWith("vhd.zip")) - &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) + &&(!url.toLowerCase().endsWith("vhd.bz2"))&&(!url.toLowerCase().endsWith("vhd.gz")) &&(!url.toLowerCase().endsWith("qcow2"))&&(!url.toLowerCase().endsWith("qcow2.zip")) &&(!url.toLowerCase().endsWith("qcow2.bz2"))&&(!url.toLowerCase().endsWith("qcow2.gz")) &&(!url.toLowerCase().endsWith("ova"))&&(!url.toLowerCase().endsWith("ova.zip")) @@ -135,40 +151,51 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem &&(!url.toLowerCase().endsWith("img"))&&(!url.toLowerCase().endsWith("raw"))){ throw new InvalidParameterValueException("Please specify a valid "+ cmd.getFormat().toLowerCase()); } - + if ((cmd.getFormat().equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) || (cmd.getFormat().equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) || (cmd.getFormat().equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) || (cmd.getFormat().equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) { throw new InvalidParameterValueException("Please specify a valid URL. 
URL:" + url + " is an invalid for the format " + cmd.getFormat().toLowerCase()); } - + profile.setUrl(validateUrl(url)); return profile; } - + @Override public VMTemplateVO create(TemplateProfile profile) { VMTemplateVO template = persistTemplate(profile); - + if (template == null) { throw new CloudRuntimeException("Unable to persist the template " + profile.getTemplate()); } + + DataStore imageStore = this.storeMgr.getDataStore(profile.getImageStoreId(), DataStoreRole.Image); - _downloadMonitor.downloadTemplateToStorage(template, profile.getZoneId()); + AsyncCallFuture future = this.imageService.createTemplateAsync(this.imageFactory.getTemplate(template.getId()), imageStore); + try { + future.get(); + } catch (InterruptedException e) { + s_logger.debug("create template Failed", e); + throw new CloudRuntimeException("create template Failed", e); + } catch (ExecutionException e) { + s_logger.debug("create template Failed", e); + throw new CloudRuntimeException("create template Failed", e); + } _resourceLimitMgr.incrementResourceCount(profile.getAccountId(), ResourceType.template); - + return template; } @Override @DB public boolean delete(TemplateProfile profile) { boolean success = true; - + VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); Long zoneId = profile.getZoneId(); Long templateId = template.getId(); - + String zoneName; List secondaryStorageHosts; if (!template.isCrossZones() && zoneId != null) { @@ -179,9 +206,9 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem zoneName = "(all zones)"; secondaryStorageHosts = _ssvmMgr.listSecondaryStorageHostsInAllZones(); } - + s_logger.debug("Attempting to mark template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - + // Make sure the template is downloaded to all the necessary secondary storage hosts for (HostVO secondaryStorageHost : secondaryStorageHosts) { long hostId = secondaryStorageHost.getId(); @@ -194,16 +221,16 @@ 
public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } } } - + Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId()); String eventType = ""; - + if (template.getFormat().equals(ImageFormat.ISO)){ eventType = EventTypes.EVENT_ISO_DELETE; } else { eventType = EventTypes.EVENT_TEMPLATE_DELETE; } - + // Iterate through all necessary secondary storage hosts and mark the template on each host as destroyed for (HostVO secondaryStorageHost : secondaryStorageHosts) { long hostId = secondaryStorageHost.getId(); @@ -221,6 +248,9 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem templateHostVO.setDestroyed(true); _tmpltHostDao.update(templateHostVO.getId(), templateHostVO); String installPath = templateHostVO.getInstallPath(); + List userVmUsingIso = _userVmDao.listByIsoId(templateId); + //check if there is any VM using this ISO. + if (userVmUsingIso == null || userVmUsingIso.isEmpty()) { if (installPath != null) { Answer answer = _agentMgr.sendToSecStorage(secondaryStorageHost, new DeleteTemplateCommand(secondaryStorageHost.getStorageUrl(), installPath)); @@ -232,9 +262,10 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } } else { _tmpltHostDao.remove(templateHostVO.getId()); + } } VMTemplateZoneVO templateZone = _tmpltZoneDao.findByZoneTemplate(sZoneId, templateId); - + if (templateZone != null) { _tmpltZoneDao.remove(templateZone.getId()); } @@ -244,18 +275,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem } } } - + if (!success) { break; } } - + s_logger.debug("Successfully marked template host refs for template: " + template.getName() + " as destroyed in zone: " + zoneName); - + // If there are no more non-destroyed template host entries for this template, delete it if (success && (_tmpltHostDao.listByTemplateId(templateId).size() == 0)) { long accountId = template.getAccountId(); - + VMTemplateVO lock = 
_tmpltDao.acquireInLockTable(templateId); try { @@ -272,18 +303,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem _tmpltDao.releaseFromLockTable(lock.getId()); } } - + s_logger.debug("Removed template: " + template.getName() + " because all of its template host refs were marked as destroyed."); } - + return success; } - + public TemplateProfile prepareDelete(DeleteTemplateCmd cmd) { TemplateProfile profile = super.prepareDelete(cmd); VMTemplateVO template = (VMTemplateVO)profile.getTemplate(); Long zoneId = profile.getZoneId(); - + if (template.getTemplateType() == TemplateType.SYSTEM) { throw new InvalidParameterValueException("The DomR template cannot be deleted."); } @@ -291,18 +322,18 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem if (zoneId != null && (_ssvmMgr.findSecondaryStorageHost(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage host in the specified zone."); } - + return profile; } - + public TemplateProfile prepareDelete(DeleteIsoCmd cmd) { TemplateProfile profile = super.prepareDelete(cmd); Long zoneId = profile.getZoneId(); - + if (zoneId != null && (_ssvmMgr.findSecondaryStorageHost(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage host in the specified zone."); } - + return profile; } } diff --git a/server/src/com/cloud/template/TemplateAdapter.java b/server/src/com/cloud/template/TemplateAdapter.java index 19cfef039de..1f8f491cb25 100755 --- a/server/src/com/cloud/template/TemplateAdapter.java +++ b/server/src/com/cloud/template/TemplateAdapter.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; + import 
com.cloud.exception.ResourceAllocationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.TemplateProfile; @@ -65,5 +66,5 @@ public interface TemplateAdapter extends Adapter { public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, - String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled) throws ResourceAllocationException; + String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid) throws ResourceAllocationException; } diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index fa677acdc5c..1b114250621 100755 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -20,16 +20,18 @@ import java.util.List; import java.util.Map; import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; -import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; -import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; -import org.apache.log4j.Logger; import org.apache.cloudstack.api.ApiConstants; -import com.cloud.api.ApiDBUtils; +import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.log4j.Logger; + +import com.cloud.api.ApiDBUtils; import com.cloud.configuration.Resource.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.DataCenterVO; @@ -43,9 +45,9 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; import com.cloud.storage.GuestOS; -import com.cloud.storage.TemplateProfile; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateHostDao; @@ -61,6 +63,7 @@ import com.cloud.utils.EnumUtils; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; public abstract class TemplateAdapterBase extends AdapterBase implements TemplateAdapter { private final static Logger s_logger = Logger.getLogger(TemplateAdapterBase.class); @@ -75,7 +78,10 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat protected @Inject VMTemplateZoneDao _tmpltZoneDao; protected @Inject UsageEventDao _usageEventDao; protected @Inject HostDao _hostDao; + protected @Inject UserVmDao _userVmDao; protected @Inject ResourceLimitService _resourceLimitMgr; + protected @Inject DataStoreManager storeMgr; + @Inject TemplateManager templateMgr; @Override public boolean stop() { @@ -94,16 +100,26 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, String accountName, Long domainId, String chksum, Boolean bootable, Map details) throws 
ResourceAllocationException { return prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId, hypervisorType, - chksum, bootable, null, null, details, false); + chksum, bootable, null, null, details, false, null); } public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, Long zoneId, HypervisorType hypervisorType, - String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled) throws ResourceAllocationException { + String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, + String imageStoreUuid) throws ResourceAllocationException { //Long accountId = null; // parameters verification + String storeUuid = imageStoreUuid; + if (storeUuid != null) { + DataStore store = this.storeMgr.getDataStore(storeUuid, DataStoreRole.Image); + if (store == null) { + throw new InvalidParameterValueException("invalide image store uuid" + storeUuid); + } + + } + if (isPublic == null) { isPublic = Boolean.FALSE; } @@ -197,10 +213,16 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } } + DataStore imageStore = this.templateMgr.getImageStore(imageStoreUuid, zoneId); + if (imageStore == null) { + throw new IllegalArgumentException("Cann't find an image store"); + } + Long imageStoreId = imageStore.getId(); + Long id = _tmpltDao.getNextInSequence(Long.class, "id"); UserContext.current().setEventDetails("Id: " +id+ " name: " + name); return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, - featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), 
templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled); + featured, isExtractable, imgfmt, guestOSId, zoneId, hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details, sshkeyEnabled, imageStoreId); } @Override @@ -210,10 +232,12 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat Account owner = _accountMgr.getAccount(cmd.getEntityOwnerId()); _accountMgr.checkAccess(caller, null, true, owner); + + return prepare(false, UserContext.current().getCallerUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), - cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled()); + cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), cmd.getImageStoreUuid()); } public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationException { @@ -224,7 +248,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat return prepare(true, UserContext.current().getCallerUserId(), cmd.getIsoName(), cmd.getDisplayText(), 64, false, true, cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), - cmd.getZoneId(), HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, owner, null, false); + cmd.getZoneId(), HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null, owner, null, false, cmd.getImageStoreUuid()); } protected VMTemplateVO persistTemplate(TemplateProfile profile) { @@ -234,7 +258,8 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat profile.getBits(), profile.getAccountId(), 
profile.getCheckSum(), profile.getDisplayText(), profile.getPasswordEnabled(), profile.getGuestOsId(), profile.getBootable(), profile.getHypervisorType(), profile.getTemplateTag(), profile.getDetails(), profile.getSshKeyEnabled()); - + + template.setImageDataStoreId(profile.getImageStoreId()); if (zoneId == null || zoneId.longValue() == -1) { List dcs = _dcDao.listAll(); diff --git a/server/src/com/cloud/template/TemplateManager.java b/server/src/com/cloud/template/TemplateManager.java index ad145a911bf..19ba3b52734 100755 --- a/server/src/com/cloud/template/TemplateManager.java +++ b/server/src/com/cloud/template/TemplateManager.java @@ -18,16 +18,19 @@ package com.cloud.template; import java.util.List; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.dc.DataCenterVO; import com.cloud.exception.InternalErrorException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; import com.cloud.storage.StoragePool; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; +import com.cloud.utils.Pair; /** * TemplateManager manages the templates stored on secondary storage. It is responsible for creating private/public templates. 
@@ -91,4 +94,27 @@ public interface TemplateManager extends TemplateService{ VMTemplateHostVO prepareISOForCreate(VMTemplateVO template, StoragePool pool); + + VMTemplateHostVO findVmTemplateHost(long templateId, + StoragePool pool); + + Pair getAbsoluteIsoPath(long templateId, long dataCenterId); + + String getSecondaryStorageURL(long zoneId); + + HostVO getSecondaryStorageHost(long zoneId, long tmpltId); + + VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, + boolean readyOnly); + + HostVO getSecondaryStorageHost(long zoneId); + + List getSecondaryStorageHosts(long zoneId); + + Long getTemplateSize(long templateId, long zoneId); + + DataStore getImageStore(String storeUuid, Long zoneId); + + String getChecksum(Long hostId, String templatePath); + } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index f9cf277842d..d843dbc9b8d 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -26,6 +26,9 @@ import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -35,22 +38,51 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.BaseListTemplateOrIsoPermissionsCmd; import org.apache.cloudstack.api.BaseUpdateTemplateOrIsoPermissionsCmd; -import org.apache.cloudstack.api.command.user.iso.*; -import org.apache.cloudstack.api.command.user.template.*; +import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; +import 
org.apache.cloudstack.api.command.user.iso.ListIsoPermissionsCmd; +import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; +import org.apache.cloudstack.api.command.user.iso.UpdateIsoPermissionsCmd; +import org.apache.cloudstack.api.command.user.template.CopyTemplateCmd; +import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; +import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; +import org.apache.cloudstack.api.command.user.template.ListTemplatePermissionsCmd; +import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.ImageService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; import 
com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.AttachIsoCommand; +import com.cloud.agent.api.ComputeChecksumCommand; import com.cloud.agent.api.downloadTemplateFromSwiftToSecondaryStorageCommand; +import com.cloud.agent.api.uploadTemplateToSwiftFromSecondaryStorageCommand; import com.cloud.agent.api.storage.DestroyCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.to.SwiftTO; -import com.cloud.agent.api.uploadTemplateToSwiftFromSecondaryStorageCommand; + +import com.cloud.api.ApiDBUtils; import com.cloud.async.AsyncJobManager; import com.cloud.async.AsyncJobVO; import com.cloud.configuration.Config; @@ -64,12 +96,14 @@ import com.cloud.domain.dao.DomainDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; +import com.cloud.event.UsageEventVO; import com.cloud.event.dao.EventDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; @@ -77,34 +111,71 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; -import com.cloud.storage.*; + +import com.cloud.resource.ResourceManager; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.LaunchPermissionVO; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; + import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import 
com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; import com.cloud.storage.TemplateProfile; import com.cloud.storage.Upload; import com.cloud.storage.Upload.Type; + +import com.cloud.storage.UploadVO; +import com.cloud.storage.VMTemplateHostVO; +import com.cloud.storage.VMTemplateS3VO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.dao.*; +import com.cloud.storage.VMTemplateSwiftVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.LaunchPermissionDao; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.UploadDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.storage.dao.VMTemplateHostDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VMTemplateS3Dao; +import com.cloud.storage.dao.VMTemplateSwiftDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.download.DownloadMonitor; import com.cloud.storage.s3.S3Manager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.swift.SwiftManager; import com.cloud.storage.upload.UploadMonitor; import com.cloud.template.TemplateAdapter.TemplateAdapterType; -import com.cloud.user.*; + +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; +import com.cloud.user.AccountVO; +import 
com.cloud.user.ResourceLimitService; +import com.cloud.user.User; +import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserAccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ManagerBase; - import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.*; import com.cloud.utils.exception.CloudRuntimeException; @@ -123,8 +194,10 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject VMTemplateHostDao _tmpltHostDao; @Inject VMTemplatePoolDao _tmpltPoolDao; @Inject VMTemplateZoneDao _tmpltZoneDao; + @Inject + protected VMTemplateDetailsDao _templateDetailsDao; @Inject VMInstanceDao _vmInstanceDao; - @Inject StoragePoolDao _poolDao; + @Inject PrimaryDataStoreDao _poolDao; @Inject StoragePoolHostDao _poolHostDao; @Inject EventDao _eventDao; @Inject DownloadMonitor _downloadMonitor; @@ -153,6 +226,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, ClusterDao _clusterDao; @Inject DomainDao _domainDao; @Inject UploadDao _uploadDao; + @Inject + protected GuestOSDao _guestOSDao; long _routerTemplateId = -1; @Inject StorageManager _storageMgr; @Inject AsyncJobManager _asyncMgr; @@ -164,6 +239,20 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Inject SecondaryStorageVmManager _ssvmMgr; @Inject LaunchPermissionDao _launchPermissionDao; @Inject ProjectManager _projectMgr; + @Inject + VolumeDataFactory volFactory; + @Inject + ImageDataFactory tmplFactory; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + ImageService imageSvr; + @Inject + DataStoreManager dataStoreMgr; + @Inject + protected ResourceManager _resourceMgr; + @Inject VolumeManager volumeMgr; + @Inject VMTemplateHostDao templateHostDao; int 
_primaryStorageDownloadWait; @@ -217,7 +306,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if(!_accountService.isRootAdmin(account.getType())){ throw new PermissionDeniedException("Parameter templatetag can only be specified by a Root Admin, permission denied"); } - } + } + TemplateAdapter adapter = getAdapter(HypervisorType.getType(cmd.getHypervisor())); TemplateProfile profile = adapter.prepare(cmd); VMTemplateVO template = adapter.create(profile); @@ -228,6 +318,22 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new CloudRuntimeException("Failed to create a template"); } } + + @Override + public DataStore getImageStore(String storeUuid, Long zoneId) { + DataStore imageStore = null; + if (storeUuid != null) { + imageStore = this.dataStoreMgr.getDataStore(storeUuid, DataStoreRole.Image); + } else { + List stores = this.dataStoreMgr.getImageStores(new ZoneScope(zoneId)); + if (stores.size() > 1) { + throw new CloudRuntimeException("multiple image stores, don't know which one to use"); + } + imageStore = stores.get(0); + } + + return imageStore; + } @Override @ActionEvent(eventType = EventTypes.EVENT_ISO_EXTRACT, eventDescription = "extracting ISO", async = true) @@ -330,7 +436,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _accountMgr.checkAccess(caller, AccessType.ModifyEntry, true, template); - List sservers = _storageMgr.getSecondaryStorageHosts(zoneId); + List sservers = getSecondaryStorageHosts(zoneId); VMTemplateHostVO tmpltHostRef = null; if (sservers != null) { @@ -425,7 +531,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, private void reallyRun() { s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId()); - prepareTemplateForCreate(template, pool); + StoragePool pol = (StoragePool)dataStoreMgr.getPrimaryDataStore(pool.getId()); + 
prepareTemplateForCreate(template, pol); s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId()); } }); @@ -539,8 +646,8 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override @DB - public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO template, StoragePool pool) { - template = _tmpltDao.findById(template.getId(), true); + public VMTemplateStoragePoolVO prepareTemplateForCreate(VMTemplateVO templ, StoragePool pool) { + VMTemplateVO template = _tmpltDao.findById(templ.getId(), true); long poolId = pool.getId(); long templateId = template.getId(); @@ -564,7 +671,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { String result = downloadTemplateFromSwiftToSecondaryStorage(dcId, templateId); @@ -578,7 +685,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; @@ -671,6 +778,61 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return null; } + + + + @Override + public VMTemplateHostVO findVmTemplateHost(long templateId, + StoragePool pool) { + long dcId = pool.getDataCenterId(); + Long podId = pool.getPodId(); + + List secHosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(dcId); + + + if 
(secHosts.size() == 1) { + VMTemplateHostVO templateHostVO = this._tmpltHostDao + .findByHostTemplate(secHosts.get(0).getId(), templateId); + return templateHostVO; + } + if (podId != null) { + List templHosts = this._tmpltHostDao + .listByTemplateStatus(templateId, dcId, podId, + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templHosts != null && !templHosts.isEmpty()) { + Collections.shuffle(templHosts); + return templHosts.get(0); + } + } + List templHosts = this._tmpltHostDao + .listByTemplateStatus(templateId, dcId, + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templHosts != null && !templHosts.isEmpty()) { + Collections.shuffle(templHosts); + return templHosts.get(0); + } + return null; + } + + @Override + public String getChecksum(Long hostId, String templatePath) { + HostVO ssHost = _hostDao.findById(hostId); + Host.Type type = ssHost.getType(); + if (type != Host.Type.SecondaryStorage + && type != Host.Type.LocalSecondaryStorage) { + return null; + } + String secUrl = ssHost.getStorageUrl(); + Answer answer; + answer = _agentMgr.sendToSecStorage(ssHost, new ComputeChecksumCommand( + secUrl, templatePath)); + if (answer != null && answer.getResult()) { + return answer.getDetails(); + } + return null; + } + @Override @DB public VMTemplateHostVO prepareISOForCreate(VMTemplateVO template, StoragePool pool) { @@ -684,7 +846,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, long templateStoragePoolRefId; String origUrl = null; - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { String result = downloadTemplateFromSwiftToSecondaryStorage(dcId, templateId); @@ -698,7 +860,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, s_logger.error("Unable to find a secondary storage host who has completely downloaded 
the template."); return null; } - templateHostRef = _storageMgr.findVmTemplateHost(templateId, pool); + templateHostRef = findVmTemplateHost(templateId, pool); if (templateHostRef == null || templateHostRef.getDownloadState() != Status.DOWNLOADED) { s_logger.error("Unable to find a secondary storage host who has completely downloaded the template."); return null; @@ -839,13 +1001,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("Unable to find template with id"); } - HostVO dstSecHost = _storageMgr.getSecondaryStorageHost(destZoneId, templateId); + HostVO dstSecHost = getSecondaryStorageHost(destZoneId, templateId); if ( dstSecHost != null ) { s_logger.debug("There is template " + templateId + " in secondary storage " + dstSecHost.getId() + " in zone " + destZoneId + " , don't need to copy"); return template; } - HostVO srcSecHost = _storageMgr.getSecondaryStorageHost(sourceZoneId, templateId); + HostVO srcSecHost = getSecondaryStorageHost(sourceZoneId, templateId); if ( srcSecHost == null ) { throw new InvalidParameterValueException("There is no template " + templateId + " in zone " + sourceZoneId ); } @@ -900,7 +1062,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @Override public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) { - StoragePoolVO pool = _poolDao.findById(templatePoolVO.getPoolId()); + StoragePool pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId()); @@ -1065,32 +1227,37 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override - public boolean templateIsDeleteable(VMTemplateHostVO templateHostRef) { - VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templateHostRef.getTemplateId()); - long templateId = template.getId(); - HostVO 
secondaryStorageHost = _hostDao.findById(templateHostRef.getHostId()); - long zoneId = secondaryStorageHost.getDataCenterId(); - DataCenterVO zone = _dcDao.findById(zoneId); - - // Check if there are VMs running in the template host ref's zone that use the template - List nonExpungedVms = _vmInstanceDao.listNonExpungedByZoneAndTemplate(zoneId, templateId); - - if (!nonExpungedVms.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are non-expunged VMs deployed from this template."); - return false; - } - - // Check if there are any snapshots for the template in the template host ref's zone - List volumes = _volumeDao.findByTemplateAndZone(templateId, zoneId); - for (VolumeVO volume : volumes) { - List snapshots = _snapshotDao.listByVolumeIdVersion(volume.getId(), "2.1"); - if (!snapshots.isEmpty()) { - s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are 2.1 snapshots using this template."); - return false; - } - } - - return true; + public boolean templateIsDeleteable(VMTemplateHostVO templateHostRef) { + VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templateHostRef.getTemplateId()); + long templateId = template.getId(); + HostVO secondaryStorageHost = _hostDao.findById(templateHostRef.getHostId()); + long zoneId = secondaryStorageHost.getDataCenterId(); + DataCenterVO zone = _dcDao.findById(zoneId); + + // Check if there are VMs running in the template host ref's zone that use the template + List nonExpungedVms = _vmInstanceDao.listNonExpungedByZoneAndTemplate(zoneId, templateId); + + if (!nonExpungedVms.isEmpty()) { + s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are non-expunged VMs deployed from this template."); + return false; + } + List userVmUsingIso = _userVmDao.listByIsoId(templateId); + //check if there is any VM using this ISO. 
+ if (!userVmUsingIso.isEmpty()) { + s_logger.debug("ISO " + template.getName() + " in zone " + zone.getName() + " is not deleteable because it is attached to " + userVmUsingIso.size() + " VMs"); + return false; + } + // Check if there are any snapshots for the template in the template host ref's zone + List volumes = _volumeDao.findByTemplateAndZone(templateId, zoneId); + for (VolumeVO volume : volumes) { + List snapshots = _snapshotDao.listByVolumeIdVersion(volume.getId(), "2.1"); + if (!snapshots.isEmpty()) { + s_logger.debug("Template " + template.getName() + " in zone " + zone.getName() + " is not deleteable because there are 2.1 snapshots using this template."); + return false; + } + } + + return true; } @Override @@ -1176,12 +1343,57 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new CloudRuntimeException("Failed to attach iso"); } } + + private boolean attachISOToVM(long vmId, long isoId, boolean attach) { + UserVmVO vm = this._userVmDao.findById(vmId); + + if (vm == null) { + return false; + } else if (vm.getState() != State.Running) { + return true; + } + String isoPath; + VMTemplateVO tmplt = this._tmpltDao.findById(isoId); + if (tmplt == null) { + s_logger.warn("ISO: " + isoId + " does not exist"); + return false; + } + // Get the path of the ISO + Pair isoPathPair = null; + if (tmplt.getTemplateType() == TemplateType.PERHOST) { + isoPath = tmplt.getName(); + } else { + isoPathPair = getAbsoluteIsoPath(isoId, + vm.getDataCenterId()); + if (isoPathPair == null) { + s_logger.warn("Couldn't get absolute iso path"); + return false; + } else { + isoPath = isoPathPair.first(); + } + } + + String vmName = vm.getInstanceName(); + + HostVO host = _hostDao.findById(vm.getHostId()); + if (host == null) { + s_logger.warn("Host: " + vm.getHostId() + " does not exist"); + return false; + } + AttachIsoCommand cmd = new AttachIsoCommand(vmName, isoPath, attach); + if (isoPathPair != null) { + 
cmd.setStoreUrl(isoPathPair.second()); + } + Answer a = _agentMgr.easySend(vm.getHostId(), cmd); + + return (a != null && a.getResult()); + } private boolean attachISOToVM(long vmId, long userId, long isoId, boolean attach) { UserVmVO vm = _userVmDao.findById(vmId); VMTemplateVO iso = _tmpltDao.findById(isoId); - boolean success = _vmMgr.attachISOToVM(vmId, isoId, attach); + boolean success = attachISOToVM(vmId, isoId, attach); if ( success && attach) { vm.setIsoId(iso.getId()); _userVmDao.update(vmId, vm); @@ -1475,4 +1687,456 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return true; } + + private String getRandomPrivateTemplateName() { + return UUID.randomUUID().toString(); + } + + + + + + @Override + @DB + @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", async = true) + public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd command) + throws CloudRuntimeException { + Long userId = UserContext.current().getCallerUserId(); + if (userId == null) { + userId = User.UID_SYSTEM; + } + long templateId = command.getEntityId(); + Long volumeId = command.getVolumeId(); + Long snapshotId = command.getSnapshotId(); + VMTemplateVO privateTemplate = null; + Long accountId = null; + SnapshotVO snapshot = null; + + try { + TemplateInfo tmplInfo = this.tmplFactory.getTemplate(templateId); + snapshot = _snapshotDao.findById(snapshotId); + ZoneScope scope = new ZoneScope(snapshot.getDataCenterId()); + List store = this.dataStoreMgr.getImageStores(scope); + if (store.size() > 1) { + throw new CloudRuntimeException("muliple image data store, don't know which one to use"); + } + AsyncCallFuture future = null; + if (snapshotId != null) { + SnapshotInfo snapInfo = this.snapshotFactory.getSnapshot(snapshotId); + future = this.imageSvr.createTemplateFromSnapshotAsync(snapInfo, tmplInfo, store.get(0)); + } else if (volumeId != null) { + VolumeInfo volInfo = 
this.volFactory.getVolume(volumeId); + future = this.imageSvr.createTemplateFromVolumeAsync(volInfo, tmplInfo, store.get(0)); + } else { + throw new CloudRuntimeException( + "Creating private Template need to specify snapshotId or volumeId"); + } + + CommandResult result = null; + try { + result = future.get(); + if (result.isFailed()) { + privateTemplate = null; + s_logger.debug("Failed to create template" + result.getResult()); + throw new CloudRuntimeException("Failed to create template" + result.getResult()); + } + + privateTemplate = this._tmpltDao.findById(templateId); + UsageEventVO usageEvent = new UsageEventVO( + EventTypes.EVENT_TEMPLATE_CREATE, + privateTemplate.getAccountId(), + snapshot.getDataCenterId(), + privateTemplate.getId(), privateTemplate.getName(), + null, privateTemplate.getSourceTemplateId(), + privateTemplate.getSize()); + _usageEventDao.persist(usageEvent); + } catch (InterruptedException e) { + s_logger.debug("Failed to create template", e); + throw new CloudRuntimeException("Failed to create template", e); + } catch (ExecutionException e) { + s_logger.debug("Failed to create template", e); + throw new CloudRuntimeException("Failed to create template", e); + } + + } finally { + /*if (snapshot != null && snapshot.getSwiftId() != null + && secondaryStorageURL != null && zoneId != null + && accountId != null && volumeId != null) { + _snapshotMgr.deleteSnapshotsForVolume(secondaryStorageURL, + zoneId, accountId, volumeId); + }*/ + if (privateTemplate == null) { + Transaction txn = Transaction.currentTxn(); + txn.start(); + // Remove the template record + this._tmpltDao.expunge(templateId); + + // decrement resource count + if (accountId != null) { + _resourceLimitMgr.decrementResourceCount(accountId, + ResourceType.template); + } + txn.commit(); + } + } + + if (privateTemplate != null) { + return privateTemplate; + } else { + throw new CloudRuntimeException("Failed to create a template"); + } + } + + private static boolean isAdmin(short 
accountType) { + return ((accountType == Account.ACCOUNT_TYPE_ADMIN) + || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) + || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); + } + @Override + @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", create = true) + public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, + Account templateOwner) throws ResourceAllocationException { + Long userId = UserContext.current().getCallerUserId(); + + Account caller = UserContext.current().getCaller(); + boolean isAdmin = (isAdmin(caller.getType())); + + _accountMgr.checkAccess(caller, null, true, templateOwner); + + String name = cmd.getTemplateName(); + if ((name == null) || (name.length() > 32)) { + throw new InvalidParameterValueException( + "Template name cannot be null and should be less than 32 characters"); + } + + if (cmd.getTemplateTag() != null) { + if (!_accountService.isRootAdmin(caller.getType())) { + throw new PermissionDeniedException( + "Parameter templatetag can only be specified by a Root Admin, permission denied"); + } + } + + // do some parameter defaulting + Integer bits = cmd.getBits(); + Boolean requiresHvm = cmd.getRequiresHvm(); + Boolean passwordEnabled = cmd.isPasswordEnabled(); + Boolean isPublic = cmd.isPublic(); + Boolean featured = cmd.isFeatured(); + int bitsValue = ((bits == null) ? 64 : bits.intValue()); + boolean requiresHvmValue = ((requiresHvm == null) ? true : requiresHvm + .booleanValue()); + boolean passwordEnabledValue = ((passwordEnabled == null) ? 
false + : passwordEnabled.booleanValue()); + if (isPublic == null) { + isPublic = Boolean.FALSE; + } + boolean allowPublicUserTemplates = Boolean.parseBoolean(_configDao + .getValue("allow.public.user.templates")); + if (!isAdmin && !allowPublicUserTemplates && isPublic) { + throw new PermissionDeniedException("Failed to create template " + + name + ", only private templates can be created."); + } + + Long volumeId = cmd.getVolumeId(); + Long snapshotId = cmd.getSnapshotId(); + if ((volumeId == null) && (snapshotId == null)) { + throw new InvalidParameterValueException( + "Failed to create private template record, neither volume ID nor snapshot ID were specified."); + } + if ((volumeId != null) && (snapshotId != null)) { + throw new InvalidParameterValueException( + "Failed to create private template record, please specify only one of volume ID (" + + volumeId + + ") and snapshot ID (" + + snapshotId + + ")"); + } + + HypervisorType hyperType; + VolumeVO volume = null; + VMTemplateVO privateTemplate = null; + if (volumeId != null) { // create template from volume + volume = this._volumeDao.findById(volumeId); + if (volume == null) { + throw new InvalidParameterValueException( + "Failed to create private template record, unable to find volume " + + volumeId); + } + // check permissions + _accountMgr.checkAccess(caller, null, true, volume); + + // If private template is created from Volume, check that the volume + // will not be active when the private template is + // created + if (!this.volumeMgr.volumeInactive(volume)) { + String msg = "Unable to create private template for volume: " + + volume.getName() + + "; volume is attached to a non-stopped VM, please stop the VM first"; + if (s_logger.isInfoEnabled()) { + s_logger.info(msg); + } + throw new CloudRuntimeException(msg); + } + hyperType = this._volumeDao.getHypervisorType(volumeId); + } else { // create template from snapshot + SnapshotVO snapshot = _snapshotDao.findById(snapshotId); + if (snapshot == null) { 
+ throw new InvalidParameterValueException( + "Failed to create private template record, unable to find snapshot " + + snapshotId); + } + + volume = this._volumeDao.findById(snapshot.getVolumeId()); + VolumeVO snapshotVolume = this._volumeDao + .findByIdIncludingRemoved(snapshot.getVolumeId()); + + // check permissions + _accountMgr.checkAccess(caller, null, true, snapshot); + + if (snapshot.getState() != Snapshot.State.BackedUp) { + throw new InvalidParameterValueException("Snapshot id=" + + snapshotId + " is not in " + Snapshot.State.BackedUp + + " state yet and can't be used for template creation"); + } + + /* + * // bug #11428. Operation not supported if vmware and snapshots + * parent volume = ROOT if(snapshot.getHypervisorType() == + * HypervisorType.VMware && snapshotVolume.getVolumeType() == + * Type.DATADISK){ throw new UnsupportedServiceException( + * "operation not supported, snapshot with id " + snapshotId + + * " is created from Data Disk"); } + */ + + hyperType = snapshot.getHypervisorType(); + } + + _resourceLimitMgr.checkResourceLimit(templateOwner, + ResourceType.template); + + if (!isAdmin || featured == null) { + featured = Boolean.FALSE; + } + Long guestOSId = cmd.getOsTypeId(); + GuestOSVO guestOS = this._guestOSDao.findById(guestOSId); + if (guestOS == null) { + throw new InvalidParameterValueException("GuestOS with ID: " + + guestOSId + " does not exist."); + } + + String uniqueName = Long.valueOf((userId == null) ? 
1 : userId) + .toString() + + UUID.nameUUIDFromBytes(name.getBytes()).toString(); + Long nextTemplateId = this._tmpltDao.getNextInSequence(Long.class, "id"); + String description = cmd.getDisplayText(); + boolean isExtractable = false; + Long sourceTemplateId = null; + if (volume != null) { + VMTemplateVO template = ApiDBUtils.findTemplateById(volume + .getTemplateId()); + isExtractable = template != null + && template.isExtractable() + && template.getTemplateType() != Storage.TemplateType.SYSTEM; + if (template != null) { + sourceTemplateId = template.getId(); + } else if (volume.getVolumeType() == Volume.Type.ROOT) { // vm created out + // of blank + // template + UserVm userVm = ApiDBUtils.findUserVmById(volume + .getInstanceId()); + sourceTemplateId = userVm.getIsoId(); + } + } + String templateTag = cmd.getTemplateTag(); + if (templateTag != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Adding template tag: " + templateTag); + } + } + privateTemplate = new VMTemplateVO(nextTemplateId, uniqueName, name, + ImageFormat.RAW, isPublic, featured, isExtractable, + TemplateType.USER, null, null, requiresHvmValue, bitsValue, + templateOwner.getId(), null, description, passwordEnabledValue, + guestOS.getId(), true, hyperType, templateTag, cmd.getDetails()); + if (sourceTemplateId != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("This template is getting created from other template, setting source template Id to: " + + sourceTemplateId); + } + } + privateTemplate.setSourceTemplateId(sourceTemplateId); + + VMTemplateVO template = this._tmpltDao.persist(privateTemplate); + // Increment the number of templates + if (template != null) { + if (cmd.getDetails() != null) { + this._templateDetailsDao.persist(template.getId(), cmd.getDetails()); + } + + _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), + ResourceType.template); + } + + if (template != null) { + return template; + } else { + throw new CloudRuntimeException("Failed to 
create a template"); + } + + } + + @Override + public Pair getAbsoluteIsoPath(long templateId, + long dataCenterId) { + String isoPath = null; + + List storageHosts = _resourceMgr.listAllHostsInOneZoneByType( + Host.Type.SecondaryStorage, dataCenterId); + if (storageHosts != null) { + for (HostVO storageHost : storageHosts) { + List templateHostVOs = this._tmpltHostDao + .listByTemplateHostStatus( + templateId, + storageHost.getId(), + VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + if (templateHostVOs != null && !templateHostVOs.isEmpty()) { + VMTemplateHostVO tmpHostVO = templateHostVOs.get(0); + isoPath = storageHost.getStorageUrl() + "/" + + tmpHostVO.getInstallPath(); + return new Pair(isoPath, + storageHost.getStorageUrl()); + } + } + } + s_logger.warn("Unable to find secondary storage in zone id=" + + dataCenterId); + return null; + } + + @Override + public String getSecondaryStorageURL(long zoneId) { + // Determine the secondary storage URL + HostVO secondaryStorageHost = getSecondaryStorageHost(zoneId); + + if (secondaryStorageHost == null) { + return null; + } + + return secondaryStorageHost.getStorageUrl(); + } + + @Override + public HostVO getSecondaryStorageHost(long zoneId, long tmpltId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + return null; + } + for (HostVO host : hosts) { + VMTemplateHostVO tmpltHost = this._tmpltHostDao.findByHostTemplate( + host.getId(), tmpltId); + if (tmpltHost != null + && !tmpltHost.getDestroyed() + && tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + return host; + } + } + return null; + } + + @Override + public VMTemplateHostVO getTemplateHostRef(long zoneId, long tmpltId, + boolean readyOnly) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + return null; + } + VMTemplateHostVO inProgress = null; + VMTemplateHostVO other = null; + for 
(HostVO host : hosts) { + VMTemplateHostVO tmpltHost = this._tmpltHostDao.findByHostTemplate( + host.getId(), tmpltId); + if (tmpltHost != null && !tmpltHost.getDestroyed()) { + if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + return tmpltHost; + } else if (tmpltHost.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOAD_IN_PROGRESS) { + inProgress = tmpltHost; + } else { + other = tmpltHost; + } + } + } + if (inProgress != null) { + return inProgress; + } + return other; + } + + @Override + public HostVO getSecondaryStorageHost(long zoneId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); + if (hosts.isEmpty()) { + return null; + } + } + + int size = hosts.size(); + Random rn = new Random(); + int index = rn.nextInt(size); + return hosts.get(index); + } + + @Override + public List getSecondaryStorageHosts(long zoneId) { + List hosts = _ssvmMgr + .listSecondaryStorageHostsInOneZone(zoneId); + if (hosts == null || hosts.size() == 0) { + hosts = _ssvmMgr.listLocalSecondaryStorageHostsInOneZone(zoneId); + if (hosts.isEmpty()) { + return new ArrayList(); + } + } + return hosts; + } + + @Override + public Long getTemplateSize(long templateId, long zoneId) { + SearchCriteria sc = HostTemplateStatesSearch.create(); + sc.setParameters("id", templateId); + sc.setParameters( + "state", + com.cloud.storage.VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sc.setJoinParameters("host", "dcId", zoneId); + List tsvs = _tmpltSwiftDao + .listByTemplateId(templateId); + Long size = null; + if (tsvs != null && tsvs.size() > 0) { + size = tsvs.get(0).getSize(); + } + + if (size == null && _s3Mgr.isS3Enabled()) { + VMTemplateS3VO vmTemplateS3VO = _vmS3TemplateDao + .findOneByTemplateId(templateId); + if (vmTemplateS3VO != null) { + size = vmTemplateS3VO.getSize(); + } + } + + if (size == null) { 
+ List sss = this.templateHostDao.search(sc, null); + if (sss == null || sss.size() == 0) { + throw new CloudRuntimeException("Template " + + templateId + + " has not been completely downloaded to zone " + + zoneId); + } + size = sss.get(0).getSize(); + } + return size; + } + } diff --git a/server/src/com/cloud/upgrade/DatabaseCreator.java b/server/src/com/cloud/upgrade/DatabaseCreator.java index 4d706344707..9841faeb94b 100755 --- a/server/src/com/cloud/upgrade/DatabaseCreator.java +++ b/server/src/com/cloud/upgrade/DatabaseCreator.java @@ -29,6 +29,7 @@ import java.util.Properties; import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.db.ScriptRunner; import com.cloud.utils.db.Transaction; @@ -192,6 +193,8 @@ public class DatabaseCreator { } } + Transaction txn = Transaction.open(Transaction.CLOUD_DB); + try { // Process db upgrade classes for (String upgradeClass: upgradeClasses) { System.out.println("========> Processing upgrade: " + upgradeClass); @@ -202,13 +205,22 @@ public class DatabaseCreator { System.err.println("The class must be of SystemIntegrityChecker: " + clazz.getName()); System.exit(1); } + SystemIntegrityChecker checker = (SystemIntegrityChecker)clazz.newInstance(); + checker.check(); } catch (ClassNotFoundException e) { System.err.println("Unable to find " + upgradeClass + ": " + e.getMessage()); System.exit(1); + } catch (InstantiationException e) { + System.err.println("Unable to instantiate " + upgradeClass + ": " + e.getMessage()); + System.exit(1); + } catch (IllegalAccessException e) { + System.err.println("Unable to access " + upgradeClass + ": " + e.getMessage()); + System.exit(1); } - //SystemIntegrityChecker checker = (SystemIntegrityChecker)ComponentLocator.inject(clazz); - //checker.check(); + } + } finally { + txn.close(); } } } diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java 
b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index f831a032385..5bd749fe842 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -31,7 +31,6 @@ import java.util.List; import java.util.TreeMap; import javax.ejb.Local; -import javax.inject.Inject; import org.apache.log4j.Logger; @@ -57,6 +56,7 @@ import com.cloud.upgrade.dao.Upgrade301to302; import com.cloud.upgrade.dao.Upgrade302to40; import com.cloud.upgrade.dao.Upgrade30to301; import com.cloud.upgrade.dao.Upgrade40to41; +import com.cloud.upgrade.dao.Upgrade410to420; import com.cloud.upgrade.dao.UpgradeSnapshot217to224; import com.cloud.upgrade.dao.UpgradeSnapshot223to224; import com.cloud.upgrade.dao.VersionDao; @@ -76,90 +76,93 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { protected HashMap _upgradeMap = new HashMap(); - @Inject VersionDao _dao; + VersionDao _dao; public DatabaseUpgradeChecker() { + _dao = new VersionDaoImpl(); _upgradeMap.put("2.1.7", new DbUpgrade[] { new Upgrade217to218(), new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.1.8", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new 
Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); 
_upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), - new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new 
Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30() - , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), - new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.11", new DbUpgrade[] { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new 
Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.12", new DbUpgrade[] { new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.13", new DbUpgrade[] { new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40() }); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); _upgradeMap.put("2.2.14", new DbUpgrade[] { new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40() }); + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40() }); + _upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40() }); + _upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); - _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40() }); - - _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41() }); + _upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420() }); + + _upgradeMap.put("4.0.0", new DbUpgrade[] { new Upgrade40to41(), new Upgrade410to420() }); + + _upgradeMap.put("4.1.0", new DbUpgrade[] { new Upgrade410to420() }); } protected void runScript(Connection conn, File file) { diff --git a/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java index 88370c10c8c..c0f827e655e 100755 --- 
a/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -266,8 +266,19 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId,networkId); } pstmt3.close(); + + // add the reference to this physical network for the default public network entries in vlan / user_ip_address tables // add first physicalNetworkId to op_dc_vnet_alloc for this zone - just a placeholder since direct networking dont need this if(isFirstPhysicalNtwk){ + s_logger.debug("Adding PhysicalNetwork to default Public network entries in vlan and user_ip_address"); + pstmt3 = conn.prepareStatement("SELECT id FROM `cloud`.`networks` where traffic_type = 'Public' and data_center_id = "+zoneId); + ResultSet rsPubNet = pstmt3.executeQuery(); + if(rsPubNet.next()){ + Long publicNetworkId = rsPubNet.getLong(1); + addPhysicalNtwk_To_Ntwk_IP_Vlan(conn, physicalNetworkId,publicNetworkId); + } + pstmt3.close(); + s_logger.debug("Adding PhysicalNetwork to op_dc_vnet_alloc"); String updateVnet = "UPDATE `cloud`.`op_dc_vnet_alloc` SET physical_network_id = " + physicalNetworkId + " WHERE data_center_id = " + zoneId; pstmtUpdate = conn.prepareStatement(updateVnet); diff --git a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java index d3a8cd5a9d3..92687643185 100644 --- a/server/src/com/cloud/upgrade/dao/Upgrade40to41.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade40to41.java @@ -17,6 +17,7 @@ package com.cloud.upgrade.dao; +import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; @@ -32,73 +33,96 @@ import java.util.UUID; import org.apache.log4j.Logger; -/** - * @author htrippaers - * - */ public class Upgrade40to41 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade40to41.class); + final static Logger s_logger = 
Logger.getLogger(Upgrade40to41.class); - /** - * - */ - public Upgrade40to41() { - // TODO Auto-generated constructor stub - } + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "4.0.0", "4.1.0" }; + } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getUpgradableVersionRange() - */ - @Override - public String[] getUpgradableVersionRange() { - return new String[] { "4.0.0", "4.1.0" }; - } + @Override + public String getUpgradedVersion() { + return "4.1.0"; + } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getUpgradedVersion() - */ - @Override - public String getUpgradedVersion() { - return "4.1.0"; - } + @Override + public boolean supportsRollingUpgrade() { + return false; + } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#supportsRollingUpgrade() - */ - @Override - public boolean supportsRollingUpgrade() { - return false; - } - - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getPrepareScripts() - */ - @Override - public File[] getPrepareScripts() { - String script = Script.findScript("", "db/schema-40to410.sql"); + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-40to410.sql"); if (script == null) { throw new CloudRuntimeException("Unable to find db/schema-40to410.sql"); } return new File[] { new File(script) }; - } + } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#performDataMigration(java.sql.Connection) - */ - @Override - public void performDataMigration(Connection conn) { + @Override + public void performDataMigration(Connection conn) { + updateRegionEntries(conn); upgradeEIPNetworkOfferings(conn); upgradeEgressFirewallRules(conn); - } + } - /* (non-Javadoc) - * @see com.cloud.upgrade.dao.DbUpgrade#getCleanupScripts() - */ - @Override - public File[] getCleanupScripts() { - return new File[0]; - } + @Override + public File[] getCleanupScripts() { + String script = Script.findScript("", 
"db/schema-40to410-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-40to410-cleanup.sql"); + } + + return new File[] { new File(script) }; + } + + private void updateRegionEntries(Connection conn) { + int region_id = Transaction.s_region_id; + PreparedStatement pstmt = null; + try { + //Update regionId in region table + s_logger.debug("Updating region table with Id: "+region_id); + pstmt = conn.prepareStatement("update `cloud`.`region` set id = ?"); + pstmt.setInt(1, region_id); + pstmt.executeUpdate(); + + //Update regionId in account table + s_logger.debug("Updating account table with Id: "+region_id); + pstmt = conn.prepareStatement("update `cloud`.`account` set region_id = ?"); + pstmt.setInt(1, region_id); + pstmt.executeUpdate(); + + //Update regionId in user table + s_logger.debug("Updating user table with Id: "+region_id); + pstmt = conn.prepareStatement("update `cloud`.`user` set region_id = ?"); + pstmt.setInt(1, region_id); + pstmt.executeUpdate(); + + //Update regionId in domain table + s_logger.debug("Updating domain table with Id: "+region_id); + pstmt = conn.prepareStatement("update `cloud`.`domain` set region_id = ?"); + pstmt.setInt(1, region_id); + pstmt.executeUpdate(); + + //Update regionId in cloud_usage account table + s_logger.debug("Updating cloud_usage account table with Id: "+region_id); + pstmt = conn.prepareStatement("update `cloud_usage`.`account` set region_id = ?"); + pstmt.setInt(1, region_id); + pstmt.executeUpdate(); + s_logger.debug("Successfully updated region entries with regionId: "+region_id); + } catch (SQLException e) { + throw new CloudRuntimeException("Error while updating region entries", e); + } finally { + try { + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } private void upgradeEIPNetworkOfferings(Connection conn) { PreparedStatement pstmt = null; @@ -133,7 +157,6 @@ public class Upgrade40to41 implements DbUpgrade { } } - private 
void upgradeEgressFirewallRules(Connection conn) { PreparedStatement pstmt = null; ResultSet rs = null; diff --git a/server/src/com/cloud/upgrade/dao/Upgrade410to420.java b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java new file mode 100644 index 00000000000..db562b1c17a --- /dev/null +++ b/server/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -0,0 +1,161 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.UUID; + +import org.apache.log4j.Logger; + +public class Upgrade410to420 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade410to420.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "4.1.0", "4.2.0" }; + } + + @Override + public String getUpgradedVersion() { + return "4.2.0"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-410to420.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-410to420.sql"); + } + + return new File[] { new File(script) }; + } + + @Override + public void performDataMigration(Connection conn) { + upgradeVmwareLabels(conn); + PreparedStatement sql = null; + try { + sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'"); + sql.executeUpdate(); + } catch (SQLException e) { + throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString()); + } finally { + if (sql != null) { + try { + sql.close(); + } catch (SQLException e) { + } + } + } + } + + @Override + public File[] getCleanupScripts() { + String script = Script.findScript("", "db/schema-410to420-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-410to420-cleanup.sql"); + } + + return new File[] { new File(script) }; + } + + private String getNewLabel(ResultSet rs, String oldParamValue) { + int separatorIndex; + String oldGuestLabel; + String newGuestLabel = oldParamValue; + try { + // No need to iterate because 
the global param setting applies to all physical networks irrespective of traffic type + if (rs.next()) { + oldGuestLabel = rs.getString("vmware_network_label"); + // guestLabel is in format [[],VLANID] + separatorIndex = oldGuestLabel.indexOf(","); + if(separatorIndex > -1) { + newGuestLabel += oldGuestLabel.substring(separatorIndex); + } + } + } catch (SQLException e) { + s_logger.error(new CloudRuntimeException("Failed to read vmware_network_label : " + e)); + } finally { + try { + if (rs != null) { + rs.close(); + } + } catch (SQLException e) { + } + } + return newGuestLabel; + } + + private void upgradeVmwareLabels(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rsParams = null; + ResultSet rsLabel = null; + String newLabel; + String trafficType = null; + String trafficTypeVswitchParam; + String trafficTypeVswitchParamValue; + + try { + // update the existing vmware traffic labels + pstmt = conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware\\.*\\.vswitch';"); + rsParams = pstmt.executeQuery(); + while (rsParams.next()) { + trafficTypeVswitchParam = rsParams.getString("name"); + trafficTypeVswitchParamValue = rsParams.getString("value"); + // When upgraded from 4.0 to 4.1 update physical network traffic label with trafficTypeVswitchParam + if (trafficTypeVswitchParam.equals("vmware.private.vswitch")) { + trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done? + } else if (trafficTypeVswitchParam.equals("vmware.public.vswitch")) { + trafficType = "Public"; + } else if (trafficTypeVswitchParam.equals("vmware.guest.vswitch")) { + trafficType = "Guest"; + } + s_logger.debug("Updating vmware label for " + trafficType + " traffic. 
Update SQL statement is " + pstmt); + pstmt = conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type='" + trafficType + "';"); + rsLabel = pstmt.executeQuery(); + newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue); + pstmt = conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = " + newLabel + " where traffic_type = '" + trafficType + "' and vmware_network_label is not NULL;"); + pstmt.executeUpdate(); + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to set vmware traffic labels ", e); + } finally { + try { + if (rsParams != null) { + rsParams.close(); + } + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } +} diff --git a/server/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/server/src/com/cloud/upgrade/dao/VersionDaoImpl.java index 5e6e7bc75b8..7c5c9ccccf0 100644 --- a/server/src/com/cloud/upgrade/dao/VersionDaoImpl.java +++ b/server/src/com/cloud/upgrade/dao/VersionDaoImpl.java @@ -47,7 +47,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V final GenericSearchBuilder CurrentVersionSearch; final SearchBuilder AllFieldsSearch; - protected VersionDaoImpl() { + public VersionDaoImpl() { super(); CurrentVersionSearch = createSearchBuilder(String.class); diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index a3f9505c3df..9b916024cbb 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -104,6 +104,7 @@ import com.cloud.server.auth.UserAuthenticator; import com.cloud.storage.StorageManager; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import 
com.cloud.storage.dao.VMTemplateDao; @@ -118,7 +119,6 @@ import com.cloud.user.dao.UserDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; - import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -226,6 +226,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M Site2SiteVpnManager _vpnMgr; @Inject private AutoScaleManager _autoscaleMgr; + @Inject VolumeManager volumeMgr; @Inject private List _userAuthenticators; @@ -576,7 +577,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M for (VolumeVO volume : volumes) { if (!volume.getState().equals(Volume.State.Destroy)) { try { - _storageMgr.deleteVolume(volume.getId(), caller); + this.volumeMgr.deleteVolume(volume.getId(), caller); } catch (Exception ex) { s_logger.warn("Failed to cleanup volumes as a part of account id=" + accountId + " cleanup due to Exception: ", ex); accountCleanupNeeded = true; @@ -762,8 +763,8 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB @ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_CREATE, eventDescription = "creating Account") - public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, Long domainId, String networkDomain, - Map details, String accountUUID, String userUUID, Integer regionId) { + public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, + Long domainId, String networkDomain, Map details) { if (accountName == null) { accountName = userName; @@ -805,55 +806,30 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - if(regionId == null){ - Transaction txn = 
Transaction.currentTxn(); - txn.start(); + Transaction txn = Transaction.currentTxn(); + txn.start(); - // create account - AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details, UUID.randomUUID().toString(), _regionMgr.getId()); - long accountId = account.getId(); + // create account + AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details, UUID.randomUUID().toString(), _regionMgr.getId()); + long accountId = account.getId(); - // create the first user for the account - UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone); + // create the first user for the account + UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone); - if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { - // set registration token - byte[] bytes = (domainId + accountName + userName + System.currentTimeMillis()).getBytes(); - String registrationToken = UUID.nameUUIDFromBytes(bytes).toString(); - user.setRegistrationToken(registrationToken); - } - txn.commit(); - //Propagate Add account to other Regions - _regionMgr.propagateAddAccount(userName, password, firstName, lastName, email, timezone, accountName, accountType, domainId, - networkDomain, details, account.getUuid(), user.getUuid()); - //check success - return _userAccountDao.findById(user.getId()); - } else { - // Account is propagated from another Region - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - // create account - AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details, accountUUID, regionId); - long accountId = account.getId(); - - // create the first user for the account - UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone, userUUID, regionId); - - if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { - // set registration token - byte[] bytes = (domainId 
+ accountName + userName + System.currentTimeMillis()).getBytes(); - String registrationToken = UUID.nameUUIDFromBytes(bytes).toString(); - user.setRegistrationToken(registrationToken); - } - txn.commit(); - return _userAccountDao.findById(user.getId()); + if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { + // set registration token + byte[] bytes = (domainId + accountName + userName + System.currentTimeMillis()).getBytes(); + String registrationToken = UUID.nameUUIDFromBytes(bytes).toString(); + user.setRegistrationToken(registrationToken); } + txn.commit(); + + //check success + return _userAccountDao.findById(user.getId()); } @Override - public UserVO createUser(String userName, String password, String firstName, String lastName, String email, String timeZone, String accountName, Long domainId, String userUUID, Integer regionId) { + public UserVO createUser(String userName, String password, String firstName, String lastName, String email, String timeZone, String accountName, Long domainId) { // default domain to ROOT if not specified if (domainId == null) { @@ -882,13 +858,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M throw new CloudRuntimeException("The user " + userName + " already exists in domain " + domainId); } UserVO user = null; - if(regionId == null){ - user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone); - //Propagate Add user to peer Regions - _regionMgr.propagateAddUser(userName, password, firstName, lastName, email, timeZone, accountName, domain.getUuid(), user.getUuid()); - } else { - user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone, userUUID, regionId); - } + user = createUser(account.getId(), userName, password, firstName, lastName, email, timeZone); return user; } @@ -974,7 +944,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } if (encodedPassword == null) { - throw new 
CloudRuntimeException("Failed to encode password"); + throw new CloudRuntimeException("Failed to encode password"); } user.setPassword(encodedPassword); } @@ -1296,7 +1266,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M String newAccountName = cmd.getNewName(); String networkDomain = cmd.getNetworkDomain(); Map details = cmd.getDetails(); - + boolean success = false; Account account = null; if (accountId != null) { @@ -1748,7 +1718,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (s_logger.isDebugEnabled()) { s_logger.debug("Creating user: " + userName + ", accountId: " + accountId + " timezone:" + timezone); } - + String encodedPassword = null; for (UserAuthenticator authenticator : _userAuthenticators) { encodedPassword = authenticator.encode(password); @@ -1757,9 +1727,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } if (encodedPassword == null) { - throw new CloudRuntimeException("Failed to encode password"); + throw new CloudRuntimeException("Failed to encode password"); } - + UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone, UUID.randomUUID().toString(), _regionMgr.getId())); return user; @@ -1780,9 +1750,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } if (encodedPassword == null) { - throw new CloudRuntimeException("Failed to encode password"); + throw new CloudRuntimeException("Failed to encode password"); } - + UserVO user = _userDao.persist(new UserVO(accountId, userName, encodedPassword, firstName, lastName, email, timezone, uuid, regionId)); return user; @@ -2013,7 +1983,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB public String[] createApiKeyAndSecretKey(RegisterCmd cmd) { - //Send keys to other Regions + //Send keys to other Regions Long userId = cmd.getId(); User user = 
getUserIncludingRemoved(userId); @@ -2242,7 +2212,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - @Override + @Override public void buildACLViewSearchBuilder(SearchBuilder sb, Long domainId, boolean isRecursive, List permittedAccounts, ListProjectResourcesCriteria listProjectResourcesCriteria) { diff --git a/server/src/com/cloud/user/DomainManager.java b/server/src/com/cloud/user/DomainManager.java index af102e20a5c..f268f6ec4eb 100644 --- a/server/src/com/cloud/user/DomainManager.java +++ b/server/src/com/cloud/user/DomainManager.java @@ -27,7 +27,7 @@ import com.cloud.domain.DomainVO; public interface DomainManager extends DomainService { Set getDomainChildrenIds(String parentDomainPath); - Domain createDomain(String name, Long parentId, Long ownerId, String networkDomain, String domainUUID, Integer regionId); + Domain createDomain(String name, Long parentId, Long ownerId, String networkDomain); Set getDomainParentIds(long domainId); diff --git a/server/src/com/cloud/user/DomainManagerImpl.java b/server/src/com/cloud/user/DomainManagerImpl.java index 8ad9f5b160e..babaed37494 100644 --- a/server/src/com/cloud/user/DomainManagerImpl.java +++ b/server/src/com/cloud/user/DomainManagerImpl.java @@ -118,7 +118,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom @Override @ActionEvent(eventType = EventTypes.EVENT_DOMAIN_CREATE, eventDescription = "creating Domain") - public Domain createDomain(String name, Long parentId, String networkDomain, String domainUUID, Integer regionId) { + public Domain createDomain(String name, Long parentId, String networkDomain) { Account caller = UserContext.current().getCaller(); if (parentId == null) { @@ -136,13 +136,13 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom _accountMgr.checkAccess(caller, parentDomain); - return createDomain(name, parentId, caller.getId(), networkDomain, domainUUID, regionId); + return 
createDomain(name, parentId, caller.getId(), networkDomain); } @Override @DB - public Domain createDomain(String name, Long parentId, Long ownerId, String networkDomain, String domainUUID, Integer regionId) { + public Domain createDomain(String name, Long parentId, Long ownerId, String networkDomain) { // Verify network domain if (networkDomain != null) { if (!NetUtils.verifyDomainName(networkDomain)) { @@ -161,28 +161,13 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom throw new InvalidParameterValueException("Domain with name " + name + " already exists for the parent id=" + parentId); } - if(regionId == null){ Transaction txn = Transaction.currentTxn(); txn.start(); - DomainVO domain = _domainDao.create(new DomainVO(name, ownerId, parentId, networkDomain, _regionMgr.getId())); + DomainVO domain = _domainDao.create(new DomainVO(name, ownerId, parentId, networkDomain, _regionMgr.getId())); _resourceCountDao.createResourceCounts(domain.getId(), ResourceLimit.ResourceOwnerType.Domain); txn.commit(); - //Propagate domain creation to peer Regions - _regionMgr.propagateAddDomain(name, parentId, networkDomain, domain.getUuid()); - return domain; - } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - DomainVO domain = _domainDao.create(new DomainVO(name, ownerId, parentId, networkDomain, domainUUID, regionId)); - _resourceCountDao.createResourceCounts(domain.getId(), ResourceLimit.ResourceOwnerType.Domain); - - txn.commit(); return domain; - - } - } @Override @@ -380,7 +365,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom SearchBuilder sb = _domainDao.createSearchBuilder(); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); sb.and("level", sb.entity().getLevel(), SearchCriteria.Op.EQ); sb.and("path", sb.entity().getPath(), SearchCriteria.Op.LIKE); 
sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ); @@ -394,7 +379,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } if (domainName != null) { - sc.setParameters("name", "%" + domainName + "%"); + sc.setParameters("name", domainName); } if (level != null) { @@ -485,8 +470,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom // check if domain exists in the system DomainVO domain = _domainDao.findById(domainId); if (domain == null) { - InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); - ex.addProxyObject(domain, domainId, "domainId"); + InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); + ex.addProxyObject(domain, domainId, "domainId"); throw ex; } else if (domain.getParent() == null && domainName != null) { // check if domain is ROOT domain - and deny to edit it with the new name @@ -507,8 +492,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if (!domains.isEmpty() && !sameDomain) { InvalidParameterValueException ex = new InvalidParameterValueException("Failed to update specified domain id with name '" + domainName + "' since it already exists in the system"); - ex.addProxyObject(domain, domainId, "domainId"); - throw ex; + ex.addProxyObject(domain, domainId, "domainId"); + throw ex; } } @@ -566,5 +551,5 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom _domainDao.update(dom.getId(), dom); } } - + } diff --git a/server/src/com/cloud/user/dao/UserStatisticsDaoImpl.java b/server/src/com/cloud/user/dao/UserStatisticsDaoImpl.java index 913ec070dfa..4a1a51c6d9f 100644 --- a/server/src/com/cloud/user/dao/UserStatisticsDaoImpl.java +++ b/server/src/com/cloud/user/dao/UserStatisticsDaoImpl.java @@ -44,17 +44,17 @@ public class UserStatisticsDaoImpl extends GenericDaoBase= ?) 
" + "ORDER BY us.id"; private static final String UPDATED_STATS_SEARCH = "SELECT id, current_bytes_received, current_bytes_sent, net_bytes_received, net_bytes_sent, agg_bytes_received, agg_bytes_sent from user_statistics " + - "where (agg_bytes_received < net_bytes_received + current_bytes_received) OR (agg_bytes_sent < net_bytes_sent + current_bytes_sent)"; + "where (agg_bytes_received < net_bytes_received + current_bytes_received) OR (agg_bytes_sent < net_bytes_sent + current_bytes_sent)"; private final SearchBuilder AllFieldsSearch; private final SearchBuilder AccountSearch; - - - public UserStatisticsDaoImpl() { - AccountSearch = createSearchBuilder(); - AccountSearch.and("account", AccountSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - AccountSearch.done(); - AllFieldsSearch = createSearchBuilder(); + + public UserStatisticsDaoImpl() { + AccountSearch = createSearchBuilder(); + AccountSearch.and("account", AccountSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AccountSearch.done(); + + AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ); AllFieldsSearch.and("dc", AllFieldsSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); AllFieldsSearch.and("network", AllFieldsSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); @@ -63,7 +63,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); @@ -133,5 +133,5 @@ public class UserStatisticsDaoImpl extends GenericDaoBase, UserVmServi * @return VirtualMachine */ UserVmVO getVirtualMachine(long vmId); - - /** - * Attaches an ISO to the virtual CDROM device of the specified VM. Will eject any existing virtual CDROM if isoPath is null. 
- * @param vmId - * @param isoId - * @param attach whether to attach or detach the given iso - * @return - */ - boolean attachISOToVM(long vmId, long isoId, boolean attach); - + /** * Stops the virtual machine * @param userId the id of the user performing the action @@ -101,8 +92,6 @@ public interface UserVmManager extends VirtualMachineGuru, UserVmServi */ Pair, Integer> searchForUserVMs(Criteria c, Account caller, Long domainId, boolean isRecursive, List permittedAccounts, boolean listAll, ListProjectResourcesCriteria listProjectResourcesCriteria, Map tags); - String getChecksum(Long hostId, String templatePath); - Pair> startVirtualMachine(long vmId, Long hostId, Map additionalParams) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java old mode 100644 new mode 100755 index 19887ff9e25..88086ced461 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -34,10 +34,8 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -52,48 +50,32 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; -import 
org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; - import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity; import org.apache.cloudstack.engine.service.api.OrchestrationService; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; +import com.cloud.agent.AgentManager.OnError; import com.cloud.agent.api.Answer; -import com.cloud.agent.api.AttachIsoCommand; -import com.cloud.agent.api.AttachVolumeAnswer; -import com.cloud.agent.api.AttachVolumeCommand; -import com.cloud.agent.api.ComputeChecksumCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromSnapshotCommand; -import com.cloud.agent.api.CreatePrivateTemplateFromVolumeCommand; import com.cloud.agent.api.GetVmStatsAnswer; import com.cloud.agent.api.GetVmStatsCommand; -import com.cloud.agent.api.SnapshotCommand; +import com.cloud.agent.api.PlugNicAnswer; +import com.cloud.agent.api.PlugNicCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StopAnswer; -import com.cloud.agent.api.UpgradeSnapshotCommand; +import com.cloud.agent.api.UnPlugNicAnswer; +import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.VmStatsEntry; -import com.cloud.agent.AgentManager.OnError; -import com.cloud.agent.api.*; -import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; -import com.cloud.agent.api.PlugNicAnswer; -import com.cloud.agent.api.PlugNicCommand; -import com.cloud.agent.api.UnPlugNicAnswer; -import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.manager.Commands; import com.cloud.alert.AlertManager; -import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.UserVmJoinDao; import 
com.cloud.api.query.vo.UserVmJoinVO; -import com.cloud.async.AsyncJobExecutor; import com.cloud.async.AsyncJobManager; -import com.cloud.async.AsyncJobVO; -import com.cloud.async.BaseAsyncJobExecutor; import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -108,6 +90,7 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeployPlannerSelector; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -134,7 +117,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.network.Network; -import com.cloud.network.*; import com.cloud.network.Network.IpAddresses; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; @@ -182,7 +164,6 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSCategoryVO; import com.cloud.storage.GuestOSVO; -import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; @@ -191,20 +172,15 @@ import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.Volume; -import com.cloud.storage.Volume.Type; -import com.cloud.storage.VolumeHostVO; +import com.cloud.storage.VolumeManager; import 
com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplateHostDao; @@ -213,6 +189,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeHostDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.tags.dao.ResourceTagDao; +import com.cloud.template.TemplateManager; import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.user.Account; @@ -233,7 +210,6 @@ import com.cloud.utils.Journal; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.PasswordGenerator; -import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.crypt.RSAHelper; @@ -249,36 +225,14 @@ import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.*; -import org.apache.cloudstack.acl.ControlledEntity.ACLType; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; -import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; -import org.apache.cloudstack.api.command.user.vm.*; -import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; -import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; 
-import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; -import org.apache.commons.codec.binary.Base64; -import org.apache.log4j.Logger; - -import javax.ejb.Local; -import javax.naming.ConfigurationException; -import java.util.*; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; import com.cloud.vm.dao.InstanceGroupDao; import com.cloud.vm.dao.InstanceGroupVMMapDao; import com.cloud.vm.dao.NicDao; +import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; -import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotManager; -import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Local(value = { UserVmManager.class, UserVmService.class }) @@ -289,6 +243,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use private static final int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 3; // 3 // seconds + public enum UserVmCloneType { + full, + linked + } + @Inject protected HostDao _hostDao = null; @Inject @@ -306,6 +265,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject protected DomainDao _domainDao = null; @Inject + protected UserVmCloneSettingDao _vmCloneSettingDao = null; + @Inject protected UserVmDao _vmDao = null; @Inject protected UserVmJoinDao _vmJoinDao = null; @@ -356,7 +317,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject protected ClusterDao _clusterDao; @Inject - protected StoragePoolDao _storagePoolDao; + protected PrimaryDataStoreDao _storagePoolDao; @Inject protected SecurityGroupManager _securityGroupMgr; @Inject @@ -399,6 +360,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use protected ProjectManager _projectMgr; @Inject protected 
ResourceManager _resourceMgr; + @Inject protected NetworkServiceMapDao _ntwkSrvcDao; @Inject @@ -414,14 +376,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use @Inject VpcManager _vpcMgr; @Inject + TemplateManager templateMgr; + @Inject protected GuestOSCategoryDao _guestOSCategoryDao; @Inject UsageEventDao _usageEventDao; @Inject protected VMSnapshotDao _vmSnapshotDao; - @Inject + @Inject protected VMSnapshotManager _vmSnapshotMgr; - + + @Inject + List plannerSelectors; + protected ScheduledExecutorService _executor = null; protected int _expungeInterval; protected int _expungeDelay; @@ -429,14 +396,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use protected String _name; protected String _instance; protected String _zone; + protected boolean _instanceNameFlag; @Inject ConfigurationDao _configDao; private int _createprivatetemplatefromvolumewait; private int _createprivatetemplatefromsnapshotwait; + private final int MAX_VM_NAME_LEN = 80; @Inject protected OrchestrationService _orchSrvc; + @Inject VolumeManager volumeMgr; + @Override public UserVmVO getVirtualMachine(long vmId) { return _vmDao.findById(vmId); @@ -737,592 +708,21 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - private int getMaxDataVolumesSupported(UserVmVO vm) { - Long hostId = vm.getHostId(); - if (hostId == null) { - hostId = vm.getLastHostId(); - } - HostVO host = _hostDao.findById(hostId); - Integer maxDataVolumesSupported = null; - if (host != null) { - _hostDao.loadDetails(host); - maxDataVolumesSupported = _hypervisorCapabilitiesDao - .getMaxDataVolumesLimit(host.getHypervisorType(), - host.getDetail("product_version")); - } - if (maxDataVolumesSupported == null) { - maxDataVolumesSupported = 6; // 6 data disks by default if nothing - // is specified in - // 'hypervisor_capabilities' table - } - return maxDataVolumesSupported.intValue(); - } - @Override - @ActionEvent(eventType 
= EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) - public Volume attachVolumeToVM(AttachVolumeCmd command) { - Long vmId = command.getVirtualMachineId(); - Long volumeId = command.getId(); - Long deviceId = command.getDeviceId(); - Account caller = UserContext.current().getCaller(); - - // Check that the volume ID is valid - VolumeVO volume = _volsDao.findById(volumeId); - // Check that the volume is a data volume - if (volume == null || volume.getVolumeType() != Volume.Type.DATADISK) { - throw new InvalidParameterValueException( - "Please specify a valid data volume."); - } - - // Check that the volume is not currently attached to any VM - if (volume.getInstanceId() != null) { - throw new InvalidParameterValueException( - "Please specify a volume that is not attached to any VM."); - } - - // Check that the volume is not destroyed - if (volume.getState() == Volume.State.Destroy) { - throw new InvalidParameterValueException( - "Please specify a volume that is not destroyed."); - } - - // Check that the virtual machine ID is valid and it's a user vm - UserVmVO vm = _vmDao.findById(vmId); - if (vm == null || vm.getType() != VirtualMachine.Type.User) { - throw new InvalidParameterValueException( - "Please specify a valid User VM."); - } - - // Check that the VM is in the correct state - if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - throw new InvalidParameterValueException( - "Please specify a VM that is either running or stopped."); - } - - // Check that the device ID is valid - if (deviceId != null) { - if (deviceId.longValue() == 0) { - throw new InvalidParameterValueException( - "deviceId can't be 0, which is used by Root device"); - } - } - - // Check that the number of data volumes attached to VM is less than - // that supported by hypervisor - List existingDataVolumes = _volsDao.findByInstanceAndType( - vmId, Volume.Type.DATADISK); - int maxDataVolumesSupported = getMaxDataVolumesSupported(vm); - 
if (existingDataVolumes.size() >= maxDataVolumesSupported) { - throw new InvalidParameterValueException( - "The specified VM already has the maximum number of data disks (" - + maxDataVolumesSupported - + "). Please specify another VM."); - } - - // Check that the VM and the volume are in the same zone - if (vm.getDataCenterId() != volume.getDataCenterId()) { - throw new InvalidParameterValueException( - "Please specify a VM that is in the same zone as the volume."); - } - - // If local storage is disabled then attaching a volume with local disk - // offering not allowed - DataCenterVO dataCenter = _dcDao.findById(volume.getDataCenterId()); - if (!dataCenter.isLocalStorageEnabled()) { - DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume - .getDiskOfferingId()); - if (diskOffering.getUseLocalStorage()) { - throw new InvalidParameterValueException( - "Zone is not configured to use local storage but volume's disk offering " - + diskOffering.getName() + " uses it"); - } - } - - // permission check - _accountMgr.checkAccess(caller, null, true, volume, vm); - - //check if vm has snapshot, if true: can't attache volume - boolean attach = true; - checkVMSnapshots(vm, volumeId, attach); - - // Check if volume is stored on secondary Storage. - //Check if volume is stored on secondary Storage. - boolean isVolumeOnSec = false; - VolumeHostVO volHostVO = _volumeHostDao.findByVolumeId(volume.getId()); - if (volHostVO != null) { - isVolumeOnSec = true; - if (!(volHostVO.getDownloadState() == Status.DOWNLOADED)) { - throw new InvalidParameterValueException( - "Volume is not uploaded yet. 
Please try this operation once the volume is uploaded"); - } - } - - if (!(Volume.State.Allocated.equals(volume.getState()) - || Volume.State.Ready.equals(volume.getState()) || Volume.State.UploadOp - .equals(volume.getState()))) { - throw new InvalidParameterValueException( - "Volume state must be in Allocated, Ready or in Uploaded state"); - } - - VolumeVO rootVolumeOfVm = null; - List rootVolumesOfVm = _volsDao.findByInstanceAndType(vmId, - Volume.Type.ROOT); - if (rootVolumesOfVm.size() != 1) { - throw new CloudRuntimeException( - "The VM " - + vm.getHostName() - + " has more than one ROOT volume and is in an invalid state."); - } else { - rootVolumeOfVm = rootVolumesOfVm.get(0); - } - - HypervisorType rootDiskHyperType = vm.getHypervisorType(); - - HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume - .getId()); - if (dataDiskHyperType != HypervisorType.None - && rootDiskHyperType != dataDiskHyperType) { - throw new InvalidParameterValueException( - "Can't attach a volume created by: " + dataDiskHyperType - + " to a " + rootDiskHyperType + " vm"); - } - - // allocate deviceId - List vols = _volsDao.findByInstance(vmId); - if (deviceId != null) { - if (deviceId.longValue() > 15 || deviceId.longValue() == 0 - || deviceId.longValue() == 3) { - throw new RuntimeException("deviceId should be 1,2,4-15"); - } - for (VolumeVO vol : vols) { - if (vol.getDeviceId().equals(deviceId)) { - throw new RuntimeException("deviceId " + deviceId - + " is used by VM " + vm.getHostName()); - } - } - } else { - // allocate deviceId here - List devIds = new ArrayList(); - for (int i = 1; i < 15; i++) { - devIds.add(String.valueOf(i)); - } - devIds.remove("3"); - for (VolumeVO vol : vols) { - devIds.remove(vol.getDeviceId().toString().trim()); - } - deviceId = Long.parseLong(devIds.iterator().next()); - } - - boolean createVolumeOnBackend = true; - if (rootVolumeOfVm.getState() == Volume.State.Allocated) { - createVolumeOnBackend = false; - if (isVolumeOnSec) { - 
throw new CloudRuntimeException( - "Cant attach uploaded volume to the vm which is not created. Please start it and then retry"); - } - } - - // create volume on the backend only when vm's root volume is allocated - if (createVolumeOnBackend) { - if (volume.getState().equals(Volume.State.Allocated) - || isVolumeOnSec) { - /* Need to create the volume */ - VMTemplateVO rootDiskTmplt = _templateDao.findById(vm - .getTemplateId()); - DataCenterVO dcVO = _dcDao.findById(vm - .getDataCenterId()); - HostPodVO pod = _podDao.findById(vm.getPodIdToDeployIn()); - StoragePoolVO rootDiskPool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - ServiceOfferingVO svo = _serviceOfferingDao.findById(vm - .getServiceOfferingId()); - DiskOfferingVO diskVO = _diskOfferingDao.findById(volume - .getDiskOfferingId()); - Long clusterId = (rootDiskPool == null ? null : rootDiskPool - .getClusterId()); - - if (!isVolumeOnSec) { - volume = _storageMgr.createVolume(volume, vm, - rootDiskTmplt, dcVO, pod, clusterId, svo, diskVO, - new ArrayList(), volume.getSize(), - rootDiskHyperType); - } else { - try { - // Format of data disk should be the same as root disk - if (!volHostVO - .getFormat() - .getFileExtension() - .equals(_storageMgr - .getSupportedImageFormatForCluster(rootDiskPool - .getClusterId()))) { - throw new InvalidParameterValueException( - "Failed to attach volume to VM since volumes format " - + volHostVO.getFormat() - .getFileExtension() - + " is not compatible with the vm hypervisor type"); - } - - // Check that there is some shared storage. 
- StoragePoolVO vmRootVolumePool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - List sharedVMPools = _storagePoolDao - .findPoolsByTags( - vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), null, - true); - if (sharedVMPools.size() == 0) { - throw new CloudRuntimeException( - "Cannot attach volume since there are no shared storage pools in the VM's cluster to copy the uploaded volume to."); - } - - volume = _storageMgr.copyVolumeFromSecToPrimary(volume, - vm, rootDiskTmplt, dcVO, pod, - rootDiskPool.getClusterId(), svo, diskVO, - new ArrayList(), - volume.getSize(), rootDiskHyperType); - } catch (NoTransitionException e) { - throw new CloudRuntimeException( - "Unable to transition the volume ", e); - } - } - - if (volume == null) { - throw new CloudRuntimeException( - "Failed to create volume when attaching it to VM: " - + vm.getHostName()); - } - } - - StoragePoolVO vmRootVolumePool = _storagePoolDao - .findById(rootVolumeOfVm.getPoolId()); - DiskOfferingVO volumeDiskOffering = _diskOfferingDao - .findById(volume.getDiskOfferingId()); - String[] volumeTags = volumeDiskOffering.getTagsArray(); - - boolean isVolumeOnSharedPool = !volumeDiskOffering - .getUseLocalStorage(); - StoragePoolVO sourcePool = _storagePoolDao.findById(volume - .getPoolId()); - List matchingVMPools = _storagePoolDao - .findPoolsByTags(vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), volumeTags, - isVolumeOnSharedPool); - boolean moveVolumeNeeded = true; - if (matchingVMPools.size() == 0) { - String poolType; - if (vmRootVolumePool.getClusterId() != null) { - poolType = "cluster"; - } else if (vmRootVolumePool.getPodId() != null) { - poolType = "pod"; - } else { - poolType = "zone"; - } - throw new CloudRuntimeException( - "There are no storage pools in the VM's " + poolType - + " with all of the volume's tags (" - + volumeDiskOffering.getTags() + ")."); - } else { - long 
sourcePoolId = sourcePool.getId(); - Long sourcePoolDcId = sourcePool.getDataCenterId(); - Long sourcePoolPodId = sourcePool.getPodId(); - Long sourcePoolClusterId = sourcePool.getClusterId(); - for (StoragePoolVO vmPool : matchingVMPools) { - long vmPoolId = vmPool.getId(); - Long vmPoolDcId = vmPool.getDataCenterId(); - Long vmPoolPodId = vmPool.getPodId(); - Long vmPoolClusterId = vmPool.getClusterId(); - - // Moving a volume is not required if storage pools belongs - // to same cluster in case of shared volume or - // identical storage pool in case of local - if (sourcePoolDcId == vmPoolDcId - && sourcePoolPodId == vmPoolPodId - && sourcePoolClusterId == vmPoolClusterId - && (isVolumeOnSharedPool || sourcePoolId == vmPoolId)) { - moveVolumeNeeded = false; - break; - } - } - } - - if (moveVolumeNeeded) { - if (isVolumeOnSharedPool) { - // Move the volume to a storage pool in the VM's zone, pod, - // or cluster - try { - volume = _storageMgr.moveVolume(volume, - vmRootVolumePool.getDataCenterId(), - vmRootVolumePool.getPodId(), - vmRootVolumePool.getClusterId(), - dataDiskHyperType); - } catch (ConcurrentOperationException e) { - throw new CloudRuntimeException(e.toString()); - } - } else { - throw new CloudRuntimeException( - "Failed to attach local data volume " - + volume.getName() - + " to VM " - + vm.getDisplayName() - + " as migration of local data volume is not allowed"); - } - } - } - - AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor - .getCurrentExecutor(); - if (asyncExecutor != null) { - AsyncJobVO job = asyncExecutor.getJob(); - - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId - + " to vm instance:" + vm.getId() - + ", update async job-" + job.getId() - + " progress status"); - } - - _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); - _asyncMgr.updateAsyncJobStatus(job.getId(), - BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); - } - - String errorMsg = "Failed to attach volume: " + 
volume.getName() - + " to VM: " + vm.getHostName(); - boolean sendCommand = (vm.getState() == State.Running); - AttachVolumeAnswer answer = null; - Long hostId = vm.getHostId(); - if (hostId == null) { - hostId = vm.getLastHostId(); - HostVO host = _hostDao.findById(hostId); - if (host != null - && host.getHypervisorType() == HypervisorType.VMware) { - sendCommand = true; - } - } - - if (sendCommand) { - StoragePoolVO volumePool = _storagePoolDao.findById(volume - .getPoolId()); - AttachVolumeCommand cmd = new AttachVolumeCommand(true, - vm.getInstanceName(), volume.getPoolType(), - volume.getFolder(), volume.getPath(), volume.getName(), - deviceId, volume.getChainInfo()); - cmd.setPoolUuid(volumePool.getUuid()); - - try { - answer = (AttachVolumeAnswer) _agentMgr.send(hostId, cmd); - } catch (Exception e) { - throw new CloudRuntimeException(errorMsg + " due to: " - + e.getMessage()); - } - } - - if (!sendCommand || (answer != null && answer.getResult())) { - // Mark the volume as attached - if (sendCommand) { - _volsDao.attachVolume(volume.getId(), vmId, - answer.getDeviceId()); - } else { - _volsDao.attachVolume(volume.getId(), vmId, deviceId); - } - return _volsDao.findById(volumeId); - } else { - if (answer != null) { - String details = answer.getDetails(); - if (details != null && !details.isEmpty()) { - errorMsg += "; " + details; - } - } - throw new CloudRuntimeException(errorMsg); - } - } private void checkVMSnapshots(UserVmVO vm, Long volumeId, boolean attach) { // Check that if vm has any VM snapshot - Long vmId = vm.getId(); + /*Long vmId = vm.getId(); List listSnapshot = _vmSnapshotDao.listByInstanceId(vmId, VMSnapshot.State.Ready, VMSnapshot.State.Creating, VMSnapshot.State.Reverting, VMSnapshot.State.Expunging); if (listSnapshot != null && listSnapshot.size() != 0) { throw new InvalidParameterValueException( "The VM has VM snapshots, do not allowed to attach volume. 
Please delete the VM snapshots first."); - } + }*/ } - @Override - @ActionEvent(eventType = EventTypes.EVENT_VOLUME_DETACH, eventDescription = "event_detaching_volume1", async = true) - public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { - Account caller = UserContext.current().getCaller(); - if ((cmmd.getId() == null && cmmd.getDeviceId() == null && cmmd - .getVirtualMachineId() == null) - || (cmmd.getId() != null && (cmmd.getDeviceId() != null || cmmd - .getVirtualMachineId() != null)) - || (cmmd.getId() == null && (cmmd.getDeviceId() == null || cmmd - .getVirtualMachineId() == null))) { - throw new InvalidParameterValueException( - "Please provide either a volume id, or a tuple(device id, instance id)"); - } - Long volumeId = cmmd.getId(); - VolumeVO volume = null; - - if (volumeId != null) { - volume = _volsDao.findById(volumeId); - } else { - volume = _volsDao.findByInstanceAndDeviceId( - cmmd.getVirtualMachineId(), cmmd.getDeviceId()).get(0); - } - - Long vmId = null; - - if (cmmd.getVirtualMachineId() == null) { - vmId = volume.getInstanceId(); - } else { - vmId = cmmd.getVirtualMachineId(); - } - - // Check that the volume ID is valid - if (volume == null) { - throw new InvalidParameterValueException( - "Unable to find volume with ID: " + volumeId); - } - - // Permissions check - _accountMgr.checkAccess(caller, null, true, volume); - - // Check that the volume is a data volume - if (volume.getVolumeType() != Volume.Type.DATADISK) { - throw new InvalidParameterValueException( - "Please specify a data volume."); - } - - // Check that the volume is currently attached to a VM - if (vmId == null) { - throw new InvalidParameterValueException( - "The specified volume is not attached to a VM."); - } - - // Check that the VM is in the correct state - UserVmVO vm = _vmDao.findById(vmId); - if (vm.getState() != State.Running && vm.getState() != State.Stopped - && vm.getState() != State.Destroyed) { - throw new InvalidParameterValueException( - "Please specify a 
VM that is either running or stopped."); - } - - // Check that if the volume has snapshot - boolean attach = false; - checkVMSnapshots(vm, volumeId, attach); - AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor.getCurrentExecutor(); - if (asyncExecutor != null) { - AsyncJobVO job = asyncExecutor.getJob(); - - if (s_logger.isInfoEnabled()) { - s_logger.info("Trying to attaching volume " + volumeId - + "to vm instance:" + vm.getId() - + ", update async job-" + job.getId() - + " progress status"); - } - - _asyncMgr.updateAsyncJobAttachment(job.getId(), "volume", volumeId); - _asyncMgr.updateAsyncJobStatus(job.getId(), - BaseCmd.PROGRESS_INSTANCE_CREATED, volumeId); - } - - String errorMsg = "Failed to detach volume: " + volume.getName() - + " from VM: " + vm.getHostName(); - boolean sendCommand = (vm.getState() == State.Running); - Answer answer = null; - - if (sendCommand) { - AttachVolumeCommand cmd = new AttachVolumeCommand(false, - vm.getInstanceName(), volume.getPoolType(), - volume.getFolder(), volume.getPath(), volume.getName(), - cmmd.getDeviceId() != null ? 
cmmd.getDeviceId() : volume - .getDeviceId(), volume.getChainInfo()); - - StoragePoolVO volumePool = _storagePoolDao.findById(volume - .getPoolId()); - cmd.setPoolUuid(volumePool.getUuid()); - - try { - answer = _agentMgr.send(vm.getHostId(), cmd); - } catch (Exception e) { - throw new CloudRuntimeException(errorMsg + " due to: " - + e.getMessage()); - } - } - - if (!sendCommand || (answer != null && answer.getResult())) { - // Mark the volume as detached - _volsDao.detachVolume(volume.getId()); - if (answer != null && answer instanceof AttachVolumeAnswer) { - volume.setChainInfo(((AttachVolumeAnswer) answer) - .getChainInfo()); - _volsDao.update(volume.getId(), volume); - } - - return _volsDao.findById(volumeId); - } else { - - if (answer != null) { - String details = answer.getDetails(); - if (details != null && !details.isEmpty()) { - errorMsg += "; " + details; - } - } - - throw new CloudRuntimeException(errorMsg); - } - } - - @Override - public boolean attachISOToVM(long vmId, long isoId, boolean attach) { - UserVmVO vm = _vmDao.findById(vmId); - - if (vm == null) { - return false; - } else if (vm.getState() != State.Running) { - return true; - } - String isoPath; - VMTemplateVO tmplt = _templateDao.findById(isoId); - if (tmplt == null) { - s_logger.warn("ISO: " + isoId + " does not exist"); - return false; - } - // Get the path of the ISO - Pair isoPathPair = null; - if (tmplt.getTemplateType() == TemplateType.PERHOST) { - isoPath = tmplt.getName(); - } else { - isoPathPair = _storageMgr.getAbsoluteIsoPath(isoId, - vm.getDataCenterId()); - if (isoPathPair == null) { - s_logger.warn("Couldn't get absolute iso path"); - return false; - } else { - isoPath = isoPathPair.first(); - } - } - - String vmName = vm.getInstanceName(); - - HostVO host = _hostDao.findById(vm.getHostId()); - if (host == null) { - s_logger.warn("Host: " + vm.getHostId() + " does not exist"); - return false; - } - AttachIsoCommand cmd = new AttachIsoCommand(vmName, isoPath, attach); - if 
(isoPathPair != null) { - cmd.setStoreUrl(isoPathPair.second()); - } - Answer a = _agentMgr.easySend(vm.getHostId(), cmd); - - return (a != null && a.getResult()); - } private UserVm rebootVirtualMachine(long userId, long vmId) throws InsufficientCapacityException, ResourceUnavailableException { @@ -1361,14 +761,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new InvalidParameterValueException( "unable to find a virtual machine with id " + vmId); } - + _accountMgr.checkAccess(caller, null, true, vmInstance); // Check that the specified service offering ID is valid _itMgr.checkIfCanUpgrade(vmInstance, svcOffId); // remove diskAndMemory VM snapshots - List vmSnapshots = _vmSnapshotDao.findByVm(vmId); + /* List vmSnapshots = _vmSnapshotDao.findByVm(vmId); for (VMSnapshotVO vmSnapshotVO : vmSnapshots) { if(vmSnapshotVO.getType() == VMSnapshot.Type.DiskAndMemory){ if(!_vmSnapshotMgr.deleteAllVMSnapshots(vmId, VMSnapshot.Type.DiskAndMemory)){ @@ -1376,10 +776,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use s_logger.debug(errMsg); throw new CloudRuntimeException(errMsg); } - + } - } - + }*/ + _itMgr.upgradeVmDb(vmId, svcOffId); return _vmDao.findById(vmInstance.getId()); @@ -1402,7 +802,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } NicProfile profile = new NicProfile(null, null); if(ipAddress != null) { - profile = new NicProfile(ipAddress, null); + profile = new NicProfile(ipAddress, null); } // Perform permission check on VM @@ -1422,7 +822,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new PermissionDeniedException("Unable to modify a vm using network with id " + network.getId() + ", permission denied"); } } - + //ensure network belongs in zone if (network.getDataCenterId() != vmInstance.getDataCenterId()) { throw new CloudRuntimeException(vmInstance + " is in zone:" + vmInstance.getDataCenterId() + " 
but " + network + " is in zone:" + network.getDataCenterId()); @@ -1438,7 +838,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new CloudRuntimeException(network + " already has a vm with host name: '" + vmInstance.getHostName()); } } - + NicProfile guestNic = null; try { @@ -1499,14 +899,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new PermissionDeniedException("Unable to modify a vm using network with id " + network.getId() + ", permission denied"); } } - + boolean nicremoved = false; try { nicremoved = _itMgr.removeNicFromVm(vmInstance, nic); } catch (ResourceUnavailableException e) { throw new CloudRuntimeException("Unable to remove " + network + " from " + vmInstance +": " + e); - + } catch (ConcurrentOperationException e) { throw new CloudRuntimeException("Concurrent operations on removing " + network + " from " + vmInstance + ": " + e); } @@ -1514,19 +914,19 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (!nicremoved) { throw new CloudRuntimeException("Unable to remove " + network + " from " + vmInstance ); } - + s_logger.debug("Successful removal of " + network + " from " + vmInstance); return _vmDao.findById(vmInstance.getId()); - + } - + @Override public UserVm updateDefaultNicForVirtualMachine(UpdateDefaultNicForVMCmd cmd) throws InvalidParameterValueException, CloudRuntimeException { Long vmId = cmd.getVmId(); Long nicId = cmd.getNicId(); Account caller = UserContext.current().getCaller(); - + UserVmVO vmInstance = _vmDao.findById(vmId); if (vmInstance == null){ throw new InvalidParameterValueException("unable to find a virtual machine with id " + vmId); @@ -1539,7 +939,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (network == null){ throw new InvalidParameterValueException("unable to find a network with id " + nic.getNetworkId()); } - + // Perform permission check on VM 
_accountMgr.checkAccess(caller, null, true, vmInstance); @@ -1551,7 +951,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use // no need to check permissions for network, we'll enumerate the ones they already have access to Network existingdefaultnet = _networkModel.getDefaultNetworkForVm(vmId); - + //check to see if nic is attached to VM if (nic.getInstanceId() != vmId) { throw new InvalidParameterValueException(nic + " is not a nic on " + vmInstance); @@ -1565,7 +965,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if ((vmInstance.getState() != State.Running) && (vmInstance.getState() != State.Stopped)) { throw new CloudRuntimeException("refusing to set default " + vmInstance + " is not Running or Stopped"); } - + NicProfile existing = null; List nicProfiles = _networkMgr.getNicProfiles(vmInstance); for (NicProfile nicProfile : nicProfiles) { @@ -1594,26 +994,26 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use Network newdefault = null; newdefault = _networkModel.getDefaultNetworkForVm(vmId); - - if (newdefault == null){ - nic.setDefaultNic(false); - nic.setDeviceId(chosenID); - existingVO.setDefaultNic(true); - existingVO.setDeviceId(existingID); - nic = _nicDao.persist(nic); - existingVO = _nicDao.persist(existingVO); - - newdefault = _networkModel.getDefaultNetworkForVm(vmId); - if (newdefault.getId() == existingdefaultnet.getId()) { - throw new CloudRuntimeException("Setting a default nic failed, and we had no default nic, but we were able to set it back to the original"); - } - throw new CloudRuntimeException("Failed to change default nic to " + nic + " and now we have no default"); + if (newdefault == null){ + nic.setDefaultNic(false); + nic.setDeviceId(chosenID); + existingVO.setDefaultNic(true); + existingVO.setDeviceId(existingID); + + nic = _nicDao.persist(nic); + existingVO = _nicDao.persist(existingVO); + + newdefault = 
_networkModel.getDefaultNetworkForVm(vmId); + if (newdefault.getId() == existingdefaultnet.getId()) { + throw new CloudRuntimeException("Setting a default nic failed, and we had no default nic, but we were able to set it back to the original"); + } + throw new CloudRuntimeException("Failed to change default nic to " + nic + " and now we have no default"); } else if (newdefault.getId() == nic.getNetworkId()) { s_logger.debug("successfully set default network to " + network + " for " + vmInstance); return _vmDao.findById(vmInstance.getId()); } - + throw new CloudRuntimeException("something strange happened, new default network(" + newdefault.getId() + ") is not null, and is not equal to the network(" + nic.getNetworkId() + ") of the chosen nic"); } @@ -1809,6 +1209,15 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use VirtualMachine.State.getStateMachine().registerListener( new UserVmStateListener(_usageEventDao, _networkDao, _nicDao)); + value = _configDao.getValue(Config.SetVmInternalNameUsingDisplayName.key()); + if(value == null) { + _instanceNameFlag = false; + } + else + { + _instanceNameFlag = Boolean.parseBoolean(value); + } + s_logger.info("User VM Manager is configured."); return true; @@ -1962,474 +1371,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - @Override - @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", create = true) - public VMTemplateVO createPrivateTemplateRecord(CreateTemplateCmd cmd, - Account templateOwner) throws ResourceAllocationException { - Long userId = UserContext.current().getCallerUserId(); - - Account caller = UserContext.current().getCaller(); - boolean isAdmin = (isAdmin(caller.getType())); - - _accountMgr.checkAccess(caller, null, true, templateOwner); - - String name = cmd.getTemplateName(); - if ((name == null) || (name.length() > 32)) { - throw new InvalidParameterValueException( - "Template name 
cannot be null and should be less than 32 characters"); - } - - if (cmd.getTemplateTag() != null) { - if (!_accountService.isRootAdmin(caller.getType())) { - throw new PermissionDeniedException( - "Parameter templatetag can only be specified by a Root Admin, permission denied"); - } - } - - // do some parameter defaulting - Integer bits = cmd.getBits(); - Boolean requiresHvm = cmd.getRequiresHvm(); - Boolean passwordEnabled = cmd.isPasswordEnabled(); - Boolean isPublic = cmd.isPublic(); - Boolean featured = cmd.isFeatured(); - int bitsValue = ((bits == null) ? 64 : bits.intValue()); - boolean requiresHvmValue = ((requiresHvm == null) ? true : requiresHvm - .booleanValue()); - boolean passwordEnabledValue = ((passwordEnabled == null) ? false - : passwordEnabled.booleanValue()); - if (isPublic == null) { - isPublic = Boolean.FALSE; - } - boolean allowPublicUserTemplates = Boolean.parseBoolean(_configDao - .getValue("allow.public.user.templates")); - if (!isAdmin && !allowPublicUserTemplates && isPublic) { - throw new PermissionDeniedException("Failed to create template " - + name + ", only private templates can be created."); - } - - Long volumeId = cmd.getVolumeId(); - Long snapshotId = cmd.getSnapshotId(); - if ((volumeId == null) && (snapshotId == null)) { - throw new InvalidParameterValueException( - "Failed to create private template record, neither volume ID nor snapshot ID were specified."); - } - if ((volumeId != null) && (snapshotId != null)) { - throw new InvalidParameterValueException( - "Failed to create private template record, please specify only one of volume ID (" - + volumeId - + ") and snapshot ID (" - + snapshotId - + ")"); - } - - HypervisorType hyperType; - VolumeVO volume = null; - VMTemplateVO privateTemplate = null; - if (volumeId != null) { // create template from volume - volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new InvalidParameterValueException( - "Failed to create private template record, unable to find volume 
" - + volumeId); - } - // check permissions - _accountMgr.checkAccess(caller, null, true, volume); - - // If private template is created from Volume, check that the volume - // will not be active when the private template is - // created - if (!_storageMgr.volumeInactive(volume)) { - String msg = "Unable to create private template for volume: " - + volume.getName() - + "; volume is attached to a non-stopped VM, please stop the VM first"; - if (s_logger.isInfoEnabled()) { - s_logger.info(msg); - } - throw new CloudRuntimeException(msg); - } - hyperType = _volsDao.getHypervisorType(volumeId); - } else { // create template from snapshot - SnapshotVO snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new InvalidParameterValueException( - "Failed to create private template record, unable to find snapshot " - + snapshotId); - } - - volume = _volsDao.findById(snapshot.getVolumeId()); - - // check permissions - _accountMgr.checkAccess(caller, null, true, snapshot); - - if (snapshot.getState() != Snapshot.State.BackedUp) { - throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for template creation"); - } - - /* - * // bug #11428. 
Operation not supported if vmware and snapshots - * parent volume = ROOT if(snapshot.getHypervisorType() == - * HypervisorType.VMware && snapshotVolume.getVolumeType() == - * Type.DATADISK){ throw new UnsupportedServiceException( - * "operation not supported, snapshot with id " + snapshotId + - * " is created from Data Disk"); } - */ - - hyperType = snapshot.getHypervisorType(); - } - - _resourceLimitMgr.checkResourceLimit(templateOwner, - ResourceType.template); - - if (!isAdmin || featured == null) { - featured = Boolean.FALSE; - } - Long guestOSId = cmd.getOsTypeId(); - GuestOSVO guestOS = _guestOSDao.findById(guestOSId); - if (guestOS == null) { - throw new InvalidParameterValueException("GuestOS with ID: " - + guestOSId + " does not exist."); - } - - String uniqueName = Long.valueOf((userId == null) ? 1 : userId) - .toString() - + UUID.nameUUIDFromBytes(name.getBytes()).toString(); - Long nextTemplateId = _templateDao.getNextInSequence(Long.class, "id"); - String description = cmd.getDisplayText(); - boolean isExtractable = false; - Long sourceTemplateId = null; - if (volume != null) { - VMTemplateVO template = ApiDBUtils.findTemplateById(volume - .getTemplateId()); - isExtractable = template != null - && template.isExtractable() - && template.getTemplateType() != Storage.TemplateType.SYSTEM; - if (template != null) { - sourceTemplateId = template.getId(); - } else if (volume.getVolumeType() == Type.ROOT) { // vm created out - // of blank - // template - UserVm userVm = ApiDBUtils.findUserVmById(volume - .getInstanceId()); - sourceTemplateId = userVm.getIsoId(); - } - } - String templateTag = cmd.getTemplateTag(); - if (templateTag != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Adding template tag: " + templateTag); - } - } - privateTemplate = new VMTemplateVO(nextTemplateId, uniqueName, name, - ImageFormat.RAW, isPublic, featured, isExtractable, - TemplateType.USER, null, null, requiresHvmValue, bitsValue, - templateOwner.getId(), null, 
description, passwordEnabledValue, - guestOS.getId(), true, hyperType, templateTag, cmd.getDetails()); - if (sourceTemplateId != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("This template is getting created from other template, setting source template Id to: " - + sourceTemplateId); - } - } - privateTemplate.setSourceTemplateId(sourceTemplateId); - - VMTemplateVO template = _templateDao.persist(privateTemplate); - // Increment the number of templates - if (template != null) { - if (cmd.getDetails() != null) { - _templateDetailsDao.persist(template.getId(), cmd.getDetails()); - } - - _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), - ResourceType.template); - } - - if (template != null) { - return template; - } else { - throw new CloudRuntimeException("Failed to create a template"); - } - - } - - @Override - @DB - @ActionEvent(eventType = EventTypes.EVENT_TEMPLATE_CREATE, eventDescription = "creating template", async = true) - public VMTemplateVO createPrivateTemplate(CreateTemplateCmd command) - throws CloudRuntimeException { - Long userId = UserContext.current().getCallerUserId(); - if (userId == null) { - userId = User.UID_SYSTEM; - } - long templateId = command.getEntityId(); - Long volumeId = command.getVolumeId(); - Long snapshotId = command.getSnapshotId(); - SnapshotCommand cmd = null; - VMTemplateVO privateTemplate = null; - - String uniqueName = getRandomPrivateTemplateName(); - - StoragePoolVO pool = null; - HostVO secondaryStorageHost = null; - Long zoneId = null; - Long accountId = null; - SnapshotVO snapshot = null; - String secondaryStorageURL = null; - try { - if (snapshotId != null) { // create template from snapshot - snapshot = _snapshotDao.findById(snapshotId); - if (snapshot == null) { - throw new CloudRuntimeException( - "Unable to find Snapshot for Id " + snapshotId); - } - zoneId = snapshot.getDataCenterId(); - secondaryStorageHost = _snapshotMgr - .getSecondaryStorageHost(snapshot); - secondaryStorageURL = 
_snapshotMgr - .getSecondaryStorageURL(snapshot); - String name = command.getTemplateName(); - String backupSnapshotUUID = snapshot.getBackupSnapshotId(); - if (backupSnapshotUUID == null) { - throw new CloudRuntimeException( - "Unable to create private template from snapshot " - + snapshotId - + " due to there is no backupSnapshotUUID for this snapshot"); - } - - Long dcId = snapshot.getDataCenterId(); - accountId = snapshot.getAccountId(); - volumeId = snapshot.getVolumeId(); - - String origTemplateInstallPath = null; - List pools = _storageMgr - .ListByDataCenterHypervisor(zoneId, - snapshot.getHypervisorType()); - if (pools == null || pools.size() == 0) { - throw new CloudRuntimeException( - "Unable to find storage pools in zone " + zoneId); - } - pool = pools.get(0); - if (snapshot.getVersion() != null - && snapshot.getVersion().equalsIgnoreCase("2.1")) { - VolumeVO volume = _volsDao - .findByIdIncludingRemoved(volumeId); - if (volume == null) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " - + snapshotId - + " due to unable to find orignal volume:" - + volumeId + ", try it later "); - } - if (volume.getTemplateId() == null) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", - "2.2"); - } else { - VMTemplateVO template = _templateDao - .findByIdIncludingRemoved(volume - .getTemplateId()); - if (template == null) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " - + snapshotId - + " due to unalbe to find orignal template :" - + volume.getTemplateId() - + ", try it later "); - } - Long origTemplateId = template.getId(); - Long origTmpltAccountId = template.getAccountId(); - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "failed to upgrade snapshot " + snapshotId - + " due to volume:" + volumeId - + " is being used, try it later "); - } - cmd = new UpgradeSnapshotCommand(null, - secondaryStorageURL, dcId, accountId, volumeId, - origTemplateId, origTmpltAccountId, null, - 
snapshot.getBackupSnapshotId(), - snapshot.getName(), "2.1"); - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template failed due to volume:" - + volumeId - + " is being used, try it later "); - } - Answer answer = null; - try { - answer = _storageMgr.sendToPool(pool, cmd); - cmd = null; - } catch (StorageUnavailableException e) { - } finally { - _volsDao.unlockFromLockTable(volumeId.toString()); - } - if ((answer != null) && answer.getResult()) { - _snapshotDao.updateSnapshotVersion(volumeId, "2.1", - "2.2"); - } else { - throw new CloudRuntimeException( - "Unable to upgrade snapshot"); - } - } - } - if (snapshot.getSwiftId() != null && snapshot.getSwiftId() != 0) { - _snapshotMgr.downloadSnapshotsFromSwift(snapshot); - } - cmd = new CreatePrivateTemplateFromSnapshotCommand(pool, secondaryStorageURL, dcId, accountId, snapshot.getVolumeId(), backupSnapshotUUID, snapshot.getName(), - origTemplateInstallPath, templateId, name, _createprivatetemplatefromsnapshotwait); - } else if (volumeId != null) { - VolumeVO volume = _volsDao.findById(volumeId); - if (volume == null) { - throw new CloudRuntimeException( - "Unable to find volume for Id " + volumeId); - } - accountId = volume.getAccountId(); - - if (volume.getPoolId() == null) { - _templateDao.remove(templateId); - throw new CloudRuntimeException("Volume " + volumeId - + " is empty, can't create template on it"); - } - String vmName = _storageMgr.getVmNameOnVolume(volume); - zoneId = volume.getDataCenterId(); - secondaryStorageHost = _storageMgr - .getSecondaryStorageHost(zoneId); - if (secondaryStorageHost == null) { - throw new CloudRuntimeException( - "Can not find the secondary storage for zoneId " - + zoneId); - } - secondaryStorageURL = secondaryStorageHost.getStorageUrl(); - - pool = _storagePoolDao.findById(volume.getPoolId()); - cmd = new CreatePrivateTemplateFromVolumeCommand(pool, secondaryStorageURL, templateId, accountId, 
command.getTemplateName(), uniqueName, volume.getPath(), vmName, _createprivatetemplatefromvolumewait); - - } else { - throw new CloudRuntimeException( - "Creating private Template need to specify snapshotId or volumeId"); - } - // FIXME: before sending the command, check if there's enough - // capacity - // on the storage server to create the template - - // This can be sent to a KVM host too. - CreatePrivateTemplateAnswer answer = null; - if (snapshotId != null) { - if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template from snapshot failed due to snapshot:" - + snapshotId - + " is being used, try it later "); - } - } else { - if (!_volsDao.lockInLockTable(volumeId.toString(), 10)) { - throw new CloudRuntimeException( - "Creating template from volume failed due to volume:" - + volumeId - + " is being used, try it later "); - } - } - try { - answer = (CreatePrivateTemplateAnswer) _storageMgr.sendToPool( - pool, cmd); - } catch (StorageUnavailableException e) { - } finally { - if (snapshotId != null) { - _snapshotDao.unlockFromLockTable(snapshotId.toString()); - } else { - _volsDao.unlockFromLockTable(volumeId.toString()); - } - } - if ((answer != null) && answer.getResult()) { - privateTemplate = _templateDao.findById(templateId); - String answerUniqueName = answer.getUniqueName(); - if (answerUniqueName != null) { - privateTemplate.setUniqueName(answerUniqueName); - } else { - privateTemplate.setUniqueName(uniqueName); - } - ImageFormat format = answer.getImageFormat(); - if (format != null) { - privateTemplate.setFormat(format); - } else { - // This never occurs. - // Specify RAW format makes it unusable for snapshots. 
- privateTemplate.setFormat(ImageFormat.RAW); - } - - String checkSum = getChecksum(secondaryStorageHost.getId(), - answer.getPath()); - - Transaction txn = Transaction.currentTxn(); - - txn.start(); - - privateTemplate.setChecksum(checkSum); - _templateDao.update(templateId, privateTemplate); - - // add template zone ref for this template - _templateDao.addTemplateToZone(privateTemplate, zoneId); - VMTemplateHostVO templateHostVO = new VMTemplateHostVO( - secondaryStorageHost.getId(), templateId); - templateHostVO.setDownloadPercent(100); - templateHostVO.setDownloadState(Status.DOWNLOADED); - templateHostVO.setInstallPath(answer.getPath()); - templateHostVO.setLastUpdated(new Date()); - templateHostVO.setSize(answer.getVirtualSize()); - templateHostVO.setPhysicalSize(answer.getphysicalSize()); - _templateHostDao.persist(templateHostVO); - - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_TEMPLATE_CREATE, privateTemplate.getAccountId(), - secondaryStorageHost.getDataCenterId(), privateTemplate.getId(), - privateTemplate.getName(), null, privateTemplate.getSourceTemplateId(), - templateHostVO.getSize(), VirtualMachineTemplate.class.getName(), privateTemplate.getUuid()); - txn.commit(); - } - } finally { - if (snapshot != null && snapshot.getSwiftId() != null - && secondaryStorageURL != null && zoneId != null - && accountId != null && volumeId != null) { - _snapshotMgr.deleteSnapshotsForVolume(secondaryStorageURL, - zoneId, accountId, volumeId); - } - if (privateTemplate == null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - // Remove the template record - _templateDao.expunge(templateId); - - // decrement resource count - if (accountId != null) { - _resourceLimitMgr.decrementResourceCount(accountId, - ResourceType.template); - } - txn.commit(); - } - } - - if (privateTemplate != null) { - return privateTemplate; - } else { - throw new CloudRuntimeException("Failed to create a template"); - } - } - - @Override - public String getChecksum(Long 
hostId, String templatePath) { - HostVO ssHost = _hostDao.findById(hostId); - Host.Type type = ssHost.getType(); - if (type != Host.Type.SecondaryStorage - && type != Host.Type.LocalSecondaryStorage) { - return null; - } - String secUrl = ssHost.getStorageUrl(); - Answer answer; - answer = _agentMgr.sendToSecStorage(ssHost, new ComputeChecksumCommand( - secUrl, templatePath)); - if (answer != null && answer.getResult()) { - return answer.getDetails(); - } - return null; - } - // used for vm transitioning to error state private void updateVmStateForFailedVmCreation(Long vmId, Long hostId) { @@ -2449,14 +1390,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List volumesForThisVm = _volsDao .findUsableVolumesForInstance(vm.getId()); for (VolumeVO volume : volumesForThisVm) { - try { - if (volume.getState() != Volume.State.Destroy) { - _storageMgr.destroyVolume(volume); - } - } catch (ConcurrentOperationException e) { - s_logger.warn("Unable to delete volume:" - + volume.getId() + " for vm:" + vmId - + " whilst transitioning to error state"); + if (volume.getState() != Volume.State.Destroy) { + this.volumeMgr.destroyVolume(volume); } } String msg = "Failed to deploy Vm with Id: " + vmId + ", on Host with Id: " + hostId; @@ -2672,6 +1607,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use _accountMgr.checkAccess(caller, null, true, vmInstance); + // If the VM is Volatile in nature, on reboot discard the VM's root disk and create a new root disk for it: by calling restoreVM + long serviceOfferingId = vmInstance.getServiceOfferingId(); + ServiceOfferingVO offering = _serviceOfferingDao.findById(serviceOfferingId); + if(offering != null && offering.getRemoved() == null) { + if(offering.getVolatileVm()){ + return restoreVMInternal(caller, vmInstance, null); + } + } else { + throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId + " corresponding to the vm"); + } 
+ return rebootVirtualMachine(UserContext.current().getCallerUserId(), vmId); } @@ -3002,7 +1948,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (isSecurityGroupEnabled) { if (networkIdList.size() > 1) { throw new InvalidParameterValueException("Can't create a vm with multiple networks one of" + - " which is Security Group enabled"); + " which is Security Group enabled"); } isSecurityGroupEnabledNetworkUsed = true; @@ -3010,7 +1956,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (!(network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Shared)) { throw new InvalidParameterValueException("Can specify only Shared Guest networks when" + - " deploy vm in Advance Security Group enabled zone"); + " deploy vm in Advance Security Group enabled zone"); } // Perform account permission check @@ -3023,8 +1969,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use // if network is security group enabled, and no security group is specified, then add the default security group automatically if (isSecurityGroupEnabledNetworkUsed && !isVmWare && _networkModel.canAddDefaultSecurityGroup()) { - - //add the default securityGroup only if no security group is specified + + //add the default securityGroup only if no security group is specified if(securityGroupIdList == null || securityGroupIdList.isEmpty()){ if (securityGroupIdList == null) { securityGroupIdList = new ArrayList(); @@ -3148,7 +2094,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - + _networkModel.checkNetworkPermissions(owner, network); // don't allow to use system networks @@ -3167,10 +2113,18 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, userData, 
sshKeyPair, hypervisor, caller, requestedIps, defaultIps, keyboard); } + + public void checkNameForRFCCompliance(String name) { + if (!NetUtils.verifyDomainNameLabel(name, true)) { + throw new InvalidParameterValueException("Invalid name. Vm name can contain ASCII letters 'a' through 'z', the digits '0' through '9', " + + "and the hyphen ('-'), must be between 1 and 63 characters long, and can't start or end with \"-\" and can't start with digit"); + } + } + @DB @ActionEvent(eventType = EventTypes.EVENT_VM_CREATE, eventDescription = "deploying Vm", create = true) protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, String hostName, String displayName, Account owner, Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, String userData, String sshKeyPair, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, String keyboard) - throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException { + throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException { _accountMgr.checkAccess(caller, null, true, owner); @@ -3225,13 +2179,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - // check if we have available pools for vm deployment - long availablePools = _storagePoolDao - .countPoolsByStatus(StoragePoolStatus.Up); - if (availablePools < 1) { - throw new StorageUnavailableException( - "There are no available pools in the UP state for vm deployment", - -1); + if (template.getHypervisorType() != null && template.getHypervisorType() != HypervisorType.BareMetal) { + // check if we have available pools for vm deployment + long availablePools = _storagePoolDao.countPoolsByStatus(StoragePoolStatus.Up); + if 
(availablePools < 1) { + throw new StorageUnavailableException("There are no available pools in the UP state for vm deployment", -1); + } } if (template.getTemplateType().equals(TemplateType.SYSTEM)) { @@ -3278,7 +2231,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } List> networks = new ArrayList>(); - + Map networkNicMap = new HashMap(); short defaultNetworkNumber = 0; @@ -3295,20 +2248,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (requestedIps != null && !requestedIps.isEmpty()) { requestedIpPair = requestedIps.get(network.getId()); } - + if (requestedIpPair == null) { - requestedIpPair = new IpAddresses(null, null); + requestedIpPair = new IpAddresses(null, null); } else { - checkRequestedIpAddresses(requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); + _networkModel.checkRequestedIpAddresses(network.getId(), requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); } - + NicProfile profile = new NicProfile(requestedIpPair.getIp4Address(), requestedIpPair.getIp6Address()); if (defaultNetworkNumber == 0) { defaultNetworkNumber++; // if user requested specific ip for default network, add it if (defaultIps.getIp4Address() != null || defaultIps.getIp6Address() != null) { - checkRequestedIpAddresses(defaultIps.getIp4Address(), defaultIps.getIp6Address()); + _networkModel.checkRequestedIpAddresses(network.getId(), defaultIps.getIp4Address(), defaultIps.getIp6Address()); profile = new NicProfile(defaultIps.getIp4Address(), defaultIps.getIp6Address()); } @@ -3354,8 +2307,23 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use long id = _vmDao.getNextInSequence(Long.class, "id"); - String instanceName = VirtualMachineName.getVmName(id, owner.getId(), - _instance); + String instanceName; + if (_instanceNameFlag && displayName != null) { + // Check if the displayName conforms to RFC standards. 
+ checkNameForRFCCompliance(displayName); + instanceName = VirtualMachineName.getVmName(id, owner.getId(), displayName); + if (instanceName.length() > MAX_VM_NAME_LEN) { + throw new InvalidParameterValueException("Specified display name " + displayName + " causes VM name to exceed 80 characters in length"); + } + // Search whether there is already an instance with the same instance name + // that is not in the destroyed or expunging state. + VMInstanceVO vm = _vmInstanceDao.findVMByInstanceName(instanceName); + if (vm != null && vm.getState() != VirtualMachine.State.Expunging) { + throw new InvalidParameterValueException("There already exists a VM by the display name supplied"); + } + } else { + instanceName = VirtualMachineName.getVmName(id, owner.getId(), _instance); + } String uuidName = UUID.randomUUID().toString(); @@ -3363,12 +2331,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (hostName == null) { hostName = uuidName; } else { - // 1) check is hostName is RFC complient - if (!NetUtils.verifyDomainNameLabel(hostName, true)) { - throw new InvalidParameterValueException( - "Invalid name. Vm name can contain ASCII letters 'a' through 'z', the digits '0' through '9', " - + "and the hyphen ('-'), must be between 1 and 63 characters long, and can't start or end with \"-\" and can't start with digit"); - } + //1) check is hostName is RFC compliant + checkNameForRFCCompliance(hostName); // 2) hostName has to be unique in the network domain Map> ntwkDomains = new HashMap>(); for (NetworkVO network : networkList) { @@ -3426,6 +2390,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use vm.setIsoId(template.getId()); } + // If hypervisor is vSphere, check for clone type setting. + if (hypervisorType.equals(HypervisorType.VMware)) { + // retrieve clone flag. 
+ UserVmCloneType cloneType = UserVmCloneType.linked; + String value = _configDao.getValue(Config.VmwareCreateFullClone.key()); + if (value != null) { + if (Boolean.parseBoolean(value) == true) + cloneType = UserVmCloneType.full; + } + UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(id, cloneType.toString()); + _vmCloneSettingDao.persist(vmCloneSettingVO); + } + + _vmDao.persist(vm); _vmDao.saveDetails(vm); @@ -3450,7 +2428,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use VirtualMachineEntity vmEntity = _orchSrvc.createVirtualMachine(vm.getUuid(), new Long(owner.getAccountId()).toString(), new Long(template.getId()).toString(), hostName, displayName, hypervisor.name(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan); } - + if (s_logger.isDebugEnabled()) { s_logger.debug("Successfully allocated DB entry for " + vm); @@ -3486,20 +2464,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use return vm; } - private void checkRequestedIpAddresses(String ip4, String ip6) throws InvalidParameterValueException { - if (ip4 != null) { - if (!NetUtils.isValidIp(ip4)) { - throw new InvalidParameterValueException("Invalid specified IPv4 address " + ip4); - } - } - if (ip6 != null) { - if (!NetUtils.isValidIpv6(ip6)) { - throw new InvalidParameterValueException("Invalid specified IPv6 address " + ip6); - } - } - } - - private void validateUserData(String userData) { + private void validateUserData(String userData) { byte[] decodedUserData = null; if (userData != null) { if (!Base64.isBase64(userData)) { @@ -3533,7 +2498,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use protected UserVm startVirtualMachine(DeployVMCmd cmd, Map additonalParams) throws ResourceUnavailableException, InsufficientCapacityException, - ConcurrentOperationException { + ConcurrentOperationException { long vmId = 
cmd.getEntityId(); Long hostId = cmd.getHostId(); @@ -3579,7 +2544,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use + vm.getIsoId()); } - Pair isoPathPair = _storageMgr.getAbsoluteIsoPath( + Pair isoPathPair = this.templateMgr.getAbsoluteIsoPath( template.getId(), vm.getDataCenterId()); if (template.getTemplateType() == TemplateType.PERHOST) { @@ -3930,9 +2895,20 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - - String reservationId = vmEntity.reserve("FirstFitPlanner", plan, new ExcludeList(), new Long(callerUser.getId()).toString()); - vmEntity.deploy(reservationId, new Long(callerUser.getId()).toString()); + + String plannerName = null; + for (DeployPlannerSelector dps : plannerSelectors) { + plannerName = dps.selectPlanner(vm); + if (plannerName != null) { + break; + } + } + if (plannerName == null) { + throw new CloudRuntimeException(String.format("cannot find DeployPlannerSelector for vm[uuid:%s, hypervisorType:%s]", vm.getUuid(), vm.getHypervisorType())); + } + + String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString()); + vmEntity.deploy(reservationId, new Long(callerUser.getId()).toString(), params); Pair> vmParamPair = new Pair(vm, params); if (vm != null && vm.isUpdateParameters()) { @@ -4092,7 +3068,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use if (tags != null && !tags.isEmpty()) { int count = 0; - for (String key : tags.keySet()) { + for (String key : tags.keySet()) { sc.setParameters("key" + String.valueOf(count), key); sc.setParameters("value" + String.valueOf(count), tags.get(key)); count++; @@ -4212,7 +3188,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, 
ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, - ResourceAllocationException { + ResourceAllocationException { // TODO Auto-generated method stub return null; } @@ -4580,12 +3556,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List securityGroupIdList = cmd.getSecurityGroupIdList(); if (zone.getNetworkType() == NetworkType.Basic) { - if (networkIdList != null && !networkIdList.isEmpty()) { + if (networkIdList != null && !networkIdList.isEmpty()) { throw new InvalidParameterValueException( "Can't move vm with network Ids; this is a basic zone VM"); - } + } // cleanup the old security groups - _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); + _securityGroupMgr.removeInstanceFromGroups(cmd.getVmId()); // cleanup the network for the oldOwner _networkMgr.cleanupNics(vmOldProfile); _networkMgr.expungeNics(vmOldProfile); @@ -4723,7 +3699,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use throw new InvalidParameterValueException("Unable to find physical network with id: "+physicalNetworkId + " and tag: " +requiredOfferings.get(0).getTags()); } s_logger.debug("Creating network for account " + newAccount + " from the network offering id=" + - requiredOfferings.get(0).getId() + " as a part of deployVM process"); + requiredOfferings.get(0).getId() + " as a part of deployVM process"); Network newNetwork = _networkMgr.createGuestNetwork(requiredOfferings.get(0).getId(), newAccount.getAccountName() + "-network", newAccount.getAccountName() + "-network", null, null, null, null, newAccount, null, physicalNetwork, zone.getId(), ACLType.Account, null, null, null, null); @@ -4801,20 +3777,30 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use public UserVm restoreVM(RestoreVMCmd cmd) { // Input validation Account caller = UserContext.current().getCaller(); - Long userId = UserContext.current().getCallerUserId(); - UserVO user = 
_userDao.findById(userId); - boolean needRestart = false; long vmId = cmd.getVmId(); + Long newTemplateId = cmd.getTemplateId(); UserVmVO vm = _vmDao.findById(vmId); if (vm == null) { - InvalidParameterValueException ex = new InvalidParameterValueException( - "Cann not find VM with ID " + vmId); + InvalidParameterValueException ex = new InvalidParameterValueException("Cannot find VM with ID " + vmId); ex.addProxyObject(vm, vmId, "vmId"); throw ex; } + _accountMgr.checkAccess(caller, null, true, vm); + + return restoreVMInternal(caller, vm, newTemplateId); + } + + public UserVm restoreVMInternal(Account caller, UserVmVO vm, Long newTemplateId){ + + Long userId = caller.getId(); Account owner = _accountDao.findById(vm.getAccountId()); + UserVO user = _userDao.findById(userId); + long vmId = vm.getId(); + boolean needRestart = false; + + // Input validation if (owner == null) { throw new InvalidParameterValueException("The owner of " + vm + " does not exist: " + vm.getAccountId()); @@ -4829,7 +3815,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use && vm.getState() != VirtualMachine.State.Stopped) { throw new CloudRuntimeException( "Vm " - + vmId + + vm.getUuid() + " currently in " + vm.getState() + " state, restore vm can only execute when VM in Running or Stopped"); @@ -4842,27 +3828,39 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use List rootVols = _volsDao.findByInstance(vmId); if (rootVols.isEmpty()) { InvalidParameterValueException ex = new InvalidParameterValueException( - "Can not find root volume for VM " + vmId); + "Can not find root volume for VM " + vm.getUuid()); ex.addProxyObject(vm, vmId, "vmId"); throw ex; } VolumeVO root = rootVols.get(0); - long templateId = root.getTemplateId(); - VMTemplateVO template = _templateDao.findById(templateId); - if (template == null) { - InvalidParameterValueException ex = new InvalidParameterValueException( - "Cannot find template for specified 
volumeid and vmId"); + Long templateId = root.getTemplateId(); + if(templateId == null) { + InvalidParameterValueException ex = new InvalidParameterValueException("Currently there is no support to reset a vm that is deployed using ISO " + vm.getUuid()); ex.addProxyObject(vm, vmId, "vmId"); - ex.addProxyObject(root, root.getId(), "volumeId"); throw ex; } + VMTemplateVO template = null; + if(newTemplateId != null) { + template = _templateDao.findById(newTemplateId); + _accountMgr.checkAccess(caller, null, true, template); + } else { + template = _templateDao.findById(templateId); + if (template == null) { + InvalidParameterValueException ex = new InvalidParameterValueException( + "Cannot find template for specified volumeid and vmId"); + ex.addProxyObject(vm, vmId, "vmId"); + ex.addProxyObject(root, root.getId(), "volumeId"); + throw ex; + } + } + if (needRestart) { try { _itMgr.stop(vm, user, caller); } catch (ResourceUnavailableException e) { - s_logger.debug("Stop vm " + vmId + " failed", e); + s_logger.debug("Stop vm " + vm.getUuid() + " failed", e); CloudRuntimeException ex = new CloudRuntimeException( "Stop vm failed for specified vmId"); ex.addProxyObject(vm, vmId, "vmId"); @@ -4870,24 +3868,29 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } } - /* allocate a new volume from original template */ - VolumeVO newVol = _storageMgr.allocateDuplicateVolume(root, null); + /* If new template is provided allocate a new volume from new template otherwise allocate new volume from original template */ + VolumeVO newVol = null; + if (newTemplateId != null){ + newVol = volumeMgr.allocateDuplicateVolume(root, newTemplateId); + vm.setGuestOSId(template.getGuestOSId()); + vm.setTemplateId(newTemplateId); + _vmDao.update(vmId, vm); + } else { + newVol = volumeMgr.allocateDuplicateVolume(root, null); + } + _volsDao.attachVolume(newVol.getId(), vmId, newVol.getDeviceId()); /* Detach and destory the old root volume */ - try { - 
_volsDao.detachVolume(root.getId()); - _storageMgr.destroyVolume(root); - } catch (ConcurrentOperationException e) { - s_logger.debug("Unable to delete old root volume " + root.getId() - + ", user may manually delete it", e); - } + + _volsDao.detachVolume(root.getId()); + this.volumeMgr.destroyVolume(root); if (needRestart) { try { _itMgr.start(vm, null, user, caller); } catch (Exception e) { - s_logger.debug("Unable to start VM " + vmId, e); + s_logger.debug("Unable to start VM " + vm.getUuid(), e); CloudRuntimeException ex = new CloudRuntimeException( "Unable to start VM with specified id" + e.getMessage()); ex.addProxyObject(vm, vmId, "vmId"); @@ -4896,15 +3899,16 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use } s_logger.debug("Restore VM " + vmId + " with template " - + root.getTemplateId() + " successfully"); + + template.getUuid() + " done successfully"); return vm; + } @Override public boolean plugNic(Network network, NicTO nic, VirtualMachineTO vm, ReservationContext context, DeployDestination dest) throws ConcurrentOperationException, ResourceUnavailableException, - InsufficientCapacityException { + InsufficientCapacityException { UserVmVO vmVO = _vmDao.findById(vm.getId()); if (vmVO.getState() == State.Running) { try { diff --git a/server/src/com/cloud/vm/UserVmStateListener.java b/server/src/com/cloud/vm/UserVmStateListener.java index 18f85670948..04aa8180b67 100644 --- a/server/src/com/cloud/vm/UserVmStateListener.java +++ b/server/src/com/cloud/vm/UserVmStateListener.java @@ -20,24 +20,24 @@ import com.cloud.event.EventCategory; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.event.dao.UsageEventDao; +import com.cloud.network.Network; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; -import com.cloud.network.Network; import com.cloud.server.ManagementServer; +import com.cloud.utils.component.ComponentContext; import 
com.cloud.utils.fsm.StateListener; import com.cloud.vm.VirtualMachine.Event; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.NicDao; - +import org.apache.cloudstack.framework.events.EventBus; import org.apache.log4j.Logger; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; -import java.util.Enumeration; +import javax.inject.Inject; import java.util.HashMap; import java.util.List; import java.util.Map; -import javax.inject.Inject; - public class UserVmStateListener implements StateListener { @Inject protected UsageEventDao _usageEventDao; @@ -45,8 +45,7 @@ public class UserVmStateListener implements StateListener> _vmGurus = new HashMap>(); protected StateMachine2 _stateMachine; @@ -314,15 +311,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (template.getFormat() == ImageFormat.ISO) { - _storageMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vm, owner); + this.volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vm, owner); } else if (template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { - _storageMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), template, vm, owner); + this.volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), template, vm, owner); } for (Pair offering : dataDiskOfferings) { - _storageMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(), offering.first(), offering.second(), vm, owner); + this.volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(), offering.first(), offering.second(), vm, owner); } txn.commit(); @@ -354,11 +351,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return (VirtualMachineGuru) _vmGurus.get(vm.getType()); } - @SuppressWarnings("unchecked") - private VirtualMachineGuru 
getBareMetalVmGuru(T vm) { - return (VirtualMachineGuru) _vmGurus.get(VirtualMachine.Type.UserBareMetal); - } - @Override public boolean expunge(T vm, User caller, Account account) throws ResourceUnavailableException { try { @@ -410,10 +402,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.debug("Cleaning up NICS"); _networkMgr.cleanupNics(profile); // Clean up volumes based on the vm's instance id - _storageMgr.cleanupVolumes(vm.getId()); + this.volumeMgr.cleanupVolumes(vm.getId()); VirtualMachineGuru guru = getVmGuru(vm); guru.finalizeExpunge(vm); + //remove the overcommit detials from the uservm details + _uservmDetailsDao.deleteDetails(vm.getId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Expunged " + vm); @@ -604,12 +598,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac public T advanceStart(T vm, Map params, User caller, Account account, DeploymentPlan planToDeploy) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { long vmId = vm.getId(); - VirtualMachineGuru vmGuru; - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - vmGuru = getBareMetalVmGuru(vm); - } else { - vmGuru = getVmGuru(vm); - } + VirtualMachineGuru vmGuru = getVmGuru(vm); vm = vmGuru.findById(vm.getId()); Ternary start = changeToStartState(vmGuru, vm, caller, account); @@ -677,7 +666,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac continue; } - StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); + StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(vol.getPoolId()); if (!pool.isInMaintenance()) { if (s_logger.isDebugEnabled()) { s_logger.debug("Root volume is ready, need to place VM in volume's cluster"); @@ -739,6 +728,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long destHostId = dest.getHost().getId(); vm.setPodId(dest.getPod().getId()); + Long 
cluster_id = dest.getCluster().getId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio"); + vmProfile.setcpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue())); + vmProfile.setramOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue())); try { if (!changeState(vm, Event.OperationRetry, destHostId, work, Step.Prepare)) { @@ -754,7 +748,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } _networkMgr.prepare(vmProfile, dest, ctx); if (vm.getHypervisorType() != HypervisorType.BareMetal) { - _storageMgr.prepare(vmProfile, dest); + this.volumeMgr.prepare(vmProfile, dest); } //since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity if(!reuseVolume){ @@ -969,7 +963,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Unable to release some network resources.", e); } - _storageMgr.release(profile); + this.volumeMgr.release(profile); s_logger.debug("Successfully cleanued up resources for the vm " + vm + " in " + state + " state"); return true; } @@ -1118,7 +1112,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (vm.getHypervisorType() != HypervisorType.BareMetal) { - _storageMgr.release(profile); + this.volumeMgr.release(profile); s_logger.debug("Successfully released storage resources for the vm " + vm); } } catch (Exception e) { @@ -1237,7 +1231,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); boolean migrationResult = false; try { - migrationResult = _storageMgr.StorageMigration(profile, destPool); + migrationResult = this.volumeMgr.storageMigration(profile, destPool); if (migrationResult) { //if the vm is 
migrated to different pod in basic mode, need to reallocate ip @@ -1322,7 +1316,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); _networkMgr.prepareNicForMigration(profile, dest); - _storageMgr.prepareForMigration(profile, dest); + this.volumeMgr.prepareForMigration(profile, dest); VirtualMachineTO to = toVmTO(profile); PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to); @@ -2591,7 +2585,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _networkModel.getNetworkRate(network.getId(), vm.getId()), _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); - + //1) Unplug the nic if (vm.getState() == State.Running) { NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); @@ -2608,11 +2602,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", DataCenter.class, vm.getDataCenterId()); } - + //2) Release the nic _networkMgr.releaseNic(vmProfile, nic); s_logger.debug("Successfully released nic " + nic + "for vm " + vm); - + //3) Remove the nic _networkMgr.removeNic(vmProfile, nic); _nicsDao.expunge(nic.getId()); @@ -2647,7 +2641,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Could not get a nic with " + network); return false; } - + // don't delete default NIC on a user VM if (nic.isDefaultNic() && vm.getType() == VirtualMachine.Type.User ) { s_logger.warn("Failed to remove nic from " + vm + " in " + network + ", nic is default."); @@ -2661,15 +2655,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac //1) Unplug the nic if (vm.getState() == State.Running) { - NicTO nicTO 
= toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); - s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); - boolean result = vmGuru.unplugNic(network, nicTO, vmTO, context, dest); - if (result) { - s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network ); - } else { - s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); - return false; - } + NicTO nicTO = toNicTO(nicProfile, vmProfile.getVirtualMachine().getHypervisorType()); + s_logger.debug("Un-plugging nic for vm " + vm + " from network " + network); + boolean result = vmGuru.unplugNic(network, nicTO, vmTO, context, dest); + if (result) { + s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network ); + } else { + s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); + return false; + } } else if (vm.getState() != State.Stopped) { s_logger.warn("Unable to remove vm " + vm + " from network " + network); throw new ResourceUnavailableException("Unable to remove vm " + vm + " from network, is not in the right state", diff --git a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java index e83d6a0d926..24f44cb07ac 100644 --- a/server/src/com/cloud/vm/VirtualMachineProfileImpl.java +++ b/server/src/com/cloud/vm/VirtualMachineProfileImpl.java @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; + import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.offering.ServiceOffering; @@ -42,13 +43,16 @@ public class VirtualMachineProfileImpl implements Virtua T _vm; ServiceOfferingVO _offering; VMTemplateVO _template; + UserVmDetailVO _userVmDetails; Map _params; List _nics = new ArrayList(); List _disks = new ArrayList(); StringBuilder _bootArgs = new StringBuilder(); Account _owner; BootloaderType _bootloader; 
- + Float cpuOvercommitRatio = 1.0f; + Float memoryOvercommitRatio = 1.0f; + VirtualMachine.Type _type; public VirtualMachineProfileImpl(T vm, VMTemplateVO template, ServiceOfferingVO offering, Account owner, Map params) { @@ -238,6 +242,25 @@ public class VirtualMachineProfileImpl implements Virtua public void setServiceOffering(ServiceOfferingVO offering) { _offering = offering; } - - + + public void setcpuOvercommitRatio(Float cpuOvercommitRatio){ + this.cpuOvercommitRatio= cpuOvercommitRatio; + + } + + public void setramOvercommitRatio(Float memoryOvercommitRatio){ + this.memoryOvercommitRatio= memoryOvercommitRatio; + + } + @Override + public Float getCpuOvercommitRatio(){ + return this.cpuOvercommitRatio; + } + + @Override + public Float getMemoryOvercommitRatio(){ + return this.memoryOvercommitRatio; + } + + } diff --git a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java index 52075880b9c..391fa5895b0 100755 --- a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java +++ b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java @@ -25,15 +25,19 @@ import javax.inject.Inject; import org.springframework.stereotype.Component; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDaoImpl; import com.cloud.network.Network; +import com.cloud.network.dao.RouterNetworkDao; import com.cloud.network.dao.RouterNetworkDaoImpl; import com.cloud.network.dao.RouterNetworkVO; import com.cloud.network.router.VirtualRouter; import com.cloud.network.router.VirtualRouter.Role; import com.cloud.offering.NetworkOffering; +import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDaoImpl; import com.cloud.user.UserStatisticsVO; +import com.cloud.user.dao.UserStatisticsDao; import com.cloud.user.dao.UserStatisticsDaoImpl; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -57,10 +61,10 @@ public class DomainRouterDaoImpl 
extends GenericDaoBase im protected SearchBuilder HostUpSearch; protected SearchBuilder StateNetworkTypeSearch; protected SearchBuilder OutsidePodSearch; - @Inject HostDaoImpl _hostsDao; - @Inject RouterNetworkDaoImpl _routerNetworkDao; - @Inject UserStatisticsDaoImpl _userStatsDao; - @Inject NetworkOfferingDaoImpl _offDao; + @Inject HostDao _hostsDao; + @Inject RouterNetworkDao _routerNetworkDao; + @Inject UserStatisticsDao _userStatsDao; + @Inject NetworkOfferingDao _offDao; protected SearchBuilder VpcSearch; public DomainRouterDaoImpl() { diff --git a/server/src/com/cloud/vm/dao/NicDao.java b/server/src/com/cloud/vm/dao/NicDao.java index 762048b65bf..35d719131bb 100644 --- a/server/src/com/cloud/vm/dao/NicDao.java +++ b/server/src/com/cloud/vm/dao/NicDao.java @@ -58,4 +58,8 @@ public interface NicDao extends GenericDao { NicVO findByNetworkIdInstanceIdAndBroadcastUri(long networkId, long instanceId, String broadcastUri); NicVO findByIp4AddressAndNetworkIdAndInstanceId(long networkId, long instanceId, String ip4Address); + + List listByVmIdAndNicId(Long vmId, Long nicId); + + NicVO findByIp4AddressAndVmId(String ip4Address, long instance); } diff --git a/server/src/com/cloud/vm/dao/NicDaoImpl.java b/server/src/com/cloud/vm/dao/NicDaoImpl.java index 5cf152f9f90..b9ec72ee7c9 100644 --- a/server/src/com/cloud/vm/dao/NicDaoImpl.java +++ b/server/src/com/cloud/vm/dao/NicDaoImpl.java @@ -53,8 +53,10 @@ public class NicDaoImpl extends GenericDaoBase implements NicDao { AllFieldsSearch.and("address", AllFieldsSearch.entity().getIp4Address(), Op.EQ); AllFieldsSearch.and("isDefault", AllFieldsSearch.entity().isDefaultNic(), Op.EQ); AllFieldsSearch.and("broadcastUri", AllFieldsSearch.entity().getBroadcastUri(), Op.EQ); + AllFieldsSearch.and("secondaryip", AllFieldsSearch.entity().getSecondaryIp(), Op.EQ); + AllFieldsSearch.and("nicid", AllFieldsSearch.entity().getId(), Op.EQ); AllFieldsSearch.done(); - + IpSearch = createSearchBuilder(String.class); IpSearch.select(null, 
Func.DISTINCT, IpSearch.entity().getIp4Address()); IpSearch.and("network", IpSearch.entity().getNetworkId(), Op.EQ); @@ -202,4 +204,21 @@ public class NicDaoImpl extends GenericDaoBase implements NicDao { sc.setParameters("address", ip4Address); return findOneBy(sc); } + + @Override + public List listByVmIdAndNicId(Long vmId, Long nicId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("instance", vmId); + sc.setParameters("nicid", nicId); + return listBy(sc); + } + + @Override + public NicVO findByIp4AddressAndVmId(String ip4Address, long instance) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("address", ip4Address); + sc.setParameters("instance", instance); + return findOneBy(sc); + } + } diff --git a/server/src/com/cloud/vm/dao/NicSecondaryIpDao.java b/server/src/com/cloud/vm/dao/NicSecondaryIpDao.java new file mode 100644 index 00000000000..da96df43e83 --- /dev/null +++ b/server/src/com/cloud/vm/dao/NicSecondaryIpDao.java @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm.dao; + +import java.util.List; +import com.cloud.utils.db.GenericDao; + +public interface NicSecondaryIpDao extends GenericDao { + List listByVmId(long instanceId); + + List listSecondaryIpAddressInNetwork(long networkConfigId); + List listByNetworkId(long networkId); + + NicSecondaryIpVO findByInstanceIdAndNetworkId(long networkId, long instanceId); + + // void removeNicsForInstance(long instanceId); + // void removeSecondaryIpForNic(long nicId); + + NicSecondaryIpVO findByIp4AddressAndNetworkId(String ip4Address, long networkId); + + /** + * @param networkId + * @param instanceId + * @return + */ + + List getSecondaryIpAddressesForVm(long vmId); + + List listByNicId(long nicId); + + List listByNicIdAndVmid(long nicId, long vmId); + + NicSecondaryIpVO findByIp4AddressAndNicId(String ip4Address, long nicId); + + NicSecondaryIpVO findByIp4AddressAndNetworkIdAndInstanceId(long networkId, + Long vmId, String vmIp); + + List getSecondaryIpAddressesForNic(long nicId); +} diff --git a/server/src/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java b/server/src/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java new file mode 100644 index 00000000000..3befaf70529 --- /dev/null +++ b/server/src/com/cloud/vm/dao/NicSecondaryIpDaoImpl.java @@ -0,0 +1,138 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import javax.ejb.Local; + +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Func; +import com.cloud.utils.db.SearchCriteria.Op; + +@Component +@Local(value=NicSecondaryIpDao.class) +public class NicSecondaryIpDaoImpl extends GenericDaoBase implements NicSecondaryIpDao { + private final SearchBuilder AllFieldsSearch; + private final GenericSearchBuilder IpSearch; + + protected NicSecondaryIpDaoImpl() { + super(); + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("instanceId", AllFieldsSearch.entity().getVmId(), Op.EQ); + AllFieldsSearch.and("network", AllFieldsSearch.entity().getNetworkId(), Op.EQ); + AllFieldsSearch.and("address", AllFieldsSearch.entity().getIp4Address(), Op.EQ); + AllFieldsSearch.and("nicId", AllFieldsSearch.entity().getNicId(), Op.EQ); + AllFieldsSearch.done(); + + IpSearch = createSearchBuilder(String.class); + IpSearch.select(null, Func.DISTINCT, IpSearch.entity().getIp4Address()); + IpSearch.and("network", IpSearch.entity().getNetworkId(), Op.EQ); + IpSearch.and("address", IpSearch.entity().getIp4Address(), Op.NNULL); + IpSearch.done(); + } + + @Override + public List listByVmId(long instanceId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("instanceId", instanceId); + return listBy(sc); + } + + @Override + public List listByNicId(long nicId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("nicId", nicId); + return listBy(sc); + } + + @Override + public List listSecondaryIpAddressInNetwork(long networkId) { + SearchCriteria sc = IpSearch.create(); + sc.setParameters("network", 
networkId); + return customSearch(sc, null); + } + + @Override + public List listByNetworkId(long networkId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("network", networkId); + return listBy(sc); + } + + @Override + public List listByNicIdAndVmid(long nicId, long vmId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("nicId", nicId); + sc.setParameters("instanceId", vmId); + return listBy(sc); + } + + @Override + public List getSecondaryIpAddressesForVm(long vmId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("instanceId", vmId); + return listBy(sc); + } + + @Override + public List getSecondaryIpAddressesForNic(long nicId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("nicId", nicId); + List results = search(sc, null); + List ips = new ArrayList(results.size()); + for (NicSecondaryIpVO result : results) { + ips.add(result.getIp4Address()); + } + return ips; + } + + @Override + public NicSecondaryIpVO findByInstanceIdAndNetworkId(long networkId, long instanceId) { + // TODO Auto-generated method stub + return null; + } + + @Override + public NicSecondaryIpVO findByIp4AddressAndNetworkId(String ip4Address, long networkId) { + // TODO Auto-generated method stub + return null; + } + @Override + public NicSecondaryIpVO findByIp4AddressAndNicId(String ip4Address, long nicId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("address", ip4Address); + sc.setParameters("nicId", nicId); + return findOneBy(sc); + } + + @Override + public NicSecondaryIpVO findByIp4AddressAndNetworkIdAndInstanceId( + long networkId, Long vmId, String vmIp) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("network", networkId); + sc.setParameters("instanceId", vmId); + sc.setParameters("address", vmIp); + return findOneBy(sc); + } +} diff --git a/server/src/com/cloud/vm/dao/NicSecondaryIpVO.java b/server/src/com/cloud/vm/dao/NicSecondaryIpVO.java new file mode 100644 
index 00000000000..770e188ad83 --- /dev/null +++ b/server/src/com/cloud/vm/dao/NicSecondaryIpVO.java @@ -0,0 +1,160 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.utils.db.GenericDao; +import com.cloud.vm.NicSecondaryIp; + +@Entity +@Table(name = "nic_secondary_ips") +public class NicSecondaryIpVO implements NicSecondaryIp { + + public NicSecondaryIpVO(Long nicId, String ipaddr, Long vmId, + Long accountId, Long domainId, Long networkId) { + this.nicId = nicId; + this.vmId = vmId; + this.ip4Address = ipaddr; + this.accountId = accountId; + this.domainId = domainId; + this.networkId = networkId; + } + + protected NicSecondaryIpVO() { + } + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "nicId") + long nicId; + + @Column(name="domain_id", updatable=false) + long domainId; + + @Column(name="account_id", updatable=false) + private Long 
accountId; + + @Column(name = "ip4_address") + String ip4Address; + + @Column(name = "ip6_address") + String ip6Address; + + @Column(name = "network_id") + long networkId; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = "uuid") + String uuid = UUID.randomUUID().toString(); + + @Column(name = "vmId") + Long vmId; + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public long getNicId() { + return nicId; + } + + public void setNicId(long nicId) { + this.nicId = nicId; + } + + public long getDomainId() { + return domainId; + } + + public void setDomainId(Long domainId) { + this.domainId = domainId; + } + + public long getAccountId() { + return accountId; + } + + public void setAccountId(Long accountId) { + this.accountId = accountId; + } + + public String getIp4Address() { + return ip4Address; + } + + public void setIp4Address(String ip4Address) { + this.ip4Address = ip4Address; + } + + public String getIp6Address() { + return ip6Address; + } + + public void setIp6Address(String ip6Address) { + this.ip6Address = ip6Address; + } + + public long getNetworkId() { + return networkId; + } + + public void setNetworkId(long networkId) { + this.networkId = networkId; + } + + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } +} diff --git a/server/src/com/cloud/baremetal/PxeServerResponse.java b/server/src/com/cloud/vm/dao/UserVmCloneSettingDao.java similarity index 61% rename from server/src/com/cloud/baremetal/PxeServerResponse.java rename to server/src/com/cloud/vm/dao/UserVmCloneSettingDao.java index 32fcc7fb5b9..44a1bf32f8c 100644 --- a/server/src/com/cloud/baremetal/PxeServerResponse.java +++ 
b/server/src/com/cloud/vm/dao/UserVmCloneSettingDao.java @@ -14,22 +14,24 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -package com.cloud.baremetal; +package com.cloud.vm.dao; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; -import com.cloud.serializer.Param; -import com.google.gson.annotations.SerializedName; +import java.util.List; -public class PxeServerResponse extends BaseResponse { - @SerializedName(ApiConstants.ID) @Param(description="the ID of the PXE server") - private String id; +import com.cloud.utils.db.GenericDao; +import com.cloud.vm.UserVmCloneSettingVO; - public String getId() { - return id; - } +public interface UserVmCloneSettingDao extends GenericDao { + + /* + * Returns a User VM clone type record by vm id. + */ + UserVmCloneSettingVO findByVmId(long id); + + /* + * Returns a list of VMs by clone type. + * cloneType can be full/linked. + */ + List listByCloneType(String cloneType); - public void setId(String id) { - this.id = id; - } } diff --git a/server/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java b/server/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java new file mode 100644 index 00000000000..174f28350d1 --- /dev/null +++ b/server/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + + +import java.util.List; + +import javax.annotation.PostConstruct; +import javax.ejb.Local; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.vm.UserVmCloneSettingVO; +import com.cloud.utils.db.DB; + + +@Component +@Local(value= { UserVmCloneSettingDao.class }) +@DB(txn = false) +public class UserVmCloneSettingDaoImpl extends GenericDaoBase implements UserVmCloneSettingDao { + public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class); + + protected SearchBuilder vmIdSearch; + protected SearchBuilder cloneTypeSearch; + + public UserVmCloneSettingDaoImpl() { + } + + @PostConstruct + public void init() { + // Initialize the search builders. 
+ vmIdSearch = createSearchBuilder(); + vmIdSearch.and("vmId", vmIdSearch.entity().getCloneType(), Op.EQ); + vmIdSearch.done(); + + cloneTypeSearch = createSearchBuilder(); + cloneTypeSearch.and("cloneType", cloneTypeSearch.entity().getCloneType(), Op.EQ); + cloneTypeSearch.done(); + } + + @Override + public UserVmCloneSettingVO findByVmId(long vmId) { + SearchCriteria sc = vmIdSearch.create(); + sc.setParameters("vmId", vmId); + return findOneBy(sc); + } + + @Override + public List listByCloneType(String cloneType) { + SearchCriteria sc = cloneTypeSearch.create(); + sc.setParameters("cloneType", cloneType); + return search(sc, null); + } + +} diff --git a/server/src/com/cloud/vm/dao/UserVmDao.java b/server/src/com/cloud/vm/dao/UserVmDao.java index 9fbcde377dd..81d13cda2ed 100755 --- a/server/src/com/cloud/vm/dao/UserVmDao.java +++ b/server/src/com/cloud/vm/dao/UserVmDao.java @@ -70,5 +70,7 @@ public interface UserVmDao extends GenericDao { public Long countAllocatedVMsForAccount(long accountId); Hashtable listVmDetails(Hashtable userVmData); + + List listByIsoId(Long isoId); } diff --git a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java index f2fc10bbaa4..49228287f67 100755 --- a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -35,6 +35,7 @@ import org.springframework.stereotype.Component; import com.cloud.configuration.Resource; import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.user.Account; @@ -72,13 +73,14 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use protected SearchBuilder DestroySearch; protected SearchBuilder AccountDataCenterVirtualSearch; protected GenericSearchBuilder CountByAccountPod; - protected GenericSearchBuilder CountByAccount; - protected GenericSearchBuilder PodsHavingVmsForAccount; - - protected SearchBuilder 
UserVmSearch; + protected GenericSearchBuilder CountByAccount; + protected GenericSearchBuilder PodsHavingVmsForAccount; + + protected SearchBuilder UserVmSearch; + protected SearchBuilder UserVmByIsoSearch; protected Attribute _updateTimeAttr; // ResourceTagsDaoImpl _tagsDao = ComponentLocator.inject(ResourceTagsDaoImpl.class); - @Inject ResourceTagsDaoImpl _tagsDao; + @Inject ResourceTagDao _tagsDao; private static final String LIST_PODS_HAVING_VMS_FOR_ACCOUNT = "SELECT pod_id FROM cloud.vm_instance WHERE data_center_id = ? AND account_id = ? AND pod_id IS NOT NULL AND (state = 'Running' OR state = 'Stopped') " + "GROUP BY pod_id HAVING count(id) > 0 ORDER BY count(id) DESC"; @@ -194,7 +196,10 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use AccountDataCenterVirtualSearch.and("dc", AccountDataCenterVirtualSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); AccountDataCenterVirtualSearch.join("nicSearch", nicSearch, AccountDataCenterVirtualSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); AccountDataCenterVirtualSearch.done(); - + + UserVmByIsoSearch = createSearchBuilder(); + UserVmByIsoSearch.and("isoId", UserVmByIsoSearch.entity().getIsoId(), SearchCriteria.Op.EQ); + UserVmByIsoSearch.done(); _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; @@ -248,13 +253,20 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use public List listByHostId(Long id) { SearchCriteria sc = HostSearch.create(); sc.setParameters("host", id); - - return listBy(sc); - } - - @Override - public List listUpByHostId(Long hostId) { - SearchCriteria sc = HostUpSearch.create(); + + return listBy(sc); + } + + @Override + public List listByIsoId(Long isoId) { + SearchCriteria sc = UserVmByIsoSearch.create(); + sc.setParameters("isoId", isoId); + return listBy(sc); + } + + @Override + public List listUpByHostId(Long hostId) { + SearchCriteria 
sc = HostUpSearch.create(); sc.setParameters("host", hostId); sc.setParameters("states", new Object[] {State.Destroyed, State.Stopped, State.Expunging}); return listBy(sc); diff --git a/server/src/com/cloud/vm/dao/UserVmDetailsDao.java b/server/src/com/cloud/vm/dao/UserVmDetailsDao.java index 87fb9b6482b..bdccec94ef0 100644 --- a/server/src/com/cloud/vm/dao/UserVmDetailsDao.java +++ b/server/src/com/cloud/vm/dao/UserVmDetailsDao.java @@ -18,12 +18,9 @@ package com.cloud.vm.dao; import java.util.Map; -import org.springframework.stereotype.Component; - import com.cloud.utils.db.GenericDao; import com.cloud.vm.UserVmDetailVO; -@Component public interface UserVmDetailsDao extends GenericDao { Map findDetails(long vmId); diff --git a/server/src/com/cloud/vm/dao/VMInstanceDao.java b/server/src/com/cloud/vm/dao/VMInstanceDao.java index d34b25726dc..c604027abde 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDao.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDao.java @@ -26,7 +26,6 @@ import com.cloud.utils.fsm.StateDao; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachine.Type; /* @@ -75,6 +74,8 @@ public interface VMInstanceDao extends GenericDao, StateDao< VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); + VMInstanceVO findVMByInstanceName(String name); + void updateProxyId(long id, Long proxyId, Date time); List listByHostIdTypes(long hostid, VirtualMachine.Type... 
types); diff --git a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 531c79447b7..7198b7c24e0 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -35,12 +35,9 @@ import org.springframework.stereotype.Component; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.host.dao.HostDaoImpl; import com.cloud.server.ResourceTag.TaggedResourceType; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.Pair; - import com.cloud.utils.db.Attribute; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -80,6 +77,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder HostIdTypesSearch; protected SearchBuilder HostIdUpTypesSearch; protected SearchBuilder HostUpSearch; + protected SearchBuilder InstanceNameSearch; protected GenericSearchBuilder CountVirtualRoutersByAccount; protected GenericSearchBuilder CountRunningByHost; protected GenericSearchBuilder CountRunningByAccount; @@ -188,6 +186,10 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem HostUpSearch.and("states", HostUpSearch.entity().getState(), Op.IN); HostUpSearch.done(); + InstanceNameSearch = createSearchBuilder(); + InstanceNameSearch.and("instanceName", InstanceNameSearch.entity().getInstanceName(), Op.EQ); + InstanceNameSearch.done(); + CountVirtualRoutersByAccount = createSearchBuilder(Long.class); CountVirtualRoutersByAccount.select(null, Func.COUNT, null); CountVirtualRoutersByAccount.and("account", CountVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); @@ -340,6 +342,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return findOneIncludingRemovedBy(sc); } + @Override + public VMInstanceVO findVMByInstanceName(String name) { + SearchCriteria sc = InstanceNameSearch.create(); + 
sc.setParameters("instanceName", name); + return findOneBy(sc); + } @Override public void updateProxyId(long id, Long proxyId, Date time) { diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index a0335634113..12a059727be 100644 --- a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -28,6 +28,9 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -61,11 +64,10 @@ import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; -import com.cloud.storage.StoragePoolVO; +import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -112,9 +114,10 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana @Inject HypervisorGuruManager _hvGuruMgr; @Inject AccountManager _accountMgr; @Inject GuestOSDao _guestOSDao; - @Inject StoragePoolDao _storagePoolDao; + @Inject PrimaryDataStoreDao _storagePoolDao; @Inject SnapshotDao _snapshotDao; @Inject VirtualMachineManager _itMgr; + @Inject DataStoreManager dataStoreMgr; @Inject ConfigurationDao _configDao; int _vmSnapshotMax; StateMachine2 _vmSnapshottateMachine ; @@ -393,7 +396,7 @@ public class 
VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana List volumeVos = _volumeDao.findByInstance(vmId); for (VolumeVO volume : volumeVos) { - StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId()); + StoragePool pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); VolumeTO volumeTO = new VolumeTO(volume, pool); volumeTOs.add(volumeTO); } diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java b/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java new file mode 100644 index 00000000000..03d4945fda0 --- /dev/null +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -0,0 +1,224 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.vm.snapshot; + +import java.util.Date; +import java.util.UUID; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.TableGenerator; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; + +import com.cloud.utils.db.GenericDao; + +@Entity +@Table(name = "vm_snapshots") +public class VMSnapshotVO implements VMSnapshot { + @Id + @TableGenerator(name = "vm_snapshots_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshots_seq", allocationSize = 1) + @GeneratedValue(strategy = GenerationType.TABLE) + @Column(name = "id") + long id; + + @Column(name = "uuid") + String uuid = UUID.randomUUID().toString(); + + @Column(name = "name") + String name; + + @Column(name = "display_name") + String displayName; + + @Column(name = "description") + String description; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "account_id") + long accountId; + + @Column(name = "domain_id") + long domainId; + + @Column(name = "vm_snapshot_type") + @Enumerated(EnumType.STRING) + VMSnapshot.Type type; + + @Column(name = "state", updatable = true, nullable = false) + @Enumerated(value = EnumType.STRING) + private State state; + + @Column(name = GenericDao.CREATED_COLUMN) + Date created; + + @Column(name = GenericDao.REMOVED_COLUMN) + Date removed; + + @Column(name = "current") + Boolean current; + + @Column(name = "parent") + Long parent; + + @Column(name = "updated") + @Temporal(value = TemporalType.TIMESTAMP) + Date updated; + + @Column(name="update_count", updatable = true, nullable=false) + protected long updatedCount; + + public Long getParent() { + return parent; + } + + public void setParent(Long parent) { + this.parent = parent; + 
} + + public VMSnapshotVO() { + + } + + public Date getRemoved() { + return removed; + } + + public VMSnapshotVO(Long accountId, Long domainId, Long vmId, + String description, String vmSnapshotName, String vsDisplayName, + Long serviceOfferingId, Type type, Boolean current) { + this.accountId = accountId; + this.domainId = domainId; + this.vmId = vmId; + this.state = State.Allocated; + this.description = description; + this.name = vmSnapshotName; + this.displayName = vsDisplayName; + this.type = type; + this.current = current; + } + + public String getDescription() { + return description; + } + + @Override + public Date getCreated() { + return created; + } + + public void setCreated(Date created) { + this.created = created; + } + + @Override + public long getId() { + return id; + } + + @Override + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } + + @Override + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public State getState() { + return state; + } + + public void setState(State state) { + this.state = state; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public long getAccountId() { + return accountId; + } + + @Override + public long getDomainId() { + return domainId; + } + + @Override + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + public Boolean getCurrent() { + return current; + } + + public void setCurrent(Boolean current) { + this.current = current; + } + + @Override + public long getUpdatedCount() { + return updatedCount; + } + + @Override + public void incrUpdatedCount() { + this.updatedCount++; + } + + @Override + public Date getUpdated() { + return updated; + } + + @Override + public Type getType() { + return type; + } + + public void setRemoved(Date removed) { + this.removed = 
removed; + } +} diff --git a/server/src/org/apache/cloudstack/region/RegionManager.java b/server/src/org/apache/cloudstack/region/RegionManager.java index 56bdb9baedf..4cbd664ce5a 100644 --- a/server/src/org/apache/cloudstack/region/RegionManager.java +++ b/server/src/org/apache/cloudstack/region/RegionManager.java @@ -31,95 +31,49 @@ import com.cloud.user.Account; import com.cloud.user.UserAccount; public interface RegionManager { - - /** - * Propagates Account details to peer Regions - * @param userName - * @param password - * @param firstName - * @param lastName - * @param email - * @param timezone - * @param accountName - * @param accountType - * @param domainId - * @param networkDomain - * @param details - * @param accountUUID - * @param userUUID - * @return - */ - public boolean propagateAddAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, Long domainId, String networkDomain, - Map details, String accountUUID, String userUUID); - - /** - * Returns the Id of local Region - * @return - */ - public int getId(); - - /** - * Propagates User details to peer Regions - * @param userName - * @param password - * @param firstName - * @param lastName - * @param email - * @param timeZone - * @param accountName - * @param domainUUId - * @param userUUID - */ - public void propagateAddUser(String userName, String password, - String firstName, String lastName, String email, String timeZone, - String accountName, String domainUUId, String userUUID); - - /** - * Propagates Domain details to peer Regions - * @param name - * @param parentId - * @param networkDomain - * @param uuid - */ - public void propagateAddDomain(String name, Long parentId, String networkDomain, String uuid); - - - /** - * Adds a peer Region to the local Region - * @param id - * @param name - * @param endPoint - * @param apiKey - * @param secretKey - * @return Returns added Region object - */ - Region 
addRegion(int id, String name, String endPoint, String apiKey, String secretKey); - - /** - * Update details of the Region with specified Id - * @param id - * @param name - * @param endPoint - * - * @param apiKey - * @param secretKey - * @return Returns update Region object - */ - Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey); - - /** - * @param id - * @return True if region is successfully removed - */ - boolean removeRegion(int id); - - /** List all Regions or by Id/Name - * @param id - * @param name - * @return List of Regions - */ - List listRegions(Integer id, String name); - + + /** + * Returns the Id of local Region + * @return + */ + public int getId(); + + /** + * Adds a peer Region to the local Region + * @param id + * @param name + * @param endPoint + * @param apiKey + * @param secretKey + * @return Returns added Region object + */ + Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey); + + /** + * Update details of the Region with specified Id + * @param id + * @param name + * @param endPoint + * + * @param apiKey + * @param secretKey + * @return Returns update Region object + */ + Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey); + + /** + * @param id + * @return True if region is successfully removed + */ + boolean removeRegion(int id); + + /** List all Regions or by Id/Name + * @param id + * @param name + * @return List of Regions + */ + List listRegions(Integer id, String name); + /** * Deletes a user by userId and propagates the change to peer Regions * @@ -128,8 +82,8 @@ public interface RegionManager { * * @return true if delete was successful, false otherwise */ - boolean deleteUserAccount(long accountId); - + boolean deleteUserAccount(long accountId); + /** * Updates an account * isPopagate falg is set to true if sent from peer Region @@ -139,19 +93,19 @@ public interface RegionManager { * @return updated account object */ 
Account updateAccount(UpdateAccountCmd cmd); - - /** - * Disables an account by accountName and domainId or accountId - * @param accountName - * @param domainId - * @param id - * @param lockRequested - * @return - * @throws ConcurrentOperationException - * @throws ResourceUnavailableException - */ - Account disableAccount(String accountName, Long domainId, Long id, Boolean lockRequested) throws ConcurrentOperationException, ResourceUnavailableException; - + + /** + * Disables an account by accountName and domainId or accountId + * @param accountName + * @param domainId + * @param id + * @param lockRequested + * @return + * @throws ConcurrentOperationException + * @throws ResourceUnavailableException + */ + Account disableAccount(String accountName, Long domainId, Long id, Boolean lockRequested) throws ConcurrentOperationException, ResourceUnavailableException; + /** * Enables an account by accountId * @@ -163,14 +117,14 @@ public interface RegionManager { * @return account object */ Account enableAccount(String accountName, Long domainId, Long accountId); - + /** * Deletes user by Id * @param deleteUserCmd * @return */ boolean deleteUser(DeleteUserCmd deleteUserCmd); - + /** * update an existing domain * @@ -178,24 +132,24 @@ public interface RegionManager { * - the command containing domainId and new domainName * @return Domain object if the command succeeded */ - Domain updateDomain(UpdateDomainCmd updateDomainCmd); - - /** - * Deletes domain by Id - * @param id - * @param cleanup - * @return true if delete was successful, false otherwise - */ - boolean deleteDomain(Long id, Boolean cleanup); - + Domain updateDomain(UpdateDomainCmd updateDomainCmd); + + /** + * Deletes domain by Id + * @param id + * @param cleanup + * @return true if delete was successful, false otherwise + */ + boolean deleteDomain(Long id, Boolean cleanup); + /** * Update a user by userId * * @param userId * @return UserAccount object */ - UserAccount updateUser(UpdateUserCmd updateUserCmd); - 
+ UserAccount updateUser(UpdateUserCmd updateUserCmd); + /** * Disables a user by userId * @@ -203,8 +157,8 @@ public interface RegionManager { * - the userId * @return UserAccount object */ - UserAccount disableUser(Long id); - + UserAccount disableUser(Long id); + /** * Enables a user * diff --git a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java index c7bca5b00cb..cb0b1a69ad8 100755 --- a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java +++ b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java @@ -16,25 +16,6 @@ // under the License. package org.apache.cloudstack.region; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; -import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; -import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; -import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; -import org.apache.cloudstack.region.dao.RegionDao; -import org.apache.cloudstack.region.dao.RegionSyncDao; -import org.apache.commons.httpclient.NameValuePair; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -53,13 +34,28 @@ import com.cloud.user.dao.UserDao; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.uuididentity.dao.IdentityDao; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; +import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; +import 
org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; +import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; +import org.apache.cloudstack.region.dao.RegionDao; +import org.apache.commons.httpclient.NameValuePair; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; @Component @Local(value = { RegionManager.class }) public class RegionManagerImpl extends ManagerBase implements RegionManager, Manager{ public static final Logger s_logger = Logger.getLogger(RegionManagerImpl.class); - + @Inject RegionDao _regionDao; @Inject @@ -73,22 +69,18 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man @Inject private DomainManager _domainMgr; @Inject - private UserAccountDao _userAccountDao; - @Inject - private IdentityDao _identityDao; - @Inject - private RegionSyncDao _regionSyncDao; - + private UserAccountDao _userAccountDao; + private String _name; private int _id; - + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { _name = name; _id = _regionDao.getRegionId(); return true; } - + @Override public boolean start() { return true; @@ -104,286 +96,137 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man return _name; } - public int getId() { - return _id; - } - - /* - * Propagates Account creation to peer Regions - * Adds an entry in region_sync table on failure - */ - @Override - public boolean propagateAddAccount(String userName, String password, String firstName, String lastName, String email, String timezone, - String accountName, short accountType, Long domainId, String networkDomain, Map details, String accountUUID, String userUUID) { - String command = "createAccount"; - List params = new ArrayList(); - params.add(new 
NameValuePair(ApiConstants.USERNAME, userName)); - params.add(new NameValuePair(ApiConstants.PASSWORD, password)); - params.add(new NameValuePair(ApiConstants.FIRSTNAME, firstName)); - params.add(new NameValuePair(ApiConstants.LASTNAME, lastName)); - params.add(new NameValuePair(ApiConstants.EMAIL, email)); - params.add(new NameValuePair(ApiConstants.TIMEZONE, timezone)); - params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); - params.add(new NameValuePair(ApiConstants.ACCOUNT_TYPE, ""+accountType)); - //ToDo: use domain UUID - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, ((domainId != null) ? domainId.toString() : ""))); - params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, networkDomain)); - params.add(new NameValuePair(ApiConstants.ACCOUNT_DETAILS, (details != null) ? details.toString() : "")); - params.add(new NameValuePair(ApiConstants.ACCOUNT_ID, accountUUID)); - params.add(new NameValuePair(ApiConstants.USER_ID, userUUID)); - params.add(new NameValuePair(ApiConstants.REGION_ID, ""+getId())); - - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - s_logger.debug("Adding account :"+accountName+" to Region: "+region.getId()); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully added account :"+accountName+" to Region: "+region.getId()); - } else { - // api call failed. 
Add entry in region_sync table - addRegionSyncItem(region.getId(), command, params); - s_logger.error("Error while Adding account :"+accountName+" to Region: "+region.getId()); - } - } - return true; - } - - /* - * Propagates User creation to peer Regions - * Adds an entry in region_sync table on failure - */ - @Override - public void propagateAddUser(String userName, String password, - String firstName, String lastName, String email, String timezone, - String accountName, String domainUUId, String userUUID) { - - String command = "createUser"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.USERNAME, userName)); - params.add(new NameValuePair(ApiConstants.PASSWORD, password)); - params.add(new NameValuePair(ApiConstants.FIRSTNAME, firstName)); - params.add(new NameValuePair(ApiConstants.LASTNAME, lastName)); - params.add(new NameValuePair(ApiConstants.EMAIL, email)); - params.add(new NameValuePair(ApiConstants.TIMEZONE, timezone)); - params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domainUUId)); - params.add(new NameValuePair(ApiConstants.USER_ID, userUUID)); - params.add(new NameValuePair(ApiConstants.REGION_ID, ""+getId())); - - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - s_logger.debug("Adding account :"+accountName+" to Region: "+region.getId()); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully added user :"+userName+" to Region: "+region.getId()); - } else { - // api call failed. 
Add entry in region_sync table - addRegionSyncItem(region.getId(), command, params); - s_logger.error("Error while Adding user :"+userName+" to Region: "+region.getId()); - } - } - return; - } - - /* - * Propagates Domain creation details to peer Regions - * Adds an entry in region_sync table on failure - */ - @Override - public void propagateAddDomain(String name, Long parentId, String networkDomain, String uuid) { - - String command = "createDomain"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.NAME, name)); - if(parentId != null){ - DomainVO domain = _domainDao.findById(parentId); - if(domain != null){ - params.add(new NameValuePair(ApiConstants.PARENT_DOMAIN_ID, domain.getUuid())); - } - } - params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, networkDomain)); - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, uuid)); - params.add(new NameValuePair(ApiConstants.REGION_ID, ""+getId())); - - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - s_logger.debug("Adding domain :"+name+" to Region: "+region.getId()); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully added domain :"+name+" to Region: "+region.getId()); - } else { - // api call failed. Add entry in region_sync table - addRegionSyncItem(region.getId(), command, params); - s_logger.error("Error while Adding domain :"+name+" to Region: "+region.getId()); - } - } - return; - } - - /** - * Adds an entry to region_sync table - * Entry contains region Id along with failed api - * @param regionId - * @param command - * @param params - */ - private void addRegionSyncItem(int regionId, String command, List params){ - String api = RegionsApiUtil.buildParams(command, params); - RegionSyncVO sync = new RegionSyncVO(regionId, api); - if(_regionSyncDao.persist(sync) == null){ - s_logger.error("Failed to add Region Sync Item. 
RegionId: "+regionId + "API command: "+api); - } - } + public int getId() { + return _id; + } /** * {@inheritDoc} */ - @Override - public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey) { - //Region Id should be unique - if( _regionDao.findById(id) != null ){ - throw new InvalidParameterValueException("Region with id: "+id+" already exists"); - } - //Region Name should be unique - if( _regionDao.findByName(name) != null ){ - throw new InvalidParameterValueException("Region with name: "+name+" already exists"); - } - RegionVO region = new RegionVO(id, name, endPoint, apiKey, secretKey); - return _regionDao.persist(region); - } + @Override + public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey) { + //Region Id should be unique + if( _regionDao.findById(id) != null ){ + throw new InvalidParameterValueException("Region with id: "+id+" already exists"); + } + //Region Name should be unique + if( _regionDao.findByName(name) != null ){ + throw new InvalidParameterValueException("Region with name: "+name+" already exists"); + } + RegionVO region = new RegionVO(id, name, endPoint, apiKey, secretKey); + return _regionDao.persist(region); + } /** * {@inheritDoc} */ - @Override - public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey) { - RegionVO region = _regionDao.findById(id); - - if(region == null){ - throw new InvalidParameterValueException("Region with id: "+id+" does not exist"); - } - - //Ensure region name is unique - if(name != null){ - RegionVO region1 = _regionDao.findByName(name); - if(region1 != null && id != region1.getId()){ - throw new InvalidParameterValueException("Region with name: "+name+" already exists"); - } - } - - if(name != null){ - region.setName(name); - } - - if(endPoint != null){ - region.setEndPoint(endPoint); - } - - if(apiKey != null){ - region.setApiKey(apiKey); - } - - if(secretKey != null){ - 
region.setSecretKey(secretKey); - } - - _regionDao.update(id, region); - return _regionDao.findById(id); - } + @Override + public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey) { + RegionVO region = _regionDao.findById(id); + + if(region == null){ + throw new InvalidParameterValueException("Region with id: "+id+" does not exist"); + } + + //Ensure region name is unique + if(name != null){ + RegionVO region1 = _regionDao.findByName(name); + if(region1 != null && id != region1.getId()){ + throw new InvalidParameterValueException("Region with name: "+name+" already exists"); + } + } + + if(name != null){ + region.setName(name); + } + + if(endPoint != null){ + region.setEndPoint(endPoint); + } + + if(apiKey != null){ + region.setApiKey(apiKey); + } + + if(secretKey != null){ + region.setSecretKey(secretKey); + } + + _regionDao.update(id, region); + return _regionDao.findById(id); + } /** * {@inheritDoc} */ - @Override - public boolean removeRegion(int id) { - RegionVO region = _regionDao.findById(id); - if(region == null){ - throw new InvalidParameterValueException("Failed to delete Region: " + id + ", Region not found"); - } - return _regionDao.remove(id); - } + @Override + public boolean removeRegion(int id) { + RegionVO region = _regionDao.findById(id); + if(region == null){ + throw new InvalidParameterValueException("Failed to delete Region: " + id + ", Region not found"); + } + return _regionDao.remove(id); + } /** * {@inheritDoc} */ - @Override - public List listRegions(Integer id, String name) { - List regions = new ArrayList(); - if(id != null){ - RegionVO region = _regionDao.findById(id); - if(region != null){ - regions.add(region); - } - return regions; - } - if(name != null){ - RegionVO region = _regionDao.findByName(name); - if(region != null){ - regions.add(region); - } - return regions; - } - return _regionDao.listAll(); - } + @Override + public List listRegions(Integer id, String name) { + List regions = new 
ArrayList(); + if(id != null){ + RegionVO region = _regionDao.findById(id); + if(region != null){ + regions.add(region); + } + return regions; + } + if(name != null){ + RegionVO region = _regionDao.findByName(name); + if(region != null){ + regions.add(region); + } + return regions; + } + return _regionDao.listAll(); + } /** * {@inheritDoc} */ - @Override - public boolean deleteUserAccount(long accountId) { - AccountVO account = _accountDao.findById(accountId); - if(account == null){ - throw new InvalidParameterValueException("The specified account does not exist in the system"); - } - String accountUUID = account.getUuid(); - int regionId = account.getRegionId(); - - String command = "deleteAccount"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, accountUUID)); + @Override + public boolean deleteUserAccount(long accountId) { + AccountVO account = _accountDao.findById(accountId); + if(account == null){ + throw new InvalidParameterValueException("The specified account does not exist in the system"); + } + String accountUUID = account.getUuid(); + int regionId = account.getRegionId(); - if(getId() == regionId){ - if(_accountMgr.deleteUserAccount(accountId)){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted account :"+accountUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while deleting account :"+accountUUID+" in Region: "+region.getId()); - } - } - return true; - } else { - return false; - } - } else { - //First delete in the Region where account is created - Region region = _regionDao.findById(regionId); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted account :"+accountUUID+" in Region: "+region.getId()); - return true; - 
} else { - s_logger.error("Error while deleting account :"+accountUUID+" in Region: "+region.getId()); - return false; - } - } - } + String command = "deleteAccount"; + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, accountUUID)); + + if(getId() == regionId){ + return _accountMgr.deleteUserAccount(accountId); + } else { + //First delete in the Region where account is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully deleted account :"+accountUUID+" in Region: "+region.getId()); + return true; + } else { + s_logger.error("Error while deleting account :"+accountUUID+" in Region: "+region.getId()); + return false; + } + } + } /** * {@inheritDoc} */ - @Override - public Account updateAccount(UpdateAccountCmd cmd) { + @Override + public Account updateAccount(UpdateAccountCmd cmd) { Long accountId = cmd.getId(); Long domainId = cmd.getDomainId(); DomainVO domain = _domainDao.findById(domainId); @@ -391,73 +234,52 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man String newAccountName = cmd.getNewName(); String networkDomain = cmd.getNetworkDomain(); //ToDo send details - Map details = cmd.getDetails(); - + Map details = cmd.getDetails(); + Account account = null; if (accountId != null) { account = _accountDao.findById(accountId); } else { account = _accountDao.findEnabledAccount(accountName, domainId); } - + // Check if account exists if (account == null || account.getType() == Account.ACCOUNT_TYPE_PROJECT) { s_logger.error("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); - } + } String command = "updateAccount"; - List params = new ArrayList(); - params.add(new 
NameValuePair(ApiConstants.NEW_NAME, newAccountName)); - params.add(new NameValuePair(ApiConstants.ID, account.getUuid())); - params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); - params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, networkDomain)); - params.add(new NameValuePair(ApiConstants.NEW_NAME, newAccountName)); - if(details != null){ - params.add(new NameValuePair(ApiConstants.ACCOUNT_DETAILS, details.toString())); - } - int regionId = account.getRegionId(); - if(getId() == regionId){ - Account updatedAccount = _accountMgr.updateAccount(cmd); - if(updatedAccount != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully updated account :"+account.getUuid()+" in Region: "+region.getId()); - } else { - s_logger.error("Error while updating account :"+account.getUuid()+" in Region: "+region.getId()); - } - } - } - return updatedAccount; - } else { - //First update in the Region where account is created - Region region = _regionDao.findById(regionId); - RegionAccount updatedAccount = RegionsApiUtil.makeAccountAPICall(region, command, params); - if (updatedAccount != null) { - Long id = _identityDao.getIdentityId("account", updatedAccount.getUuid()); - updatedAccount.setId(id); - Long domainID = _identityDao.getIdentityId("domain", updatedAccount.getDomainUuid()); - updatedAccount.setDomainId(domainID); - s_logger.debug("Successfully updated account :"+account.getUuid()+" in source Region: "+region.getId()); - return updatedAccount; - } else { - throw new CloudRuntimeException("Error while updating account :"+account.getUuid()+" in source Region: "+region.getId()); - } - } - } + List params = new ArrayList(); + params.add(new 
NameValuePair(ApiConstants.ID, account.getUuid())); + params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); + params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); + params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, networkDomain)); + params.add(new NameValuePair(ApiConstants.NEW_NAME, newAccountName)); + if(details != null){ + params.add(new NameValuePair(ApiConstants.ACCOUNT_DETAILS, details.toString())); + } + int regionId = account.getRegionId(); + if(getId() == regionId){ + return _accountMgr.updateAccount(cmd); + } else { + //First update in the Region where account is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully updated account :"+account.getUuid()+" in source Region: "+region.getId()); + return account; + } else { + throw new CloudRuntimeException("Error while updating account :"+account.getUuid()+" in source Region: "+region.getId()); + } + } + } /** * {@inheritDoc} */ - @Override - public Account disableAccount(String accountName, Long domainId, Long accountId, Boolean lockRequested) throws ConcurrentOperationException, ResourceUnavailableException { - Account account = null; + @Override + public Account disableAccount(String accountName, Long domainId, Long accountId, Boolean lockRequested) throws ConcurrentOperationException, ResourceUnavailableException { + Account account = null; if (accountId != null) { account = _accountDao.findById(accountId); } else { @@ -466,60 +288,45 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man if (account == null || account.getType() == Account.ACCOUNT_TYPE_PROJECT) { throw new InvalidParameterValueException("Unable to find active account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); - } - - String accountUUID = account.getUuid(); - - String command = "disableAccount"; - List params = new ArrayList(); - 
params.add(new NameValuePair(ApiConstants.LOCK, lockRequested.toString())); - params.add(new NameValuePair(ApiConstants.ID, accountUUID)); - DomainVO domain = _domainDao.findById(domainId); - if(domain != null){ - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); - } - - int regionId = account.getRegionId(); - if(getId() == regionId){ - Account retAccount = null; - if(lockRequested){ - retAccount = _accountMgr.lockAccount(accountName, domainId, accountId); - } else { - retAccount = _accountMgr.disableAccount(accountName, domainId, accountId); - } - if(retAccount != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully disabled account :"+accountUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while disabling account :"+accountUUID+" in Region: "+region.getId()); - } - } - } - return retAccount; - } else { - //First disable account in the Region where account is created - Region region = _regionDao.findById(regionId); - Account retAccount = RegionsApiUtil.makeAccountAPICall(region, command, params); - if (retAccount != null) { - s_logger.debug("Successfully disabled account :"+accountUUID+" in source Region: "+region.getId()); - return retAccount; - } else { - throw new CloudRuntimeException("Error while disabling account :"+accountUUID+" in source Region: "+region.getId()); - } - } - } + } + + String accountUUID = account.getUuid(); + + String command = "disableAccount"; + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.LOCK, lockRequested.toString())); + params.add(new NameValuePair(ApiConstants.ID, accountUUID)); + DomainVO domain = _domainDao.findById(domainId); + if(domain != null){ + params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); + } 
+ + int regionId = account.getRegionId(); + if(getId() == regionId){ + Account retAccount = null; + if(lockRequested){ + retAccount = _accountMgr.lockAccount(accountName, domainId, accountId); + } else { + retAccount = _accountMgr.disableAccount(accountName, domainId, accountId); + } + return retAccount; + } else { + //First disable account in the Region where account is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully disabled account :"+accountUUID+" in source Region: "+region.getId()); + return account; + } else { + throw new CloudRuntimeException("Error while disabling account :"+accountUUID+" in source Region: "+region.getId()); + } + } + } /** * {@inheritDoc} */ - @Override - public Account enableAccount(String accountName, Long domainId, Long accountId) { + @Override + public Account enableAccount(String accountName, Long domainId, Long accountId) { // Check if account exists Account account = null; if (accountId != null) { @@ -530,350 +337,235 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man if (account == null || account.getType() == Account.ACCOUNT_TYPE_PROJECT) { throw new InvalidParameterValueException("Unable to find account by accountId: " + accountId + " OR by name: " + accountName + " in domain " + domainId); - } - - String accountUUID = account.getUuid(); - - String command = "enableAccount"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, accountUUID)); - params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); - DomainVO domain = _domainDao.findById(domainId); - if(domain != null){ - params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); - } - - int regionId = account.getRegionId(); - if(getId() == regionId){ - Account retAccount = _accountMgr.enableAccount(accountName, domainId, accountId); - if(retAccount != null){ - List regions = _regionDao.listAll(); 
+ } - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully enabled account :"+accountUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while enabling account :"+accountUUID+" in Region: "+region.getId()); - } - } - } - return retAccount; - } else { - //First disable account in the Region where account is created - Region region = _regionDao.findById(regionId); - Account retAccount = RegionsApiUtil.makeAccountAPICall(region, command, params); - if (retAccount != null) { - s_logger.debug("Successfully enabled account :"+accountUUID+" in source Region: "+region.getId()); - return retAccount; - } else { - throw new CloudRuntimeException("Error while enabling account :"+accountUUID+" in source Region: "+region.getId()); - } - } - } + String accountUUID = account.getUuid(); + + String command = "enableAccount"; + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, accountUUID)); + params.add(new NameValuePair(ApiConstants.ACCOUNT, accountName)); + DomainVO domain = _domainDao.findById(domainId); + if(domain != null){ + params.add(new NameValuePair(ApiConstants.DOMAIN_ID, domain.getUuid())); + } + + int regionId = account.getRegionId(); + if(getId() == regionId){ + return _accountMgr.enableAccount(accountName, domainId, accountId); + } else { + //First disable account in the Region where account is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully enabled account :"+accountUUID+" in source Region: "+region.getId()); + return account; + } else { + throw new CloudRuntimeException("Error while enabling account :"+accountUUID+" in source Region: "+region.getId()); + } + } + } /** * {@inheritDoc} - */ - @Override - public boolean 
deleteUser(DeleteUserCmd cmd) { + */ + @Override + public boolean deleteUser(DeleteUserCmd cmd) { long id = cmd.getId(); UserVO user = _userDao.findById(id); if (user == null) { throw new InvalidParameterValueException("The specified user doesn't exist in the system"); - } - - String userUUID = user.getUuid(); - int regionId = user.getRegionId(); - - String command = "deleteUser"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, userUUID)); - - if(getId() == regionId){ - if(_accountMgr.deleteUser(cmd)){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted user :"+userUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while deleting account :"+userUUID+" in Region: "+region.getId()); - } - } - return true; - } else { - return false; - } - } else { - //First delete in the Region where account is created - Region region = _regionDao.findById(regionId); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted user :"+userUUID+" in source Region: "+region.getId()); - return true; - } else { - s_logger.error("Error while deleting user :"+userUUID+" in source Region: "+region.getId()); - return false; - } - } - } + } + + String userUUID = user.getUuid(); + int regionId = user.getRegionId(); + + String command = "deleteUser"; + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, userUUID)); + + if(getId() == regionId){ + return _accountMgr.deleteUser(cmd); + } else { + //First delete in the Region where user is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully deleted user :"+userUUID+" in source Region: "+region.getId()); 
+ return true; + } else { + s_logger.error("Error while deleting user :"+userUUID+" in source Region: "+region.getId()); + return false; + } + } + } /** * {@inheritDoc} - */ - @Override - public Domain updateDomain(UpdateDomainCmd cmd) { - long id = cmd.getId(); - DomainVO domain = _domainDao.findById(id); - if(domain == null){ - throw new InvalidParameterValueException("The specified domain doesn't exist in the system"); - } - - String domainUUID = domain.getUuid(); - - String command = "updateDomain"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, domainUUID)); - params.add(new NameValuePair(ApiConstants.NAME, cmd.getDomainName())); - params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, cmd.getNetworkDomain())); - - int regionId = domain.getRegionId(); - if(getId() == regionId){ - Domain updatedDomain = _domainMgr.updateDomain(cmd); - if(updatedDomain != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully updated updatedDomain :"+domainUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while updating updatedDomain :"+domainUUID+" in Region: "+region.getId()); - } - } - } - return updatedDomain; - } else { - //First update in the Region where domain was created - Region region = _regionDao.findById(regionId); - RegionDomain updatedDomain = RegionsApiUtil.makeDomainAPICall(region, command, params); - if (updatedDomain != null) { - Long parentId = _identityDao.getIdentityId("domain", updatedDomain.getParentUuid()); - updatedDomain.setParent(parentId); - s_logger.debug("Successfully updated user :"+domainUUID+" in source Region: "+region.getId()); - return (DomainVO)updatedDomain; - } else { - throw new CloudRuntimeException("Error while updating user :"+domainUUID+" in source 
Region: "+region.getId()); - } - } - } - + */ + @Override + public Domain updateDomain(UpdateDomainCmd cmd) { + long id = cmd.getId(); + DomainVO domain = _domainDao.findById(id); + if(domain == null){ + throw new InvalidParameterValueException("The specified domain doesn't exist in the system"); + } + + String domainUUID = domain.getUuid(); + + String command = "updateDomain"; + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, domainUUID)); + params.add(new NameValuePair(ApiConstants.NAME, cmd.getDomainName())); + params.add(new NameValuePair(ApiConstants.NETWORK_DOMAIN, cmd.getNetworkDomain())); + + int regionId = domain.getRegionId(); + if(getId() == regionId){ + return _domainMgr.updateDomain(cmd); + } else { + //First update in the Region where domain was created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully updated user :"+domainUUID+" in source Region: "+region.getId()); + return domain; + } else { + throw new CloudRuntimeException("Error while updating user :"+domainUUID+" in source Region: "+region.getId()); + } + } + } + /** * {@inheritDoc} */ - @Override - public boolean deleteDomain(Long id, Boolean cleanup) { - DomainVO domain = _domainDao.findById(id); - if(domain == null){ - throw new InvalidParameterValueException("The specified domain doesn't exist in the system"); - } - - String domainUUID = domain.getUuid(); - - String command = "deleteDomain"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, domainUUID)); - params.add(new NameValuePair(ApiConstants.CLEANUP, cleanup.toString())); - + @Override + public boolean deleteDomain(Long id, Boolean cleanup) { + DomainVO domain = _domainDao.findById(id); + if(domain == null){ + throw new InvalidParameterValueException("The specified domain doesn't exist in the system"); + } + + String domainUUID = domain.getUuid(); + + String command = "deleteDomain"; 
+ List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, domainUUID)); + params.add(new NameValuePair(ApiConstants.CLEANUP, cleanup.toString())); + int regionId = domain.getRegionId(); - if(getId() == regionId){ - if(_domainMgr.deleteDomain(id, cleanup)){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted domain :"+domainUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while deleting domain :"+domainUUID+" in Region: "+region.getId()); - } - } - return true; - } else { - return false; - } - } else { - //First delete in the Region where domain is created - Region region = _regionDao.findById(regionId); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully deleted domain :"+domainUUID+" in Region: "+region.getId()); - return true; - } else { - s_logger.error("Error while deleting domain :"+domainUUID+" in Region: "+region.getId()); - return false; - } - } - } + if(getId() == regionId){ + return _domainMgr.deleteDomain(id, cleanup); + } else { + //First delete in the Region where domain is created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully deleted domain :"+domainUUID+" in Region: "+region.getId()); + return true; + } else { + s_logger.error("Error while deleting domain :"+domainUUID+" in Region: "+region.getId()); + return false; + } + } + } /** * {@inheritDoc} - */ - @Override - public UserAccount updateUser(UpdateUserCmd cmd) { + */ + @Override + public UserAccount updateUser(UpdateUserCmd cmd) { long id = cmd.getId(); UserVO user = _userDao.findById(id); if (user == null) { throw new InvalidParameterValueException("The specified user doesn't exist in 
the system"); - } - - String userUUID = user.getUuid(); - - String command = "updateUser"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, userUUID)); - params.add(new NameValuePair(ApiConstants.API_KEY, cmd.getApiKey())); - params.add(new NameValuePair(ApiConstants.EMAIL, cmd.getEmail())); - params.add(new NameValuePair(ApiConstants.FIRSTNAME, cmd.getFirstname())); - params.add(new NameValuePair(ApiConstants.LASTNAME, cmd.getLastname())); - params.add(new NameValuePair(ApiConstants.PASSWORD, cmd.getPassword())); - params.add(new NameValuePair(ApiConstants.SECRET_KEY, cmd.getSecretKey())); - params.add(new NameValuePair(ApiConstants.TIMEZONE, cmd.getTimezone())); - params.add(new NameValuePair(ApiConstants.USERNAME, cmd.getUsername())); - - int regionId = user.getRegionId(); - if(getId() == regionId){ - UserAccount updateUser = _accountMgr.updateUser(cmd); - if(updateUser != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully updated user :"+userUUID+" in Region: "+region.getId()); - } else { - s_logger.error("Error while updating user :"+userUUID+" in Region: "+region.getId()); - } - } - } - return updateUser; - } else { - //First update in the Region where user was created - Region region = _regionDao.findById(regionId); - UserAccount updateUser = RegionsApiUtil.makeUserAccountAPICall(region, command, params); - if (updateUser != null) { - s_logger.debug("Successfully updated user :"+userUUID+" in source Region: "+region.getId()); - return updateUser; - } else { - throw new CloudRuntimeException("Error while updating user :"+userUUID+" in source Region: "+region.getId()); - } - } - } + } + + String userUUID = user.getUuid(); + + String command = "updateUser"; + List params = new ArrayList(); + 
params.add(new NameValuePair(ApiConstants.ID, userUUID)); + params.add(new NameValuePair(ApiConstants.API_KEY, cmd.getApiKey())); + params.add(new NameValuePair(ApiConstants.EMAIL, cmd.getEmail())); + params.add(new NameValuePair(ApiConstants.FIRSTNAME, cmd.getFirstname())); + params.add(new NameValuePair(ApiConstants.LASTNAME, cmd.getLastname())); + params.add(new NameValuePair(ApiConstants.PASSWORD, cmd.getPassword())); + params.add(new NameValuePair(ApiConstants.SECRET_KEY, cmd.getSecretKey())); + params.add(new NameValuePair(ApiConstants.TIMEZONE, cmd.getTimezone())); + params.add(new NameValuePair(ApiConstants.USERNAME, cmd.getUsername())); + + int regionId = user.getRegionId(); + if(getId() == regionId){ + return _accountMgr.updateUser(cmd); + } else { + //First update in the Region where user was created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully updated user :"+userUUID+" in source Region: "+region.getId()); + return _userAccountDao.findById(id); + } else { + throw new CloudRuntimeException("Error while updating user :"+userUUID+" in source Region: "+region.getId()); + } + } + } /** * {@inheritDoc} - */ - @Override - public UserAccount disableUser(Long userId) { + */ + @Override + public UserAccount disableUser(Long userId) { UserVO user = _userDao.findById(userId); if (user == null || user.getRemoved() != null) { throw new InvalidParameterValueException("Unable to find active user by id " + userId); } - + int regionId = user.getRegionId(); - + String command = "disableUser"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, user.getUuid())); - - if(getId() == regionId){ - UserAccount disabledUser = _accountMgr.disableUser(userId); - if(disabledUser != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new 
NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully disabled user :"+user.getUuid()+" in Region: "+region.getId()); - } else { - s_logger.error("Error while disabling user :"+user.getUuid()+" in Region: "+region.getId()); - } - } - } - return disabledUser; - } else { - //First disable in the Region where user was created - Region region = _regionDao.findById(regionId); - UserAccount disabledUser = RegionsApiUtil.makeUserAccountAPICall(region, command, params); - if (disabledUser != null) { - s_logger.debug("Successfully disabled user :"+user.getUuid()+" in source Region: "+region.getId()); - return disabledUser; - } else { - throw new CloudRuntimeException("Error while disabling user :"+user.getUuid()+" in source Region: "+region.getId()); - } - } - } + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, user.getUuid())); + + if(getId() == regionId){ + return _accountMgr.disableUser(userId); + } else { + //First disable in the Region where user was created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully disabled user :"+user.getUuid()+" in source Region: "+region.getId()); + return _userAccountDao.findById(userId); + } else { + throw new CloudRuntimeException("Error while disabling user :"+user.getUuid()+" in source Region: "+region.getId()); + } + } + } /** * {@inheritDoc} - */ - @Override - public UserAccount enableUser(long userId) { + */ + @Override + public UserAccount enableUser(long userId) { UserVO user = _userDao.findById(userId); if (user == null || user.getRemoved() != null) { throw new InvalidParameterValueException("Unable to find active user by id " + userId); - } - + } + int regionId = user.getRegionId(); - + String command = "enableUser"; - List params = new ArrayList(); - params.add(new NameValuePair(ApiConstants.ID, user.getUuid())); - - 
if(getId() == regionId){ - UserAccount enabledUser = _accountMgr.enableUser(userId); - if(enabledUser != null){ - List regions = _regionDao.listAll(); - for (Region region : regions){ - if(region.getId() == getId()){ - continue; - } - params.add(new NameValuePair(ApiConstants.IS_PROPAGATE, "true")); - if (RegionsApiUtil.makeAPICall(region, command, params)) { - s_logger.debug("Successfully enabled user :"+user.getUuid()+" in Region: "+region.getId()); - } else { - s_logger.error("Error while disabling user :"+user.getUuid()+" in Region: "+region.getId()); - } - } - } - return enabledUser; - } else { - //First enable in the Region where user was created - Region region = _regionDao.findById(regionId); - UserAccount enabledUser = RegionsApiUtil.makeUserAccountAPICall(region, command, params); - if (enabledUser != null) { - s_logger.debug("Successfully enabled user :"+user.getUuid()+" in source Region: "+region.getId()); - return enabledUser; - } else { - throw new CloudRuntimeException("Error while enabling user :"+user.getUuid()+" in source Region: "+region.getId()); - } - } - } + List params = new ArrayList(); + params.add(new NameValuePair(ApiConstants.ID, user.getUuid())); + + if(getId() == regionId){ + return _accountMgr.enableUser(userId); + } else { + //First enable in the Region where user was created + Region region = _regionDao.findById(regionId); + if (RegionsApiUtil.makeAPICall(region, command, params)) { + s_logger.debug("Successfully enabled user :"+user.getUuid()+" in source Region: "+region.getId()); + return _userAccountDao.findById(userId); + } else { + throw new CloudRuntimeException("Error while enabling user :"+user.getUuid()+" in source Region: "+region.getId()); + } + } + } } diff --git a/server/src/org/apache/cloudstack/region/RegionServiceImpl.java b/server/src/org/apache/cloudstack/region/RegionServiceImpl.java index 7aed881d215..0662c320145 100755 --- a/server/src/org/apache/cloudstack/region/RegionServiceImpl.java +++ 
b/server/src/org/apache/cloudstack/region/RegionServiceImpl.java @@ -16,13 +16,13 @@ // under the License. package org.apache.cloudstack.region; -import java.util.List; -import java.util.Map; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; - +import com.cloud.domain.Domain; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.user.UserAccount; +import com.cloud.utils.component.Manager; +import com.cloud.utils.component.ManagerBase; import org.apache.cloudstack.api.command.admin.account.DeleteAccountCmd; import org.apache.cloudstack.api.command.admin.account.DisableAccountCmd; import org.apache.cloudstack.api.command.admin.account.EnableAccountCmd; @@ -34,53 +34,31 @@ import org.apache.cloudstack.api.command.admin.user.DisableUserCmd; import org.apache.cloudstack.api.command.admin.user.EnableUserCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.cloudstack.api.command.user.region.ListRegionsCmd; -import org.apache.cloudstack.region.dao.RegionDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.domain.Domain; -import com.cloud.domain.dao.DomainDao; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.DomainManager; -import com.cloud.user.UserAccount; -import com.cloud.user.UserContext; -import com.cloud.user.dao.AccountDao; -import com.cloud.user.dao.UserDao; -import com.cloud.utils.component.Manager; -import com.cloud.utils.component.ManagerBase; +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.List; +import java.util.Map; @Component 
@Local(value = { RegionService.class }) public class RegionServiceImpl extends ManagerBase implements RegionService, Manager { public static final Logger s_logger = Logger.getLogger(RegionServiceImpl.class); - - @Inject - private RegionDao _regionDao; - @Inject - private AccountDao _accountDao; - @Inject - private UserDao _userDao; - @Inject - private DomainDao _domainDao; + @Inject private RegionManager _regionMgr; - @Inject - private AccountManager _accountMgr; - @Inject - private DomainManager _domainMgr; - + private String _name; - + @Override public boolean configure(final String name, final Map params) throws ConfigurationException { _name = name; return true; } - + @Override public boolean start() { return true; @@ -99,203 +77,117 @@ public class RegionServiceImpl extends ManagerBase implements RegionService, Man /** * {@inheritDoc} */ - @Override - public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey) { - //Check for valid Name - //Check valid end_point url - return _regionMgr.addRegion(id, name, endPoint, apiKey, secretKey); - } - - /** - * {@inheritDoc} - */ - @Override - public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey) { - //Check for valid Name - //Check valid end_point url - return _regionMgr.updateRegion(id, name, endPoint, apiKey, secretKey); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean removeRegion(int id) { - return _regionMgr.removeRegion(id); - } - - /** - * {@inheritDoc} - */ - @Override - public List listRegions(ListRegionsCmd cmd) { - return _regionMgr.listRegions(cmd.getId(), cmd.getName()); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean deleteUserAccount(DeleteAccountCmd cmd) { - boolean result = false; - if(checkIsPropagate(cmd.getIsPropagate())){ - result = _accountMgr.deleteUserAccount(cmd.getId()); - } else { - result = _regionMgr.deleteUserAccount(cmd.getId()); - } - return result; - } - - /** - * {@inheritDoc} - 
*/ - @Override - public Account updateAccount(UpdateAccountCmd cmd) { - Account result = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - result = _accountMgr.updateAccount(cmd); - } else { - result = _regionMgr.updateAccount(cmd); - } - - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public Account disableAccount(DisableAccountCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException { - Account result = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - if(cmd.getLockRequested()) - result = _accountMgr.lockAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId()); - else - result = _accountMgr.disableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId()); - } else { - result = _regionMgr.disableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId(), cmd.getLockRequested()); - } - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public Account enableAccount(EnableAccountCmd cmd) { - Account result = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - result = _accountMgr.enableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId()); - } else { - result = _regionMgr.enableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId()); - } - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean deleteUser(DeleteUserCmd cmd) { - boolean result = false; - if(checkIsPropagate(cmd.getIsPropagate())){ - result = _accountMgr.deleteUser(cmd); - } else { - result = _regionMgr.deleteUser(cmd); - } - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public Domain updateDomain(UpdateDomainCmd cmd) { - Domain domain = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - domain = _domainMgr.updateDomain(cmd); - } else { - domain = _regionMgr.updateDomain(cmd); - } - return domain; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean deleteDomain(DeleteDomainCmd cmd) { - boolean result = false; - if(checkIsPropagate(cmd.isPropagate())){ - result = 
_domainMgr.deleteDomain(cmd.getId(), cmd.getCleanup()); - } else { - result = _regionMgr.deleteDomain(cmd.getId(), cmd.getCleanup()); - } - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public UserAccount updateUser(UpdateUserCmd cmd){ - UserAccount user = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - user = _accountMgr.updateUser(cmd); - } else { - user = _regionMgr.updateUser(cmd); - } - return user; - } - - /** - * {@inheritDoc} - */ - @Override - public UserAccount disableUser(DisableUserCmd cmd) { - UserAccount user = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - user = _accountMgr.disableUser(cmd.getId()); - } else { - user = _regionMgr.disableUser(cmd.getId()); - } - return user; - } - - /** - * {@inheritDoc} - */ - @Override - public UserAccount enableUser(EnableUserCmd cmd) { - UserAccount user = null; - if(checkIsPropagate(cmd.getIsPropagate())){ - user = _accountMgr.enableUser(cmd.getId()); - } else { - user = _regionMgr.enableUser(cmd.getId()); - } - return user; - } - - private boolean isRootAdmin(short accountType) { - return (accountType == Account.ACCOUNT_TYPE_ADMIN); + @Override + public Region addRegion(int id, String name, String endPoint, String apiKey, String secretKey) { + //Check for valid Name + //Check valid end_point url + return _regionMgr.addRegion(id, name, endPoint, apiKey, secretKey); } - + /** - * Check isPopagate flag, Only ROOT Admin can use this param - * @param isPopagate - * @return + * {@inheritDoc} + */ + @Override + public Region updateRegion(int id, String name, String endPoint, String apiKey, String secretKey) { + //Check for valid Name + //Check valid end_point url + return _regionMgr.updateRegion(id, name, endPoint, apiKey, secretKey); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean removeRegion(int id) { + return _regionMgr.removeRegion(id); + } + + /** + * {@inheritDoc} + */ + @Override + public List listRegions(ListRegionsCmd cmd) { + return 
_regionMgr.listRegions(cmd.getId(), cmd.getName()); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean deleteUserAccount(DeleteAccountCmd cmd) { + return _regionMgr.deleteUserAccount(cmd.getId()); + } + + /** + * {@inheritDoc} */ - private boolean checkIsPropagate(Boolean isPopagate){ - if(isPopagate == null || !isPopagate){ - return false; - } - // Only Admin can use isPopagate flag - UserContext ctx = UserContext.current(); - Account caller = ctx.getCaller(); - if(!isRootAdmin(caller.getType())){ - throw new PermissionDeniedException("isPropagate param cannot be used by non ROOT Admin"); - } - return true; + @Override + public Account updateAccount(UpdateAccountCmd cmd) { + return _regionMgr.updateAccount(cmd); + } + + /** + * {@inheritDoc} + */ + @Override + public Account disableAccount(DisableAccountCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException { + return _regionMgr.disableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId(), cmd.getLockRequested()); + } + + /** + * {@inheritDoc} + */ + @Override + public Account enableAccount(EnableAccountCmd cmd) { + return _regionMgr.enableAccount(cmd.getAccountName(), cmd.getDomainId(), cmd.getId()); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean deleteUser(DeleteUserCmd cmd) { + return _regionMgr.deleteUser(cmd); + } + + /** + * {@inheritDoc} + */ + @Override + public Domain updateDomain(UpdateDomainCmd cmd) { + return _regionMgr.updateDomain(cmd); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean deleteDomain(DeleteDomainCmd cmd) { + return _regionMgr.deleteDomain(cmd.getId(), cmd.getCleanup()); + } + + /** + * {@inheritDoc} + */ + @Override + public UserAccount updateUser(UpdateUserCmd cmd){ + return _regionMgr.updateUser(cmd); + } + + /** + * {@inheritDoc} + */ + @Override + public UserAccount disableUser(DisableUserCmd cmd) { + return _regionMgr.disableUser(cmd.getId()); + } + + /** + * {@inheritDoc} + */ + @Override + public UserAccount 
enableUser(EnableUserCmd cmd) { + return _regionMgr.enableUser(cmd.getId()); } } diff --git a/server/src/org/apache/cloudstack/region/RegionsApiUtil.java b/server/src/org/apache/cloudstack/region/RegionsApiUtil.java index c7625db5534..2ace4f9295c 100644 --- a/server/src/org/apache/cloudstack/region/RegionsApiUtil.java +++ b/server/src/org/apache/cloudstack/region/RegionsApiUtil.java @@ -49,258 +49,258 @@ import com.thoughtworks.xstream.io.xml.DomDriver; * */ public class RegionsApiUtil { - public static final Logger s_logger = Logger.getLogger(RegionsApiUtil.class); + public static final Logger s_logger = Logger.getLogger(RegionsApiUtil.class); - /** - * Makes an api call using region service end_point, api command and params - * @param region - * @param command - * @param params - * @return True, if api is successful - */ - protected static boolean makeAPICall(Region region, String command, List params){ - try { - String apiParams = buildParams(command, params); - String url = buildUrl(apiParams, region); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - if( client.executeMethod(method) == 200){ - return true; - } else { - return false; - } - } catch (HttpException e) { - s_logger.error(e.getMessage()); - return false; - } catch (IOException e) { - s_logger.error(e.getMessage()); - return false; - } - } + /** + * Makes an api call using region service end_point, api command and params + * @param region + * @param command + * @param params + * @return True, if api is successful + */ + protected static boolean makeAPICall(Region region, String command, List params){ + try { + String apiParams = buildParams(command, params); + String url = buildUrl(apiParams, region); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + if( client.executeMethod(method) == 200){ + return true; + } else { + return false; + } + } catch (HttpException e) { + s_logger.error(e.getMessage()); + return false; + } catch 
(IOException e) { + s_logger.error(e.getMessage()); + return false; + } + } - /** - * Makes an api call using region service end_point, api command and params - * Returns Account object on success - * @param region - * @param command - * @param params - * @return - */ - protected static RegionAccount makeAccountAPICall(Region region, String command, List params){ - try { - String url = buildUrl(buildParams(command, params), region); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - if( client.executeMethod(method) == 200){ - InputStream is = method.getResponseBodyAsStream(); - //Translate response to Account object - XStream xstream = new XStream(new DomDriver()); - xstream.alias("account", RegionAccount.class); - xstream.alias("user", RegionUser.class); - xstream.aliasField("id", RegionAccount.class, "uuid"); - xstream.aliasField("name", RegionAccount.class, "accountName"); - xstream.aliasField("accounttype", RegionAccount.class, "type"); - xstream.aliasField("domainid", RegionAccount.class, "domainUuid"); - xstream.aliasField("networkdomain", RegionAccount.class, "networkDomain"); - xstream.aliasField("id", RegionUser.class, "uuid"); - xstream.aliasField("accountId", RegionUser.class, "accountUuid"); - ObjectInputStream in = xstream.createObjectInputStream(is); - return (RegionAccount)in.readObject(); - } else { - return null; - } - } catch (HttpException e) { - s_logger.error(e.getMessage()); - return null; - } catch (IOException e) { - s_logger.error(e.getMessage()); - return null; - } catch (ClassNotFoundException e) { - s_logger.error(e.getMessage()); - return null; - } - } + /** + * Makes an api call using region service end_point, api command and params + * Returns Account object on success + * @param region + * @param command + * @param params + * @return + */ + protected static RegionAccount makeAccountAPICall(Region region, String command, List params){ + try { + String url = buildUrl(buildParams(command, params), 
region); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + if( client.executeMethod(method) == 200){ + InputStream is = method.getResponseBodyAsStream(); + //Translate response to Account object + XStream xstream = new XStream(new DomDriver()); + xstream.alias("account", RegionAccount.class); + xstream.alias("user", RegionUser.class); + xstream.aliasField("id", RegionAccount.class, "uuid"); + xstream.aliasField("name", RegionAccount.class, "accountName"); + xstream.aliasField("accounttype", RegionAccount.class, "type"); + xstream.aliasField("domainid", RegionAccount.class, "domainUuid"); + xstream.aliasField("networkdomain", RegionAccount.class, "networkDomain"); + xstream.aliasField("id", RegionUser.class, "uuid"); + xstream.aliasField("accountId", RegionUser.class, "accountUuid"); + ObjectInputStream in = xstream.createObjectInputStream(is); + return (RegionAccount)in.readObject(); + } else { + return null; + } + } catch (HttpException e) { + s_logger.error(e.getMessage()); + return null; + } catch (IOException e) { + s_logger.error(e.getMessage()); + return null; + } catch (ClassNotFoundException e) { + s_logger.error(e.getMessage()); + return null; + } + } - /** - * Makes an api call using region service end_point, api command and params - * Returns Domain object on success - * @param region - * @param command - * @param params - * @return - */ - protected static RegionDomain makeDomainAPICall(Region region, String command, List params){ - try { - String url = buildUrl(buildParams(command, params), region); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - if( client.executeMethod(method) == 200){ - InputStream is = method.getResponseBodyAsStream(); - XStream xstream = new XStream(new DomDriver()); - //Translate response to Domain object - xstream.alias("domain", RegionDomain.class); - xstream.aliasField("id", RegionDomain.class, "uuid"); - xstream.aliasField("parentdomainid", 
RegionDomain.class, "parentUuid"); - xstream.aliasField("networkdomain", DomainVO.class, "networkDomain"); - ObjectInputStream in = xstream.createObjectInputStream(is); - return (RegionDomain)in.readObject(); - } else { - return null; - } - } catch (HttpException e) { - s_logger.error(e.getMessage()); - return null; - } catch (IOException e) { - s_logger.error(e.getMessage()); - return null; - } catch (ClassNotFoundException e) { - s_logger.error(e.getMessage()); - return null; - } - } + /** + * Makes an api call using region service end_point, api command and params + * Returns Domain object on success + * @param region + * @param command + * @param params + * @return + */ + protected static RegionDomain makeDomainAPICall(Region region, String command, List params){ + try { + String url = buildUrl(buildParams(command, params), region); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + if( client.executeMethod(method) == 200){ + InputStream is = method.getResponseBodyAsStream(); + XStream xstream = new XStream(new DomDriver()); + //Translate response to Domain object + xstream.alias("domain", RegionDomain.class); + xstream.aliasField("id", RegionDomain.class, "uuid"); + xstream.aliasField("parentdomainid", RegionDomain.class, "parentUuid"); + xstream.aliasField("networkdomain", DomainVO.class, "networkDomain"); + ObjectInputStream in = xstream.createObjectInputStream(is); + return (RegionDomain)in.readObject(); + } else { + return null; + } + } catch (HttpException e) { + s_logger.error(e.getMessage()); + return null; + } catch (IOException e) { + s_logger.error(e.getMessage()); + return null; + } catch (ClassNotFoundException e) { + s_logger.error(e.getMessage()); + return null; + } + } - /** - * Makes an api call using region service end_point, api command and params - * Returns UserAccount object on success - * @param region - * @param command - * @param params - * @return - */ - protected static UserAccount 
makeUserAccountAPICall(Region region, String command, List params){ - try { - String url = buildUrl(buildParams(command, params), region); - HttpClient client = new HttpClient(); - HttpMethod method = new GetMethod(url); - if( client.executeMethod(method) == 200){ - InputStream is = method.getResponseBodyAsStream(); - XStream xstream = new XStream(new DomDriver()); - xstream.alias("useraccount", UserAccountVO.class); - xstream.aliasField("id", UserAccountVO.class, "uuid"); - ObjectInputStream in = xstream.createObjectInputStream(is); - return (UserAccountVO)in.readObject(); - } else { - return null; - } - } catch (HttpException e) { - s_logger.error(e.getMessage()); - return null; - } catch (IOException e) { - s_logger.error(e.getMessage()); - return null; - } catch (ClassNotFoundException e) { - s_logger.error(e.getMessage()); - return null; - } - } + /** + * Makes an api call using region service end_point, api command and params + * Returns UserAccount object on success + * @param region + * @param command + * @param params + * @return + */ + protected static UserAccount makeUserAccountAPICall(Region region, String command, List params){ + try { + String url = buildUrl(buildParams(command, params), region); + HttpClient client = new HttpClient(); + HttpMethod method = new GetMethod(url); + if( client.executeMethod(method) == 200){ + InputStream is = method.getResponseBodyAsStream(); + XStream xstream = new XStream(new DomDriver()); + xstream.alias("useraccount", UserAccountVO.class); + xstream.aliasField("id", UserAccountVO.class, "uuid"); + ObjectInputStream in = xstream.createObjectInputStream(is); + return (UserAccountVO)in.readObject(); + } else { + return null; + } + } catch (HttpException e) { + s_logger.error(e.getMessage()); + return null; + } catch (IOException e) { + s_logger.error(e.getMessage()); + return null; + } catch (ClassNotFoundException e) { + s_logger.error(e.getMessage()); + return null; + } + } - /** - * Builds parameters string with 
command and encoded param values - * @param command - * @param params - * @return - */ - protected static String buildParams(String command, List params) { - StringBuffer paramString = new StringBuffer("command="+command); - Iterator iter = params.iterator(); - try { - while(iter.hasNext()){ - NameValuePair param = iter.next(); - if(param.getValue() != null && !(param.getValue().isEmpty())){ - paramString.append("&"+param.getName()+"="+URLEncoder.encode(param.getValue(), "UTF-8")); - } - } - } - catch (UnsupportedEncodingException e) { - s_logger.error(e.getMessage()); - return null; - } - return paramString.toString(); - } - - /** - * Build URL for api call using region end_point - * Parameters are sorted and signed using secret_key - * @param apiParams - * @param region - * @return - */ - private static String buildUrl(String apiParams, Region region) { + /** + * Builds parameters string with command and encoded param values + * @param command + * @param params + * @return + */ + protected static String buildParams(String command, List params) { + StringBuffer paramString = new StringBuffer("command="+command); + Iterator iter = params.iterator(); + try { + while(iter.hasNext()){ + NameValuePair param = iter.next(); + if(param.getValue() != null && !(param.getValue().isEmpty())){ + paramString.append("&"+param.getName()+"="+URLEncoder.encode(param.getValue(), "UTF-8")); + } + } + } + catch (UnsupportedEncodingException e) { + s_logger.error(e.getMessage()); + return null; + } + return paramString.toString(); + } - String apiKey = region.getApiKey(); - String secretKey = region.getSecretKey(); + /** + * Build URL for api call using region end_point + * Parameters are sorted and signed using secret_key + * @param apiParams + * @param region + * @return + */ + private static String buildUrl(String apiParams, Region region) { + + String apiKey = region.getApiKey(); + String secretKey = region.getSecretKey(); - if (apiKey == null || secretKey == null) { - return 
region.getEndPoint() +"?"+ apiParams; - } + if (apiKey == null || secretKey == null) { + return region.getEndPoint() +"?"+ apiParams; + } - String encodedApiKey; - try { - encodedApiKey = URLEncoder.encode(apiKey, "UTF-8"); + String encodedApiKey; + try { + encodedApiKey = URLEncoder.encode(apiKey, "UTF-8"); - List sortedParams = new ArrayList(); - sortedParams.add("apikey=" + encodedApiKey.toLowerCase()); - StringTokenizer st = new StringTokenizer(apiParams, "&"); - String url = null; - boolean first = true; - while (st.hasMoreTokens()) { - String paramValue = st.nextToken(); - String param = paramValue.substring(0, paramValue.indexOf("=")); - String value = paramValue.substring(paramValue.indexOf("=") + 1, paramValue.length()); - if (first) { - url = param + "=" + value; - first = false; - } else { - url = url + "&" + param + "=" + value; - } - sortedParams.add(param.toLowerCase() + "=" + value.toLowerCase()); - } - Collections.sort(sortedParams); + List sortedParams = new ArrayList(); + sortedParams.add("apikey=" + encodedApiKey.toLowerCase()); + StringTokenizer st = new StringTokenizer(apiParams, "&"); + String url = null; + boolean first = true; + while (st.hasMoreTokens()) { + String paramValue = st.nextToken(); + String param = paramValue.substring(0, paramValue.indexOf("=")); + String value = paramValue.substring(paramValue.indexOf("=") + 1, paramValue.length()); + if (first) { + url = param + "=" + value; + first = false; + } else { + url = url + "&" + param + "=" + value; + } + sortedParams.add(param.toLowerCase() + "=" + value.toLowerCase()); + } + Collections.sort(sortedParams); - //Construct the sorted URL and sign and URL encode the sorted URL with your secret key - String sortedUrl = null; - first = true; - for (String param : sortedParams) { - if (first) { - sortedUrl = param; - first = false; - } else { - sortedUrl = sortedUrl + "&" + param; - } - } - String encodedSignature = signRequest(sortedUrl, secretKey); + //Construct the sorted URL and sign 
and URL encode the sorted URL with your secret key + String sortedUrl = null; + first = true; + for (String param : sortedParams) { + if (first) { + sortedUrl = param; + first = false; + } else { + sortedUrl = sortedUrl + "&" + param; + } + } + String encodedSignature = signRequest(sortedUrl, secretKey); - String finalUrl = region.getEndPoint() +"?"+apiParams+ "&apiKey=" + apiKey + "&signature=" + encodedSignature; - - return finalUrl; + String finalUrl = region.getEndPoint() +"?"+apiParams+ "&apiKey=" + apiKey + "&signature=" + encodedSignature; - } catch (UnsupportedEncodingException e) { - s_logger.error(e.getMessage()); - return null; - } - } + return finalUrl; + + } catch (UnsupportedEncodingException e) { + s_logger.error(e.getMessage()); + return null; + } + } + + /** + * 1. Signs a string with a secret key using SHA-1 2. Base64 encode the result 3. URL encode the final result + * + * @param request + * @param key + * @return + */ + private static String signRequest(String request, String key) { + try { + Mac mac = Mac.getInstance("HmacSHA1"); + SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); + mac.init(keySpec); + mac.update(request.getBytes()); + byte[] encryptedBytes = mac.doFinal(); + return URLEncoder.encode(Base64.encodeBase64String(encryptedBytes), "UTF-8"); + } catch (Exception ex) { + s_logger.error(ex.getMessage()); + return null; + } + } - /** - * 1. Signs a string with a secret key using SHA-1 2. Base64 encode the result 3. 
URL encode the final result - * - * @param request - * @param key - * @return - */ - private static String signRequest(String request, String key) { - try { - Mac mac = Mac.getInstance("HmacSHA1"); - SecretKeySpec keySpec = new SecretKeySpec(key.getBytes(), "HmacSHA1"); - mac.init(keySpec); - mac.update(request.getBytes()); - byte[] encryptedBytes = mac.doFinal(); - return URLEncoder.encode(Base64.encodeBase64String(encryptedBytes), "UTF-8"); - } catch (Exception ex) { - s_logger.error(ex.getMessage()); - return null; - } - } - } \ No newline at end of file diff --git a/server/src/org/apache/cloudstack/region/dao/RegionSyncDaoImpl.java b/server/src/org/apache/cloudstack/region/dao/RegionSyncDaoImpl.java deleted file mode 100644 index 9cd9b0dd71b..00000000000 --- a/server/src/org/apache/cloudstack/region/dao/RegionSyncDaoImpl.java +++ /dev/null @@ -1,35 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
-package org.apache.cloudstack.region.dao; - -import javax.ejb.Local; - -import org.apache.cloudstack.region.RegionSyncVO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import com.cloud.utils.db.GenericDaoBase; - -@Component -@Local(value={RegionSyncDao.class}) -public class RegionSyncDaoImpl extends GenericDaoBase implements RegionSyncDao { - private static final Logger s_logger = Logger.getLogger(RegionSyncDaoImpl.class); - - public RegionSyncDaoImpl(){ - - } -} diff --git a/server/test/com/cloud/agent/MockAgentManagerImpl.java b/server/test/com/cloud/agent/MockAgentManagerImpl.java index bdacf68e28a..7e3462d8ff8 100755 --- a/server/test/com/cloud/agent/MockAgentManagerImpl.java +++ b/server/test/com/cloud/agent/MockAgentManagerImpl.java @@ -188,4 +188,10 @@ public class MockAgentManagerImpl extends ManagerBase implements AgentManager { return null; } + @Override + public void disconnectWithInvestigation(long hostId, Event event) { + // TODO Auto-generated method stub + + } + } diff --git a/server/test/com/cloud/alert/AlertControlsUnitTest.java b/server/test/com/cloud/alert/AlertControlsUnitTest.java new file mode 100644 index 00000000000..c1e4c5487f1 --- /dev/null +++ b/server/test/com/cloud/alert/AlertControlsUnitTest.java @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.alert; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyList; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.when; + +import java.util.Date; + +import junit.framework.TestCase; + +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import com.cloud.alert.dao.AlertDao; +import com.cloud.server.ManagementServerImpl; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; + +public class AlertControlsUnitTest extends TestCase { + private static final Logger s_logger = Logger.getLogger(AlertControlsUnitTest.class); + + @Spy ManagementServerImpl _mgmtServer = new ManagementServerImpl(); + @Mock AccountManager _accountMgr; + @Mock AlertDao _alertDao; + @Override + @Before + protected void setUp() { + MockitoAnnotations.initMocks(this); + _mgmtServer._alertDao = _alertDao; + _mgmtServer._accountMgr = _accountMgr; + doReturn(3L).when(_accountMgr).checkAccessAndSpecifyAuthority(any(Account.class), anyLong()); + when(_alertDao.archiveAlert(anyList(), anyString(), any(Date.class), anyLong())).thenReturn(true); + when(_alertDao.deleteAlert(anyList(), anyString(), any(Date.class), anyLong())).thenReturn(true); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testInjected() throws Exception 
{ + s_logger.info("Starting test to archive and delete alerts"); + archiveAlerts(); + deleteAlerts(); + s_logger.info("archive/delete alerts: TEST PASSED"); + } + + protected void archiveAlerts() { + // archive alerts + String msg = "Archive Alerts: TEST FAILED"; + assertNotNull(msg, _mgmtServer._alertDao.archiveAlert(null, "system alert",null, 2L)); + } + + protected void deleteAlerts() { + // delete alerts + String msg = "Delete Alerts: TEST FAILED"; + assertNotNull(msg, _mgmtServer._alertDao.deleteAlert(null, "system alert",null, 2L)); + } +} diff --git a/server/test/com/cloud/api/APITest.java b/server/test/com/cloud/api/APITest.java index 0b040abc3f5..63e08719f4b 100644 --- a/server/test/com/cloud/api/APITest.java +++ b/server/test/com/cloud/api/APITest.java @@ -36,8 +36,6 @@ import com.google.gson.Gson; /** * Base class for API Test * - * @author Min Chen - * */ public abstract class APITest { diff --git a/server/test/com/cloud/api/ListPerfTest.java b/server/test/com/cloud/api/ListPerfTest.java index b8cb97eb8f0..8437ca42770 100644 --- a/server/test/com/cloud/api/ListPerfTest.java +++ b/server/test/com/cloud/api/ListPerfTest.java @@ -33,8 +33,6 @@ import com.cloud.utils.exception.CloudRuntimeException; * Test fixture to do performance test for list command * Currently we commented out this test suite since it requires a real MS and Db running. 
* - * @author Min Chen - * */ public class ListPerfTest extends APITest { diff --git a/server/test/com/cloud/api/LoginResponse.java b/server/test/com/cloud/api/LoginResponse.java index 097ae42c999..0f58374f187 100644 --- a/server/test/com/cloud/api/LoginResponse.java +++ b/server/test/com/cloud/api/LoginResponse.java @@ -24,8 +24,6 @@ import com.google.gson.annotations.SerializedName; /** * Login Response object * - * @author Min Chen - * */ public class LoginResponse extends BaseResponse { diff --git a/server/test/com/cloud/capacity/CapacityManagerTest.java b/server/test/com/cloud/capacity/CapacityManagerTest.java new file mode 100644 index 00000000000..3faa32f0f6d --- /dev/null +++ b/server/test/com/cloud/capacity/CapacityManagerTest.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.capacity; +import com.cloud.capacity.dao.CapacityDao; +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.dc.ClusterDetailsVO; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.vm.UserVmDetailVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDetailsDao; +import org.apache.log4j.Logger; +import org.junit.*; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.mockito.Mockito.*; + + +public class CapacityManagerTest { + CapacityDao CDao = mock(CapacityDao.class); + ServiceOfferingDao SOfferingDao = mock(ServiceOfferingDao.class); + ClusterDetailsDao ClusterDetailsDao= mock(com.cloud.dc.ClusterDetailsDao.class); + CapacityManagerImpl capMgr; + private ServiceOfferingVO svo = mock(ServiceOfferingVO.class); + private CapacityVO cvo_cpu = mock(CapacityVO.class); + private CapacityVO cvo_ram = mock(CapacityVO.class); + private VirtualMachine vm = mock(VirtualMachine.class); + private ClusterDetailsVO cluster_detail_cpu = mock(ClusterDetailsVO.class); + private ClusterDetailsVO cluster_detail_ram = mock(ClusterDetailsVO.class); + + public CapacityManagerImpl setUp() { + CapacityManagerImpl capMgr = new CapacityManagerImpl(); + ((CapacityManagerImpl)capMgr)._clusterDetailsDao= ClusterDetailsDao; + capMgr._capacityDao = CDao; + capMgr._offeringsDao = SOfferingDao; + return capMgr; + } + + @Test + public void allocateCapacityTest(){ + capMgr=setUp(); + when(vm.getHostId()).thenReturn(1l); + when(vm.getServiceOfferingId()).thenReturn(2l); + when(SOfferingDao.findById(anyLong())).thenReturn(svo); + when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_CPU))).thenReturn(cvo_cpu); + when(CDao.findByHostIdType(anyLong(), eq(Capacity.CAPACITY_TYPE_MEMORY))).thenReturn(cvo_ram); + when(cvo_cpu.getUsedCapacity()).thenReturn(500l); + when(cvo_cpu.getTotalCapacity()).thenReturn(2000l); + when(cvo_ram.getUsedCapacity()).thenReturn(3000l); + 
when(cvo_ram.getTotalCapacity()).thenReturn((long) 1024*1024*1024); + when(svo.getCpu()).thenReturn(500); + when(svo.getRamSize()).thenReturn(512); + when(cvo_cpu.getReservedCapacity()).thenReturn(0l); + when(cvo_ram.getReservedCapacity()).thenReturn(0l); + when(cluster_detail_ram.getValue()).thenReturn("1.5"); + when(cluster_detail_cpu.getValue()).thenReturn("2"); + when(CDao.update(anyLong(), isA(CapacityVO.class))).thenReturn(true) ; + boolean hasCapacity=capMgr.checkIfHostHasCapacity(1l,500,1024*1024*1024,false,2,2,false); + Assert.assertTrue(hasCapacity); + + } +} diff --git a/server/test/com/cloud/event/EventControlsUnitTest.java b/server/test/com/cloud/event/EventControlsUnitTest.java new file mode 100644 index 00000000000..3c2527565c9 --- /dev/null +++ b/server/test/com/cloud/event/EventControlsUnitTest.java @@ -0,0 +1,84 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.event; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyList; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +import java.util.Date; +import java.util.List; + +import junit.framework.TestCase; + +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import com.cloud.event.dao.EventDao; +import com.cloud.server.ManagementServerImpl; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; + +public class EventControlsUnitTest extends TestCase{ + private static final Logger s_logger = Logger.getLogger(EventControlsUnitTest.class); + + @Spy ManagementServerImpl _mgmtServer = new ManagementServerImpl(); + @Mock AccountManager _accountMgr; + @Mock EventDao _eventDao; + List _events = null; + + @Override + @Before + protected void setUp() { + MockitoAnnotations.initMocks(this); + _mgmtServer._eventDao = _eventDao; + _mgmtServer._accountMgr = _accountMgr; + doNothing().when(_accountMgr).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + when(_eventDao.listToArchiveOrDeleteEvents(anyList(), anyString(), any(Date.class), anyLong())).thenReturn(_events); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testInjected() throws Exception { + s_logger.info("Starting test to archive and delete events"); + archiveEvents(); + deleteEvents(); + s_logger.info("archive/delete events: TEST PASSED"); + } + + protected void archiveEvents() { + // archive alerts + doNothing().when(_eventDao).archiveEvents(_events); + } + + protected void 
deleteEvents() { + // delete alerts + } +} diff --git a/server/test/com/cloud/network/MockNetworkManagerImpl.java b/server/test/com/cloud/network/MockNetworkManagerImpl.java index 4a24f9a8fcf..eb43cce0b9e 100755 --- a/server/test/com/cloud/network/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/network/MockNetworkManagerImpl.java @@ -29,8 +29,10 @@ import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; import org.springframework.stereotype.Component; +import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; import com.cloud.dc.Vlan.VlanType; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; @@ -64,6 +66,7 @@ import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -569,7 +572,7 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage */ @Override public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr) { + User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr, String guestVmCidr) { // TODO Auto-generated method stub return null; } @@ -824,4 +827,58 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage // TODO Auto-generated method stub return null; } + + @Override + public boolean isSecondaryIpSetForNic(long nicId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean releaseSecondaryIpFromNic(long ipAddressId) { + // 
TODO Auto-generated method stub + return false; + } + + @Override + public String allocateSecondaryGuestIP(Account account, long zoneId, + Long nicId, Long networkId, String ipaddress) { + // TODO Auto-generated method stub + return null; + } + + @Override + public String allocateGuestIP(Account ipOwner, boolean isSystem, + long zoneId, Long networkId, String requestedIp) + throws InsufficientAddressCapacityException { + // TODO Auto-generated method stub + return null; + } + + + @Override + public List listVmNics(Long vmId, Long nicId) { + // TODO Auto-generated method stub + return null; + } + + @Override + public List listNics(ListNicsCmd listNicsCmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, + Pod pod, Account caller, String requestedIp) + throws InsufficientAddressCapacityException { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean removeVmSecondaryIpsOfNic(long nicId) { + // TODO Auto-generated method stub + return false; + } } diff --git a/server/test/com/cloud/network/MockNetworkModelImpl.java b/server/test/com/cloud/network/MockNetworkModelImpl.java index a2bef63d539..d7ffa7da280 100644 --- a/server/test/com/cloud/network/MockNetworkModelImpl.java +++ b/server/test/com/cloud/network/MockNetworkModelImpl.java @@ -829,4 +829,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub } + + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) + throws InvalidParameterValueException { + // TODO Auto-generated method stub + } } diff --git a/server/test/com/cloud/network/MockRulesManagerImpl.java b/server/test/com/cloud/network/MockRulesManagerImpl.java index ba3dd413cc3..e5a6894d76d 100644 --- a/server/test/com/cloud/network/MockRulesManagerImpl.java +++ b/server/test/com/cloud/network/MockRulesManagerImpl.java @@ -39,6 
+39,7 @@ import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.net.Ip; import com.cloud.vm.VirtualMachine; @Local(value = {RulesManager.class, RulesService.class}) @@ -53,14 +54,6 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return null; } - @Override - public PortForwardingRule createPortForwardingRule(PortForwardingRule rule, - Long vmId, boolean openFirewall) - throws NetworkRuleConflictException { - // TODO Auto-generated method stub - return null; - } - @Override public boolean revokePortForwardingRule(long ruleId, boolean apply) { // TODO Auto-generated method stub @@ -83,7 +76,7 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R @Override public boolean enableStaticNat(long ipAddressId, long vmId, long networkId, - boolean isSystemVm) throws NetworkRuleConflictException, + boolean isSystemVm, String ipAddr) throws NetworkRuleConflictException, ResourceUnavailableException { // TODO Auto-generated method stub return false; @@ -310,4 +303,12 @@ public class MockRulesManagerImpl extends ManagerBase implements RulesManager, R return "MockRulesManagerImpl"; } + @Override + public PortForwardingRule createPortForwardingRule(PortForwardingRule rule, + Long vmId, Ip vmIp, boolean openFirewall) + throws NetworkRuleConflictException { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java index 889318bcd46..5202c317e56 100644 --- a/server/test/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java @@ -53,8 +53,10 @@ import com.cloud.storage.S3; import com.cloud.storage.Swift; import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.Pair; -import 
com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.component.Manager; + + import com.cloud.utils.fsm.NoTransitionException; @Local(value = {ResourceManager.class}) @@ -111,7 +113,7 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana */ @Override public Cluster updateCluster(Cluster cluster, String clusterType, String hypervisor, String allocationState, - String managedstate) { + String managedstate, Float memoryOvercommitRaito, Float cpuOvercommitRatio) { // TODO Auto-generated method stub return null; } @@ -599,4 +601,11 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana return "MockResourceManagerImpl"; } + @Override + public List listAllUpAndEnabledHostsInOneZoneByHypervisor( + HypervisorType type, long dcId) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java b/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java index e79f582e7ec..eea61a1a129 100644 --- a/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java +++ b/server/test/com/cloud/storage/dao/StoragePoolDaoTest.java @@ -18,6 +18,7 @@ package com.cloud.storage.dao; import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.test.context.ContextConfiguration; @@ -30,7 +31,7 @@ import com.cloud.storage.StoragePoolStatus; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = "classpath:/StoragePoolDaoTestContext.xml") public class StoragePoolDaoTest extends TestCase { - @Inject StoragePoolDaoImpl dao; + @Inject PrimaryDataStoreDaoImpl dao; @Test public void testCountByStatus() { diff --git a/server/test/com/cloud/storage/dao/StoragePoolDaoTestConfiguration.java b/server/test/com/cloud/storage/dao/StoragePoolDaoTestConfiguration.java index 
60161dc31bf..de0a4edb655 100644 --- a/server/test/com/cloud/storage/dao/StoragePoolDaoTestConfiguration.java +++ b/server/test/com/cloud/storage/dao/StoragePoolDaoTestConfiguration.java @@ -19,6 +19,7 @@ package com.cloud.storage.dao; import java.io.IOException; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.ComponentScan.Filter; import org.springframework.context.annotation.Configuration; @@ -31,7 +32,7 @@ import com.cloud.utils.component.SpringComponentScanUtils; @Configuration @ComponentScan(basePackageClasses={ - StoragePoolDaoImpl.class, + PrimaryDataStoreDaoImpl.class, StoragePoolDetailsDaoImpl.class}, includeFilters={@Filter(value=StoragePoolDaoTestConfiguration.Library.class, type=FilterType.CUSTOM)}, useDefaultFilters=false diff --git a/server/test/com/cloud/upgrade/DbUpgrade22Test.java b/server/test/com/cloud/upgrade/DbUpgrade22Test.java index 2718ff23b06..e0db82db0bc 100644 --- a/server/test/com/cloud/upgrade/DbUpgrade22Test.java +++ b/server/test/com/cloud/upgrade/DbUpgrade22Test.java @@ -16,12 +16,14 @@ // under the License. 
package com.cloud.upgrade; +import org.junit.Ignore; import org.junit.runner.RunWith; import org.junit.runners.Suite; @RunWith(Suite.class) @Suite.SuiteClasses({ AdvanceZone217To224UpgradeTest.class, AdvanceZone223To224UpgradeTest.class, PortForwarding218To224UpgradeTest.class, InstanceGroup218To224UpgradeTest.class, BasicZone218To224UpgradeTest.class, UsageEvents218To224UpgradeTest.class }) +@Ignore("maven-sure-fire discovered") public class DbUpgrade22Test { } diff --git a/server/test/com/cloud/user/MockAccountManagerImpl.java b/server/test/com/cloud/user/MockAccountManagerImpl.java index 5632070d6d6..b637c2aaf4e 100644 --- a/server/test/com/cloud/user/MockAccountManagerImpl.java +++ b/server/test/com/cloud/user/MockAccountManagerImpl.java @@ -325,30 +325,29 @@ public class MockAccountManagerImpl extends ManagerBase implements Manager, Acco } @Override - public UserAccount createUserAccount(String userName, String password, - String firstName, String lastName, String email, String timezone, - String accountName, short accountType, Long domainId, - String networkDomain, Map details, - String accountUUID, String userUUID, Integer regionId) { - // TODO Auto-generated method stub - return null; - } + public UserAccount createUserAccount(String userName, String password, + String firstName, String lastName, String email, String timezone, + String accountName, short accountType, Long domainId, + String networkDomain, Map details) { + // TODO Auto-generated method stub + return null; + } - @Override - public User createUser(String userName, String password, String firstName, - String lastName, String email, String timeZone, String accountName, - Long domainId, String userUUID, Integer regionId) { - // TODO Auto-generated method stub - return null; - } + @Override + public User createUser(String userName, String password, String firstName, + String lastName, String email, String timeZone, String accountName, + Long domainId) { + // TODO Auto-generated method stub + 
return null; + } - @Override - public Account createAccount(String accountName, short accountType, - Long domainId, String networkDomain, Map details, String uuid, - int regionId) { - // TODO Auto-generated method stub - return null; - } + @Override + public Account createAccount(String accountName, short accountType, + Long domainId, String networkDomain, Map details, String uuid, + int regionId) { + // TODO Auto-generated method stub + return null; + } @Override public RoleType getRoleType(Account account) { return null; diff --git a/server/test/com/cloud/user/MockDomainManagerImpl.java b/server/test/com/cloud/user/MockDomainManagerImpl.java index b791f4cf8a1..bc92965849c 100644 --- a/server/test/com/cloud/user/MockDomainManagerImpl.java +++ b/server/test/com/cloud/user/MockDomainManagerImpl.java @@ -137,24 +137,24 @@ public class MockDomainManagerImpl extends ManagerBase implements DomainManager, return null; } - @Override - public Domain createDomain(String name, Long parentId, - String networkDomain, String domainUUID, Integer regionId) { - // TODO Auto-generated method stub - return null; - } + @Override + public Domain createDomain(String name, Long parentId, + String networkDomain) { + // TODO Auto-generated method stub + return null; + } - @Override - public Domain updateDomain(UpdateDomainCmd cmd) { - // TODO Auto-generated method stub - return null; - } + @Override + public Domain updateDomain(UpdateDomainCmd cmd) { + // TODO Auto-generated method stub + return null; + } - @Override - public Domain createDomain(String name, Long parentId, Long ownerId, - String networkDomain, String domainUUID, Integer regionId) { - // TODO Auto-generated method stub - return null; - } + @Override + public Domain createDomain(String name, Long parentId, Long ownerId, + String networkDomain) { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/test/com/cloud/vm/MockUserVmManagerImpl.java b/server/test/com/cloud/vm/MockUserVmManagerImpl.java 
index 1ee627fb738..09825a8eeb6 100644 --- a/server/test/com/cloud/vm/MockUserVmManagerImpl.java +++ b/server/test/com/cloud/vm/MockUserVmManagerImpl.java @@ -25,7 +25,6 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; -import org.apache.cloudstack.api.command.user.template.CreateTemplateCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; @@ -40,8 +39,6 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; -import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.springframework.stereotype.Component; import com.cloud.agent.api.StopAnswer; @@ -69,7 +66,6 @@ import com.cloud.offering.ServiceOffering; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.Criteria; import com.cloud.storage.StoragePool; -import com.cloud.storage.Volume; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.uservm.UserVm; @@ -155,11 +151,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public boolean attachISOToVM(long vmId, long isoId, boolean attach) { - // TODO Auto-generated method stub - return false; - } @Override public boolean stopVirtualMachine(long userId, long vmId) { @@ -209,12 +200,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public String getChecksum(Long hostId, String templatePath) { - // TODO 
Auto-generated method stub - return null; - } - @Override public boolean configure(String name, Map params) throws ConfigurationException { return true; @@ -255,24 +240,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException { - // TODO Auto-generated method stub - return null; - } - - @Override - public Volume attachVolumeToVM(AttachVolumeCmd cmd) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { - // TODO Auto-generated method stub - return null; - } - @Override public UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, ExecutionException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { @@ -316,18 +283,6 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return null; } - @Override - public VirtualMachineTemplate createPrivateTemplateRecord(CreateTemplateCmd cmd, Account templateOwner) throws ResourceAllocationException { - // TODO Auto-generated method stub - return null; - } - - @Override - public VirtualMachineTemplate createPrivateTemplate(CreateTemplateCmd cmd) { - // TODO Auto-generated method stub - return null; - } - @Override public UserVm startVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException { // TODO Auto-generated method stub @@ -469,4 +424,9 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager, return false; } + @Override + public UserVm resetVMSSHKey(ResetVMSSHKeyCmd cmd) throws ResourceUnavailableException, InsufficientCapacityException { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/test/com/cloud/vm/UserVmManagerTest.java 
b/server/test/com/cloud/vm/UserVmManagerTest.java new file mode 100755 index 00000000000..0795a359fdd --- /dev/null +++ b/server/test/com/cloud/vm/UserVmManagerTest.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm; + +import java.util.List; + +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd; +import org.apache.log4j.Logger; +import org.junit.Test; +import org.junit.Before; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VolumeManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import 
com.cloud.user.UserVO; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserDao; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.dao.UserVmDao; + +import static org.mockito.Mockito.*; + +public class UserVmManagerTest { + + @Spy UserVmManagerImpl _userVmMgr = new UserVmManagerImpl(); + @Mock VirtualMachineManager _itMgr; + @Mock VolumeManager _storageMgr; + @Mock Account _account; + @Mock AccountManager _accountMgr; + @Mock AccountDao _accountDao; + @Mock UserDao _userDao; + @Mock UserVmDao _vmDao; + @Mock VMTemplateDao _templateDao; + @Mock VolumeDao _volsDao; + @Mock RestoreVMCmd _restoreVMCmd; + @Mock AccountVO _accountMock; + @Mock UserVO _userMock; + @Mock UserVmVO _vmMock; + @Mock VMTemplateVO _templateMock; + @Mock VolumeVO _volumeMock; + @Mock List _rootVols; + @Before + public void setup(){ + MockitoAnnotations.initMocks(this); + + _userVmMgr._vmDao = _vmDao; + _userVmMgr._templateDao = _templateDao; + _userVmMgr._volsDao = _volsDao; + _userVmMgr._itMgr = _itMgr; + _userVmMgr.volumeMgr = _storageMgr; + _userVmMgr._accountDao = _accountDao; + _userVmMgr._userDao = _userDao; + _userVmMgr._accountMgr = _accountMgr; + + doReturn(3L).when(_account).getId(); + doReturn(8L).when(_vmMock).getAccountId(); + when(_accountDao.findById(anyLong())).thenReturn(_accountMock); + when(_userDao.findById(anyLong())).thenReturn(_userMock); + doReturn(Account.State.enabled).when(_account).getState(); + when(_vmMock.getId()).thenReturn(314L); + + } + + // Test restoreVm when VM state not in running/stopped case + @Test(expected=CloudRuntimeException.class) + public void testRestoreVMF1() throws ResourceAllocationException { + + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + doReturn(VirtualMachine.State.Error).when(_vmMock).getState(); + _userVmMgr.restoreVMInternal(_account, _vmMock, null); + } + + // Test restoreVm when VM is in stopped state + @Test + 
public void testRestoreVMF2() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + + doReturn(VirtualMachine.State.Stopped).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(_volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + + when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); + + _userVmMgr.restoreVMInternal(_account, _vmMock, null); + + } + + // Test restoreVM when VM is in running state + @Test + public void testRestoreVMF3() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + + doReturn(VirtualMachine.State.Running).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); + when(_itMgr.start(_vmMock, null, _userMock, _account)).thenReturn(_vmMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + 
when(_volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + + when(_templateMock.getUuid()).thenReturn("e0552266-7060-11e2-bbaa-d55f5db67735"); + + _userVmMgr.restoreVMInternal(_account, _vmMock, null); + + } + + // Test restoreVM on providing new template Id, when VM is in running state + @Test + public void testRestoreVMF4() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException { + doReturn(VirtualMachine.State.Running).when(_vmMock).getState(); + when(_vmDao.findById(anyLong())).thenReturn(_vmMock); + when(_volsDao.findByInstance(anyLong())).thenReturn(_rootVols); + doReturn(false).when(_rootVols).isEmpty(); + when(_rootVols.get(eq(0))).thenReturn(_volumeMock); + doReturn(3L).when(_volumeMock).getTemplateId(); + when(_templateDao.findById(anyLong())).thenReturn(_templateMock); + doNothing().when(_accountMgr).checkAccess(_account, null, true, _templateMock); + when(_itMgr.stop(_vmMock, _userMock, _account)).thenReturn(true); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, 14L)).thenReturn(_volumeMock); + when(_templateMock.getGuestOSId()).thenReturn(5L); + doNothing().when(_vmMock).setGuestOSId(anyLong()); + doNothing().when(_vmMock).setTemplateId(3L); + when(_vmDao.update(314L, _vmMock)).thenReturn(true); + when(_itMgr.start(_vmMock, null, _userMock, _account)).thenReturn(_vmMock); + when(_storageMgr.allocateDuplicateVolume(_volumeMock, null)).thenReturn(_volumeMock); + doNothing().when(_volsDao).attachVolume(anyLong(), anyLong(), anyLong()); + when(_volumeMock.getId()).thenReturn(3L); + doNothing().when(_volsDao).detachVolume(anyLong()); + + when(_templateMock.getUuid()).thenReturn("b1a3626e-72e0-4697-8c7c-a110940cc55d"); + + _userVmMgr.restoreVMInternal(_account, _vmMock, 14L); + + } + +} \ No newline at end of file diff --git a/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoImplTest.java 
b/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoImplTest.java new file mode 100644 index 00000000000..c96ba3656fc --- /dev/null +++ b/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoImplTest.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm.dao; + +import java.util.ArrayList; +import java.util.List; + +import javax.inject.Inject; + +import junit.framework.TestCase; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.cloud.vm.UserVmCloneSettingVO; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(locations = "classpath:/CloneSettingDaoTestContext.xml") +public class UserVmCloneSettingDaoImplTest extends TestCase { + @Inject UserVmCloneSettingDaoImpl _vmcsdao; + + public void makeEntry(Long vmId, String cloneType) { + UserVmCloneSettingVO vo = new UserVmCloneSettingVO(vmId, cloneType); + _vmcsdao.persist(vo); + vo = _vmcsdao.findById(vmId); + assert (vo.getCloneType().equalsIgnoreCase(cloneType)) : "Unexpected Clone Type retrieved from table! 
Retrieved: " + vo.getCloneType() + " while expected was: " + cloneType; + + // Next test whether the record is retrieved by clone type. + List voList = new ArrayList(); + voList = _vmcsdao.listByCloneType(cloneType); + assert (voList != null && !voList.isEmpty()) : "Failed to retrieve any record of VMs by clone type!"; + + // If a vo list is indeed retrieved, also check whether the vm id retrieved matches what we put in there. + assert (voList.get(0).getVmId() == vmId) : "Retrieved vmId " + voList.get(0).getVmId() + " does not match input vmId: " + vmId; + } + @Test + public void testPersist() { + + Long vmId = 2222l; + String[] arr = {"full", "linked"}; + for (String cloneType : arr) { + _vmcsdao.expunge(vmId); + makeEntry(vmId, cloneType); + } + } +} diff --git a/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoTestConfiguration.java b/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoTestConfiguration.java new file mode 100644 index 00000000000..6e22e174f58 --- /dev/null +++ b/server/test/com/cloud/vm/dao/UserVmCloneSettingDaoTestConfiguration.java @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.vm.dao; + +import java.io.IOException; + +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; + +import com.cloud.utils.component.SpringComponentScanUtils; +import com.cloud.vm.dao.UserVmCloneSettingDaoImpl; + +@Configuration +@ComponentScan(basePackageClasses={ + UserVmCloneSettingDaoImpl.class}, + includeFilters={@Filter(value=UserVmCloneSettingDaoTestConfiguration.Library.class, type=FilterType.CUSTOM)}, + useDefaultFilters=false + ) +public class UserVmCloneSettingDaoTestConfiguration { + + + public static class Library implements TypeFilter { + + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + mdr.getClassMetadata().getClassName(); + ComponentScan cs = UserVmCloneSettingDaoTestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringComponentScanUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + + } +} diff --git a/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 6fc6404e15f..a238e52c5d5 100644 --- a/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -29,6 +29,7 @@ import java.util.List; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.junit.Before; import org.junit.Test; import org.mockito.Mock; @@ -53,7 +54,6 @@ import 
com.cloud.storage.SnapshotVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.StoragePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -84,7 +84,7 @@ public class VMSnapshotManagerTest { @Mock HypervisorGuruManager _hvGuruMgr; @Mock AccountManager _accountMgr; @Mock GuestOSDao _guestOSDao; - @Mock StoragePoolDao _storagePoolDao; + @Mock PrimaryDataStoreDao _storagePoolDao; @Mock SnapshotDao _snapshotDao; @Mock VirtualMachineManager _itMgr; @Mock ConfigurationDao _configDao; diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index 574ce0a0352..d96e831cfeb 100644 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -365,6 +365,15 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu return false; } + /* (non-Javadoc) + * @see com.cloud.configuration.ConfigurationService#listLDAPConfig(org.apache.cloudstack.api.commands.LDAPConfigCmd) + */ + @Override + public LDAPConfigCmd listLDAPConfig(LDAPConfigCmd cmd) { + // TODO Auto-generated method stub + return null; + } + /* (non-Javadoc) * @see com.cloud.configuration.ConfigurationService#isOfferingForVpc(com.cloud.offering.NetworkOffering) */ @@ -424,7 +433,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, Type vm_typeType, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired, boolean offerHA, - boolean limitResourceUse, String tags, Long domainId, String hostTag, Integer networkRate) { + boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag, Integer 
networkRate) { // TODO Auto-generated method stub return null; } @@ -599,7 +608,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu */ @Override public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, String internalDns2, String guestCidr, String domain, Long domainId, NetworkType zoneType, - String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled) { + String allocationState, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, String ip6Dns2) { // TODO Auto-generated method stub return null; } diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index bcaaa26b418..c798cdf7810 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -29,10 +29,12 @@ import org.apache.cloudstack.api.command.admin.usage.ListTrafficTypeImplementors import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; import org.apache.cloudstack.api.command.user.network.ListNetworksCmd; import org.apache.cloudstack.api.command.user.network.RestartNetworkCmd; +import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenter; +import com.cloud.dc.Pod; import com.cloud.dc.Vlan.VlanType; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; @@ -80,6 +82,7 @@ import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; +import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -303,15 +306,11 @@ public class MockNetworkManagerImpl extends ManagerBase 
implements NetworkManage */ @Override public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr) { + User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr, String guestVmCidr) { // TODO Auto-generated method stub return null; } - - - - /* (non-Javadoc) * @see com.cloud.network.NetworkService#createPhysicalNetwork(java.lang.Long, java.lang.String, java.lang.String, java.util.List, java.lang.String, java.lang.Long, java.util.List, java.lang.String) */ @@ -1321,4 +1320,95 @@ public class MockNetworkManagerImpl extends ManagerBase implements NetworkManage // TODO Auto-generated method stub return null; } + + + + + + @Override + public boolean isSecondaryIpSetForNic(long nicId) { + // TODO Auto-generated method stub + return false; + } + + @Override + public String allocateSecondaryGuestIP(Account account, long zoneId, + Long nicId, Long networkId, String ipaddress) { + // TODO Auto-generated method stub + return null; + } + + + + + + + + @Override + public boolean releaseSecondaryIpFromNic(long ipAddressId) { + // TODO Auto-generated method stub + return false; + } + + + + + + @Override + public String allocateGuestIP(Account ipOwner, boolean isSystem, + long zoneId, Long networkId, String requestedIp) + throws InsufficientAddressCapacityException { + // TODO Auto-generated method stub + return null; + } + + + + + + + + + + @Override + public List listVmNics(Long vmId, Long nicId) { + // TODO Auto-generated method stub + return null; + } + + + + + + @Override + public List listNics(ListNicsCmd listNicsCmd) { + // TODO Auto-generated method stub + return null; + } + + + + + + @Override + public String allocatePublicIpForGuestNic(Long networkId, DataCenter dc, + Pod pod, Account caller, String requestedIp) + throws InsufficientAddressCapacityException { + // TODO Auto-generated method stub + return null; + } + + + + 
+ + + + + + @Override + public boolean removeVmSecondaryIpsOfNic(long nicId) { + // TODO Auto-generated method stub + return false; + } } diff --git a/server/test/com/cloud/vpc/MockNetworkModelImpl.java b/server/test/com/cloud/vpc/MockNetworkModelImpl.java index 3fad33870bf..5ac87772529 100644 --- a/server/test/com/cloud/vpc/MockNetworkModelImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkModelImpl.java @@ -842,4 +842,10 @@ public class MockNetworkModelImpl extends ManagerBase implements NetworkModel { // TODO Auto-generated method stub } + @Override + public void checkRequestedIpAddresses(long networkId, String ip4, String ip6) + throws InvalidParameterValueException { + // TODO Auto-generated method stub + } + } diff --git a/server/test/com/cloud/vpc/MockVpcManagerImpl.java b/server/test/com/cloud/vpc/MockVpcManagerImpl.java index 0a44a49c5e9..e6c65200ded 100644 --- a/server/test/com/cloud/vpc/MockVpcManagerImpl.java +++ b/server/test/com/cloud/vpc/MockVpcManagerImpl.java @@ -72,12 +72,8 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { return null; } - /* (non-Javadoc) - * @see com.cloud.network.vpc.VpcService#createVpcOffering(java.lang.String, java.lang.String, java.util.List) - */ @Override - public VpcOffering createVpcOffering(String name, String displayText, List supportedServices) { - // TODO Auto-generated method stub + public VpcOffering createVpcOffering(String name, String displayText, List supportedServices, Map> serviceProviders) { return null; } @@ -320,11 +316,6 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { /* (non-Javadoc) * @see com.cloud.network.vpc.VpcService#updateVpcGuestNetwork(long, java.lang.String, java.lang.String, com.cloud.user.Account, com.cloud.user.User, java.lang.String, java.lang.Long, java.lang.Boolean) */ - @Override - public Network updateVpcGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, 
Long ntwkOffId, Boolean changeCidr) { - // TODO Auto-generated method stub - return null; - } /* (non-Javadoc) * @see com.cloud.network.vpc.VpcManager#validateNtkwOffForVpc(long, java.lang.String, java.lang.String, com.cloud.user.Account, com.cloud.network.vpc.Vpc, java.lang.Long, java.lang.String) @@ -362,12 +353,8 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { return null; } - /* (non-Javadoc) - * @see com.cloud.network.vpc.VpcManager#vpcProviderEnabledInZone(long) - */ @Override - public boolean vpcProviderEnabledInZone(long zoneId) { - // TODO Auto-generated method stub + public boolean vpcProviderEnabledInZone(long zoneId, String provider) { return false; } @@ -471,4 +458,11 @@ public class MockVpcManagerImpl extends ManagerBase implements VpcManager { return null; } + @Override + public Network updateVpcGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, + String domainSuffix, Long ntwkOffId, Boolean changeCidr, String guestVmCidr) { + // TODO Auto-generated method stub + return null; + } + } diff --git a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java index 99bdb58e4f7..562d67dc207 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -18,6 +18,7 @@ package com.cloud.vpc.dao; import java.lang.reflect.Field; import java.util.List; +import java.util.Map; import javax.ejb.Local; @@ -83,7 +84,17 @@ public class MockVpcDaoImpl extends GenericDaoBase implements VpcDa // TODO Auto-generated method stub return 0; } - + + @Override + public VpcVO persist(VpcVO vpc, Map serviceProviderMap) { + return null; + } + + @Override + public void persistVpcServiceProviders(long vpcId, Map serviceProviderMap) { + return; + } + @Override public VpcVO findById(Long id) { VpcVO vo = null; diff --git a/server/test/resources/CloneSettingDaoTestContext.xml 
b/server/test/resources/CloneSettingDaoTestContext.xml new file mode 100644 index 00000000000..1d13500a2e9 --- /dev/null +++ b/server/test/resources/CloneSettingDaoTestContext.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/services/console-proxy/plugin/pom.xml b/services/console-proxy/plugin/pom.xml index 8cf3d76aa00..4cbe6d1c8f4 100644 --- a/services/console-proxy/plugin/pom.xml +++ b/services/console-proxy/plugin/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-service-console-proxy - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/pom.xml b/services/console-proxy/pom.xml index cd57526273d..1453e8cc264 100644 --- a/services/console-proxy/pom.xml +++ b/services/console-proxy/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-services - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/server/pom.xml b/services/console-proxy/server/pom.xml index 71e83933ca9..0df7559781e 100644 --- a/services/console-proxy/server/pom.xml +++ b/services/console-proxy/server/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloud-service-console-proxy - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxy.java b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxy.java index a722d8305a2..b5c29892a7b 100644 --- a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxy.java +++ b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxy.java @@ -425,23 +425,32 @@ public class ConsoleProxy { synchronized (connectionMap) { ConsoleProxyClient viewer = connectionMap.get(clientKey); if (viewer == null) { + authenticationExternally(param); viewer = new ConsoleProxyVncClient(); viewer.initClient(param); connectionMap.put(clientKey, viewer); s_logger.info("Added viewer object " + viewer); reportLoadChange = true; - } else if (!viewer.isFrontEndAlive()) { - s_logger.info("The rfb thread 
died, reinitializing the viewer " + viewer); - viewer.initClient(param); - } else if (!param.getClientHostPassword().equals(viewer.getClientHostPassword())) { - s_logger.warn("Bad sid detected(VNC port may be reused). sid in session: " - + viewer.getClientHostPassword() + ", sid in request: " + param.getClientHostPassword()); - viewer.initClient(param); - } else { - if(ajaxSession == null || ajaxSession.isEmpty()) + } else { + // protected against malicous attack by modifying URL content + if(ajaxSession != null) { + long ajaxSessionIdFromUrl = Long.parseLong(ajaxSession); + if(ajaxSessionIdFromUrl != viewer.getAjaxSessionId()) + throw new AuthenticationException ("Cannot use the existing viewer " + + viewer + ": modified AJAX session id"); + } + + if(param.getClientHostPassword() == null || param.getClientHostPassword().isEmpty() || !param.getClientHostPassword().equals(viewer.getClientHostPassword())) + throw new AuthenticationException ("Cannot use the existing viewer " + + viewer + ": bad sid"); + + if(!viewer.isFrontEndAlive()) { authenticationExternally(param); - } + viewer.initClient(param); + reportLoadChange = true; + } + } if(reportLoadChange) { ConsoleProxyClientStatsCollector statsCollector = getStatsCollector(); diff --git a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java index 7756d01cd7f..6815b0d43bc 100644 --- a/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java +++ b/services/console-proxy/server/src/com/cloud/consoleproxy/ConsoleProxyHttpHandlerHelper.java @@ -49,8 +49,11 @@ public class ConsoleProxyHttpHandlerHelper { if(map.get("token") != null) { ConsoleProxyPasswordBasedEncryptor encryptor = new ConsoleProxyPasswordBasedEncryptor( ConsoleProxy.getEncryptorPassword()); - + ConsoleProxyClientParam param = encryptor.decryptObject(ConsoleProxyClientParam.class, 
map.get("token")); + + // make sure we get information from token only + map.clear(); if(param != null) { if(param.getClientHostAddress() != null) map.put("host", param.getClientHostAddress()); @@ -67,6 +70,9 @@ public class ConsoleProxyHttpHandlerHelper { if(param.getTicket() != null) map.put("ticket", param.getTicket()); } + } else { + // we no longer accept information from parameter other than token + map.clear(); } return map; diff --git a/services/console-proxy/server/systemvm-descriptor.xml b/services/console-proxy/server/systemvm-descriptor.xml index 7efe7fdfcb0..e34026bc3a6 100644 --- a/services/console-proxy/server/systemvm-descriptor.xml +++ b/services/console-proxy/server/systemvm-descriptor.xml @@ -31,13 +31,13 @@ - ../scripts/storage/secondary/ + ../../../scripts/storage/secondary/ scripts/storage/secondary 555 555 - ../scripts/storage/secondary/ + ../../../scripts/storage/secondary/ scripts/storage/secondary 555 555 @@ -60,7 +60,7 @@ - ../console-proxy/images + images images 555 555 @@ -72,7 +72,7 @@ - ../console-proxy/js + js js 555 555 @@ -81,7 +81,7 @@ - ../console-proxy/ui + ui ui 555 555 @@ -90,7 +90,7 @@ - ../console-proxy/css + css css 555 555 @@ -99,7 +99,7 @@ - ../console-proxy/certs + certs certs 555 555 diff --git a/services/pom.xml b/services/pom.xml index 26488513999..35ec2e186ba 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT ../pom.xml diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in index 8330f35e659..1603c23eecb 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -211,7 +211,7 @@ for full help ""), ) - for f in ["create-database","create-schema", "create-database-premium","create-schema-premium", "create-schema-view", "4.1-new-db-schema"]: + for f in ["create-database","create-schema", "create-database-premium","create-schema-premium"]: p = 
os.path.join(self.dbFilesPath,"%s.sql"%f) if not os.path.exists(p): continue text = file(p).read() @@ -311,6 +311,7 @@ for example: self.errorAndExit(err) self.putDbProperty(key, value) self.info("Preparing %s"%dbpPath, True) + self.putDbProperty("region.id", self.options.regionid) prepareDBDotProperties() @@ -358,7 +359,7 @@ for example: if not os.path.exists(dbf): self.errorAndExit("Cannot find %s"%dbf) - coreSchemas = ['create-database.sql', 'create-schema.sql', 'create-schema-view.sql', 'templates.sql'] + coreSchemas = ['create-database.sql', 'create-schema.sql', 'templates.sql'] if not self.serversetup: coreSchemas.append('server-setup.sql') @@ -576,6 +577,8 @@ for example: help="Secret key used to encrypt sensitive database values. A string, default is password") self.parser.add_option("-i", "--mshost", action="store", type="string", dest="mshostip", default="", help="Cluster management server host IP. A string, by default it will try to detect a local IP") + self.parser.add_option("-r", "--regionid", action="store", type="string", dest="regionid", default="1", + help="Region Id for the management server cluster") (self.options, self.args) = self.parser.parse_args() parseCasualCredit() diff --git a/setup/db/4.1-new-db-schema.sql b/setup/db/4.1-new-db-schema.sql deleted file mode 100644 index d60eca2f890..00000000000 --- a/setup/db/4.1-new-db-schema.sql +++ /dev/null @@ -1,142 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one --- or more contributor license agreements. See the NOTICE file --- distributed with this work for additional information --- regarding copyright ownership. The ASF licenses this file --- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliance --- with the License. 
You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, --- software distributed under the License is distributed on an --- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --- KIND, either express or implied. See the License for the --- specific language governing permissions and limitations --- under the License. - -use cloud; - -alter table vm_template add image_data_store_id bigint unsigned; -alter table vm_template add size bigint unsigned; -alter table vm_template add state varchar(255); -alter table vm_template add update_count bigint unsigned; -alter table vm_template add updated datetime; -alter table storage_pool add storage_provider_id bigint unsigned; -alter table storage_pool add scope varchar(255); -alter table storage_pool modify id bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL; -alter table template_spool_ref add state varchar(255); -alter table template_spool_ref add update_count bigint unsigned; -alter table volumes add disk_type varchar(255); -alter table volumes drop foreign key `fk_volumes__account_id`; -alter table vm_instance add column disk_offering_id bigint unsigned; -alter table vm_instance add column cpu int(10) unsigned; -alter table vm_instance add column ram bigint unsigned; -alter table vm_instance add column owner varchar(255); -alter table vm_instance add column speed int(10) unsigned; -alter table vm_instance add column host_name varchar(255); -alter table vm_instance add column display_name varchar(255); - -alter table data_center add column owner varchar(255); -alter table data_center add column created datetime COMMENT 'date created'; -alter table data_center add column lastUpdated datetime COMMENT 'last updated'; -alter table data_center add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -alter table host_pod_ref add column owner varchar(255); -alter table 
host_pod_ref add column created datetime COMMENT 'date created'; -alter table host_pod_ref add column lastUpdated datetime COMMENT 'last updated'; -alter table host_pod_ref add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -alter table host add column owner varchar(255); -alter table host add column lastUpdated datetime COMMENT 'last updated'; -alter table host add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; - - -alter table cluster add column owner varchar(255); -alter table cluster add column created datetime COMMENT 'date created'; -alter table cluster add column lastUpdated datetime COMMENT 'last updated'; -alter table cluster add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; -CREATE TABLE `cloud`.`object_datastore_ref` ( - `id` bigint unsigned NOT NULL auto_increment, - `datastore_id` bigint unsigned NOT NULL, - `datastore_role` varchar(255) NOT NULL, - `object_id` bigint unsigned NOT NULL, - `object_type` varchar(255) NOT NULL, - `created` DATETIME NOT NULL, - `last_updated` DATETIME, - `job_id` varchar(255), - `download_pct` int(10) unsigned, - `download_state` varchar(255), - `error_str` varchar(255), - `local_path` varchar(255), - `install_path` varchar(255), - `size` bigint unsigned COMMENT 'the size of the template on the pool', - `state` varchar(255) NOT NULL, - `update_count` bigint unsigned NOT NULL, - `updated` DATETIME, - PRIMARY KEY (`id`) -) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`data_store_provider` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', - `uuid` varchar(255) NOT NULL COMMENT 'uuid of primary data store provider', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`image_data_store` ( - `id` bigint unsigned NOT NULL 
AUTO_INCREMENT COMMENT 'id', - `name` varchar(255) NOT NULL COMMENT 'name of data store', - `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', - `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', - `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', - `scope` varchar(255) COMMENT 'scope of data store', - `uuid` varchar(255) COMMENT 'uuid of data store', - PRIMARY KEY(`id`), - CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`vm_compute_tags` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `compute_tag` varchar(255) NOT NULL COMMENT 'name of tag', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`vm_root_disk_tags` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `root_disk_tag` varchar(255) NOT NULL COMMENT 'name of tag', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`vm_network_map` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `network_id` bigint unsigned NOT NULL COMMENT 'network id', - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`vm_reservation` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40) NOT NULL COMMENT 'reservation id', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `data_center_id` bigint unsigned NOT NULL COMMENT 'zone id', - `pod_id` bigint unsigned NOT NULL COMMENT 'pod id', - `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id', - `host_id` bigint unsigned NOT NULL COMMENT 'host id', - `created` datetime COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not 
null', - CONSTRAINT `uc_vm_reservation__uuid` UNIQUE (`uuid`), - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`volume_reservation` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `vm_reservation_id` bigint unsigned NOT NULL COMMENT 'id of the vm reservation', - `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', - `volume_id` bigint unsigned NOT NULL COMMENT 'volume id', - `pool_id` bigint unsigned NOT NULL COMMENT 'pool assigned to the volume', - CONSTRAINT `fk_vm_pool_reservation__vm_reservation_id` FOREIGN KEY (`vm_reservation_id`) REFERENCES `vm_reservation`(`id`) ON DELETE CASCADE, - PRIMARY KEY(`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/setup/db/create-schema-view.sql b/setup/db/create-schema-view.sql deleted file mode 100644 index 265779dccdc..00000000000 --- a/setup/db/create-schema-view.sql +++ /dev/null @@ -1,1152 +0,0 @@ --- Licensed to the Apache Software Foundation (ASF) under one --- or more contributor license agreements. See the NOTICE file --- distributed with this work for additional information --- regarding copyright ownership. The ASF licenses this file --- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliance --- with the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, --- software distributed under the License is distributed on an --- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY --- KIND, either express or implied. See the License for the --- specific language governing permissions and limitations --- under the License. 
- --- DB views for list api - -DROP VIEW IF EXISTS `cloud`.`user_vm_view`; -CREATE VIEW `cloud`.`user_vm_view` AS - select - vm_instance.id id, - vm_instance.name name, - user_vm.display_name display_name, - user_vm.user_data user_data, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - instance_group.id instance_group_id, - instance_group.uuid instance_group_uuid, - instance_group.name instance_group_name, - vm_instance.uuid uuid, - vm_instance.last_host_id last_host_id, - vm_instance.vm_type type, - vm_instance.vnc_password vnc_password, - vm_instance.limit_cpu_use limit_cpu_use, - vm_instance.created created, - vm_instance.state state, - vm_instance.removed removed, - vm_instance.ha_enabled ha_enabled, - vm_instance.hypervisor_type hypervisor_type, - vm_instance.instance_name instance_name, - vm_instance.guest_os_id guest_os_id, - guest_os.uuid guest_os_uuid, - vm_instance.pod_id pod_id, - host_pod_ref.uuid pod_uuid, - vm_instance.private_ip_address private_ip_address, - vm_instance.private_mac_address private_mac_address, - vm_instance.vm_type vm_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - data_center.is_security_group_enabled security_group_enabled, - host.id host_id, - host.uuid host_uuid, - host.name host_name, - vm_template.id template_id, - vm_template.uuid template_uuid, - vm_template.name template_name, - vm_template.display_text template_display_text, - vm_template.enable_password password_enabled, - iso.id iso_id, - iso.uuid iso_uuid, - iso.name iso_name, - iso.display_text iso_display_text, - service_offering.id service_offering_id, - disk_offering.uuid service_offering_uuid, - service_offering.cpu cpu, - 
service_offering.speed speed, - service_offering.ram_size ram_size, - disk_offering.name service_offering_name, - storage_pool.id pool_id, - storage_pool.uuid pool_uuid, - storage_pool.pool_type pool_type, - volumes.id volume_id, - volumes.uuid volume_uuid, - volumes.device_id volume_device_id, - volumes.volume_type volume_type, - security_group.id security_group_id, - security_group.uuid security_group_uuid, - security_group.name security_group_name, - security_group.description security_group_description, - nics.id nic_id, - nics.uuid nic_uuid, - nics.network_id network_id, - nics.ip4_address ip_address, - nics.default_nic is_default_nic, - nics.gateway gateway, - nics.netmask netmask, - nics.mac_address mac_address, - nics.broadcast_uri broadcast_uri, - nics.isolation_uri isolation_uri, - vpc.id vpc_id, - vpc.uuid vpc_uuid, - networks.uuid network_uuid, - networks.traffic_type traffic_type, - networks.guest_type guest_type, - user_ip_address.id public_ip_id, - user_ip_address.uuid public_ip_uuid, - user_ip_address.public_ip_address public_ip_address, - ssh_keypairs.keypair_name keypair_name, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`user_vm` - inner join - `cloud`.`vm_instance` ON vm_instance.id = user_vm.id - and vm_instance.removed is NULL - inner join - `cloud`.`account` ON vm_instance.account_id = account.id - inner join - `cloud`.`domain` ON vm_instance.domain_id = domain.id - left join - `cloud`.`guest_os` ON vm_instance.guest_os_id = guest_os.id - left join - 
`cloud`.`host_pod_ref` ON vm_instance.pod_id = host_pod_ref.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`instance_group_vm_map` ON vm_instance.id = instance_group_vm_map.instance_id - left join - `cloud`.`instance_group` ON instance_group_vm_map.group_id = instance_group.id - left join - `cloud`.`data_center` ON vm_instance.data_center_id = data_center.id - left join - `cloud`.`host` ON vm_instance.host_id = host.id - left join - `cloud`.`vm_template` ON vm_instance.vm_template_id = vm_template.id - left join - `cloud`.`vm_template` iso ON iso.id = user_vm.iso_id - left join - `cloud`.`service_offering` ON vm_instance.service_offering_id = service_offering.id - left join - `cloud`.`disk_offering` ON vm_instance.service_offering_id = disk_offering.id - left join - `cloud`.`volumes` ON vm_instance.id = volumes.instance_id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`security_group_vm_map` ON vm_instance.id = security_group_vm_map.instance_id - left join - `cloud`.`security_group` ON security_group_vm_map.security_group_id = security_group.id - left join - `cloud`.`nics` ON vm_instance.id = nics.instance_id - left join - `cloud`.`networks` ON nics.network_id = networks.id - left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id - left join - `cloud`.`user_ip_address` ON user_ip_address.vm_id = vm_instance.id - left join - `cloud`.`user_vm_details` ON user_vm_details.vm_id = vm_instance.id - and user_vm_details.name = 'SSH.PublicKey' - left join - `cloud`.`ssh_keypairs` ON ssh_keypairs.public_key = user_vm_details.value - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = vm_instance.id - and resource_tags.resource_type = 'UserVm' - left join - `cloud`.`async_job` ON async_job.instance_id = vm_instance.id - and async_job.instance_type = 'VirtualMachine' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`domain_router_view`; -CREATE 
VIEW `cloud`.`domain_router_view` AS - select - vm_instance.id id, - vm_instance.name name, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - vm_instance.uuid uuid, - vm_instance.created created, - vm_instance.state state, - vm_instance.removed removed, - vm_instance.pod_id pod_id, - vm_instance.instance_name instance_name, - host_pod_ref.uuid pod_uuid, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - data_center.dns1 dns1, - data_center.dns2 dns2, - host.id host_id, - host.uuid host_uuid, - host.name host_name, - vm_template.id template_id, - vm_template.uuid template_uuid, - service_offering.id service_offering_id, - disk_offering.uuid service_offering_uuid, - disk_offering.name service_offering_name, - nics.id nic_id, - nics.uuid nic_uuid, - nics.network_id network_id, - nics.ip4_address ip_address, - nics.default_nic is_default_nic, - nics.gateway gateway, - nics.netmask netmask, - nics.mac_address mac_address, - nics.broadcast_uri broadcast_uri, - nics.isolation_uri isolation_uri, - vpc.id vpc_id, - vpc.uuid vpc_uuid, - networks.uuid network_uuid, - networks.name network_name, - networks.network_domain network_domain, - networks.traffic_type traffic_type, - networks.guest_type guest_type, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id, - domain_router.template_version template_version, - domain_router.scripts_version scripts_version, - domain_router.is_redundant_router is_redundant_router, - domain_router.redundant_state redundant_state, - domain_router.stop_pending stop_pending - from - `cloud`.`domain_router` - inner join - `cloud`.`vm_instance` ON vm_instance.id = 
domain_router.id - inner join - `cloud`.`account` ON vm_instance.account_id = account.id - inner join - `cloud`.`domain` ON vm_instance.domain_id = domain.id - left join - `cloud`.`host_pod_ref` ON vm_instance.pod_id = host_pod_ref.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`data_center` ON vm_instance.data_center_id = data_center.id - left join - `cloud`.`host` ON vm_instance.host_id = host.id - left join - `cloud`.`vm_template` ON vm_instance.vm_template_id = vm_template.id - left join - `cloud`.`service_offering` ON vm_instance.service_offering_id = service_offering.id - left join - `cloud`.`disk_offering` ON vm_instance.service_offering_id = disk_offering.id - left join - `cloud`.`volumes` ON vm_instance.id = volumes.instance_id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`nics` ON vm_instance.id = nics.instance_id - left join - `cloud`.`networks` ON nics.network_id = networks.id - left join - `cloud`.`vpc` ON networks.vpc_id = vpc.id - left join - `cloud`.`async_job` ON async_job.instance_id = vm_instance.id - and async_job.instance_type = 'DomainRouter' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`security_group_view`; -CREATE VIEW `cloud`.`security_group_view` AS - select - security_group.id id, - security_group.name name, - security_group.description description, - security_group.uuid uuid, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - security_group_rule.id rule_id, - security_group_rule.uuid rule_uuid, - security_group_rule.type rule_type, - security_group_rule.start_port rule_start_port, - security_group_rule.end_port rule_end_port, - security_group_rule.protocol 
rule_protocol, - security_group_rule.allowed_network_id rule_allowed_network_id, - security_group_rule.allowed_ip_cidr rule_allowed_ip_cidr, - security_group_rule.create_status rule_create_status, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`security_group` - left join - `cloud`.`security_group_rule` ON security_group.id = security_group_rule.security_group_id - inner join - `cloud`.`account` ON security_group.account_id = account.id - inner join - `cloud`.`domain` ON security_group.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = security_group.account_id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = security_group.id - and resource_tags.resource_type = 'SecurityGroup' - left join - `cloud`.`async_job` ON async_job.instance_id = security_group.id - and async_job.instance_type = 'SecurityGroup' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`resource_tag_view`; -CREATE VIEW `cloud`.`resource_tag_view` AS - select - resource_tags.id, - resource_tags.uuid, - resource_tags.key, - resource_tags.value, - resource_tags.resource_id, - resource_tags.resource_uuid, - resource_tags.resource_type, - resource_tags.customer, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name 
- from - `cloud`.`resource_tags` - inner join - `cloud`.`account` ON resource_tags.account_id = account.id - inner join - `cloud`.`domain` ON resource_tags.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = resource_tags.account_id; - - -DROP VIEW IF EXISTS `cloud`.`event_view`; -CREATE VIEW `cloud`.`event_view` AS - select - event.id, - event.uuid, - event.type, - event.state, - event.description, - event.created, - event.level, - event.parameters, - event.start_id, - eve.uuid start_uuid, - event.user_id, - user.username user_name, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name - from - `cloud`.`event` - inner join - `cloud`.`account` ON event.account_id = account.id - inner join - `cloud`.`domain` ON event.domain_id = domain.id - inner join - `cloud`.`user` ON event.user_id = user.id - left join - `cloud`.`projects` ON projects.project_account_id = event.account_id - left join - `cloud`.`event` eve ON event.start_id = eve.id; - -DROP VIEW IF EXISTS `cloud`.`instance_group_view`; -CREATE VIEW `cloud`.`instance_group_view` AS - select - instance_group.id, - instance_group.uuid, - instance_group.name, - instance_group.removed, - instance_group.created, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name - from - `cloud`.`instance_group` - inner join - `cloud`.`account` ON instance_group.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`projects` ON 
projects.project_account_id = instance_group.account_id; - -DROP VIEW IF EXISTS `cloud`.`user_view`; -CREATE VIEW `cloud`.`user_view` AS - select - user.id, - user.uuid, - user.username, - user.password, - user.firstname, - user.lastname, - user.email, - user.state, - user.api_key, - user.secret_key, - user.created, - user.removed, - user.timezone, - user.registration_token, - user.is_registered, - user.incorrect_login_attempts, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`user` - inner join - `cloud`.`account` ON user.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`async_job` ON async_job.instance_id = user.id - and async_job.instance_type = 'User' - and async_job.job_status = 0; - - -DROP VIEW IF EXISTS `cloud`.`project_view`; -CREATE VIEW `cloud`.`project_view` AS - select - projects.id, - projects.uuid, - projects.name, - projects.display_text, - projects.state, - projects.removed, - projects.created, - account.account_name owner, - pacct.account_id, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer - from - `cloud`.`projects` - inner join - `cloud`.`domain` ON projects.domain_id = domain.id - inner join - `cloud`.`project_account` ON projects.id = 
project_account.project_id - and project_account.account_role = 'Admin' - inner join - `cloud`.`account` ON account.id = project_account.account_id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = projects.id - and resource_tags.resource_type = 'Project' - left join - `cloud`.`project_account` pacct ON projects.id = pacct.project_id; - -DROP VIEW IF EXISTS `cloud`.`project_account_view`; -CREATE VIEW `cloud`.`project_account_view` AS - select - project_account.id, - account.id account_id, - account.uuid account_uuid, - account.account_name, - account.type account_type, - project_account.account_role, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`project_account` - inner join - `cloud`.`account` ON project_account.account_id = account.id - inner join - `cloud`.`domain` ON account.domain_id = domain.id - inner join - `cloud`.`projects` ON projects.id = project_account.project_id; - -DROP VIEW IF EXISTS `cloud`.`project_invitation_view`; -CREATE VIEW `cloud`.`project_invitation_view` AS - select - project_invitations.id, - project_invitations.uuid, - project_invitations.email, - project_invitations.created, - project_invitations.state, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - account.id account_id, - account.uuid account_uuid, - account.account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`project_invitations` - left join - `cloud`.`account` ON project_invitations.account_id = account.id - left join - `cloud`.`domain` ON project_invitations.domain_id = domain.id - left join - `cloud`.`projects` ON projects.id = project_invitations.project_id; - -DROP VIEW IF EXISTS `cloud`.`host_view`; -CREATE VIEW `cloud`.`host_view` AS - select - 
host.id, - host.uuid, - host.name, - host.status, - host.disconnected, - host.type, - host.private_ip_address, - host.version, - host.hypervisor_type, - host.hypervisor_version, - host.capabilities, - host.last_ping, - host.created, - host.removed, - host.resource_state, - host.mgmt_server_id, - host.cpus, - host.speed, - host.ram, - cluster.id cluster_id, - cluster.uuid cluster_uuid, - cluster.name cluster_name, - cluster.cluster_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - host_pod_ref.id pod_id, - host_pod_ref.uuid pod_uuid, - host_pod_ref.name pod_name, - host_tags.tag, - guest_os_category.id guest_os_category_id, - guest_os_category.uuid guest_os_category_uuid, - guest_os_category.name guest_os_category_name, - mem_caps.used_capacity memory_used_capacity, - mem_caps.reserved_capacity memory_reserved_capacity, - cpu_caps.used_capacity cpu_used_capacity, - cpu_caps.reserved_capacity cpu_reserved_capacity, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`host` - left join - `cloud`.`cluster` ON host.cluster_id = cluster.id - left join - `cloud`.`data_center` ON host.data_center_id = data_center.id - left join - `cloud`.`host_pod_ref` ON host.pod_id = host_pod_ref.id - left join - `cloud`.`host_details` ON host.id = host_details.id - and host_details.name = 'guest.os.category.id' - left join - `cloud`.`guest_os_category` ON guest_os_category.id = CONVERT( host_details.value , UNSIGNED) - left join - `cloud`.`host_tags` ON host_tags.host_id = host.id - left join - `cloud`.`op_host_capacity` mem_caps ON host.id = mem_caps.host_id - and mem_caps.capacity_type = 0 - left join - `cloud`.`op_host_capacity` cpu_caps ON host.id = cpu_caps.host_id - and cpu_caps.capacity_type = 1 - left join - `cloud`.`async_job` ON async_job.instance_id = host.id - and async_job.instance_type = 'Host' - and async_job.job_status = 
0; - -DROP VIEW IF EXISTS `cloud`.`volume_view`; -CREATE VIEW `cloud`.`volume_view` AS - select - volumes.id, - volumes.uuid, - volumes.name, - volumes.device_id, - volumes.volume_type, - volumes.size, - volumes.created, - volumes.state, - volumes.attached, - volumes.removed, - volumes.pod_id, - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - projects.id project_id, - projects.uuid project_uuid, - projects.name project_name, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - vm_instance.id vm_id, - vm_instance.uuid vm_uuid, - vm_instance.name vm_name, - vm_instance.state vm_state, - vm_instance.vm_type, - user_vm.display_name vm_display_name, - volume_host_ref.size volume_host_size, - volume_host_ref.created volume_host_created, - volume_host_ref.format, - volume_host_ref.download_pct, - volume_host_ref.download_state, - volume_host_ref.error_str, - disk_offering.id disk_offering_id, - disk_offering.uuid disk_offering_uuid, - disk_offering.name disk_offering_name, - disk_offering.display_text disk_offering_display_text, - disk_offering.use_local_storage, - disk_offering.system_use, - storage_pool.id pool_id, - storage_pool.uuid pool_uuid, - storage_pool.name pool_name, - cluster.hypervisor_type, - vm_template.id template_id, - vm_template.uuid template_uuid, - vm_template.extractable, - vm_template.type template_type, - resource_tags.id tag_id, - resource_tags.uuid tag_uuid, - resource_tags.key tag_key, - resource_tags.value tag_value, - resource_tags.domain_id tag_domain_id, - resource_tags.account_id tag_account_id, - resource_tags.resource_id tag_resource_id, - resource_tags.resource_uuid tag_resource_uuid, - resource_tags.resource_type tag_resource_type, - resource_tags.customer tag_customer, - async_job.id job_id, - async_job.uuid 
job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`volumes` - inner join - `cloud`.`account` ON volumes.account_id = account.id - inner join - `cloud`.`domain` ON volumes.domain_id = domain.id - left join - `cloud`.`projects` ON projects.project_account_id = account.id - left join - `cloud`.`data_center` ON volumes.data_center_id = data_center.id - left join - `cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id - left join - `cloud`.`user_vm` ON user_vm.id = vm_instance.id - left join - `cloud`.`volume_host_ref` ON volumes.id = volume_host_ref.volume_id - and volumes.data_center_id = volume_host_ref.zone_id - left join - `cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id - left join - `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id - left join - `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id - left join - `cloud`.`vm_template` ON volumes.template_id = vm_template.id - left join - `cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id - and resource_tags.resource_type = 'Volume' - left join - `cloud`.`async_job` ON async_job.instance_id = volumes.id - and async_job.instance_type = 'Volume' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`account_netstats_view`; -CREATE VIEW `cloud`.`account_netstats_view` AS - SELECT - account_id, - sum(net_bytes_received) + sum(current_bytes_received) as bytesReceived, - sum(net_bytes_sent) + sum(current_bytes_sent) as bytesSent - FROM - `cloud`.`user_statistics` - group by account_id; - - -DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; -CREATE VIEW `cloud`.`account_vmstats_view` AS - SELECT - account_id, state, count(*) as vmcount - from - `cloud`.`vm_instance` - group by account_id , state; - -DROP VIEW IF EXISTS `cloud`.`free_ip_view`; -CREATE VIEW `cloud`.`free_ip_view` AS - select - count(user_ip_address.id) free_ip - from - `cloud`.`user_ip_address` - inner join - `cloud`.`vlan` ON vlan.id = 
user_ip_address.vlan_db_id - and vlan.vlan_type = 'VirtualNetwork' - where - state = 'Free'; - -DROP VIEW IF EXISTS `cloud`.`account_view`; -CREATE VIEW `cloud`.`account_view` AS - select - account.id, - account.uuid, - account.account_name, - account.type, - account.state, - account.removed, - account.cleanup_needed, - account.network_domain, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - account_netstats_view.bytesReceived, - account_netstats_view.bytesSent, - vmlimit.max vmLimit, - vmcount.count vmTotal, - runningvm.vmcount runningVms, - stoppedvm.vmcount stoppedVms, - iplimit.max ipLimit, - ipcount.count ipTotal, - free_ip_view.free_ip ipFree, - volumelimit.max volumeLimit, - volumecount.count volumeTotal, - snapshotlimit.max snapshotLimit, - snapshotcount.count snapshotTotal, - templatelimit.max templateLimit, - templatecount.count templateTotal, - vpclimit.max vpcLimit, - vpccount.count vpcTotal, - projectlimit.max projectLimit, - projectcount.count projectTotal, - networklimit.max networkLimit, - networkcount.count networkTotal, - cpulimit.max cpuLimit, - cpucount.count cpuTotal, - memorylimit.max memoryLimit, - memorycount.count memoryTotal, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`free_ip_view`, - `cloud`.`account` - inner join - `cloud`.`domain` ON account.domain_id = domain.id - left join - `cloud`.`data_center` ON account.default_zone_id = data_center.id - left join - `cloud`.`account_netstats_view` ON account.id = account_netstats_view.account_id - left join - `cloud`.`resource_limit` vmlimit ON account.id = vmlimit.account_id - and vmlimit.type = 'user_vm' - left join - `cloud`.`resource_count` vmcount ON account.id = vmcount.account_id - and vmcount.type = 'user_vm' - left join - 
`cloud`.`account_vmstats_view` runningvm ON account.id = runningvm.account_id - and runningvm.state = 'Running' - left join - `cloud`.`account_vmstats_view` stoppedvm ON account.id = stoppedvm.account_id - and stoppedvm.state = 'Stopped' - left join - `cloud`.`resource_limit` iplimit ON account.id = iplimit.account_id - and iplimit.type = 'public_ip' - left join - `cloud`.`resource_count` ipcount ON account.id = ipcount.account_id - and ipcount.type = 'public_ip' - left join - `cloud`.`resource_limit` volumelimit ON account.id = volumelimit.account_id - and volumelimit.type = 'volume' - left join - `cloud`.`resource_count` volumecount ON account.id = volumecount.account_id - and volumecount.type = 'volume' - left join - `cloud`.`resource_limit` snapshotlimit ON account.id = snapshotlimit.account_id - and snapshotlimit.type = 'snapshot' - left join - `cloud`.`resource_count` snapshotcount ON account.id = snapshotcount.account_id - and snapshotcount.type = 'snapshot' - left join - `cloud`.`resource_limit` templatelimit ON account.id = templatelimit.account_id - and templatelimit.type = 'template' - left join - `cloud`.`resource_count` templatecount ON account.id = templatecount.account_id - and templatecount.type = 'template' - left join - `cloud`.`resource_limit` vpclimit ON account.id = vpclimit.account_id - and vpclimit.type = 'vpc' - left join - `cloud`.`resource_count` vpccount ON account.id = vpccount.account_id - and vpccount.type = 'vpc' - left join - `cloud`.`resource_limit` projectlimit ON account.id = projectlimit.account_id - and projectlimit.type = 'project' - left join - `cloud`.`resource_count` projectcount ON account.id = projectcount.account_id - and projectcount.type = 'project' - left join - `cloud`.`resource_limit` networklimit ON account.id = networklimit.account_id - and networklimit.type = 'network' - left join - `cloud`.`resource_count` networkcount ON account.id = networkcount.account_id - and networkcount.type = 'network' - left join - 
`cloud`.`resource_limit` cpulimit ON account.id = cpulimit.account_id - and cpulimit.type = 'cpu' - left join - `cloud`.`resource_count` cpucount ON account.id = cpucount.account_id - and cpucount.type = 'cpu' - left join - `cloud`.`resource_limit` memorylimit ON account.id = memorylimit.account_id - and memorylimit.type = 'memory' - left join - `cloud`.`resource_count` memorycount ON account.id = memorycount.account_id - and memorycount.type = 'memory' - left join - `cloud`.`async_job` ON async_job.instance_id = account.id - and async_job.instance_type = 'Account' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`async_job_view`; -CREATE VIEW `cloud`.`async_job_view` AS - select - account.id account_id, - account.uuid account_uuid, - account.account_name account_name, - account.type account_type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path, - user.id user_id, - user.uuid user_uuid, - async_job.id, - async_job.uuid, - async_job.job_cmd, - async_job.job_status, - async_job.job_process_status, - async_job.job_result_code, - async_job.job_result, - async_job.created, - async_job.removed, - async_job.instance_type, - async_job.instance_id, - CASE - WHEN async_job.instance_type = 'Volume' THEN volumes.uuid - WHEN - async_job.instance_type = 'Template' - or async_job.instance_type = 'Iso' - THEN - vm_template.uuid - WHEN - async_job.instance_type = 'VirtualMachine' - or async_job.instance_type = 'ConsoleProxy' - or async_job.instance_type = 'SystemVm' - or async_job.instance_type = 'DomainRouter' - THEN - vm_instance.uuid - WHEN async_job.instance_type = 'Snapshot' THEN snapshots.uuid - WHEN async_job.instance_type = 'Host' THEN host.uuid - WHEN async_job.instance_type = 'StoragePool' THEN storage_pool.uuid - WHEN async_job.instance_type = 'IpAddress' THEN user_ip_address.uuid - WHEN async_job.instance_type = 'SecurityGroup' THEN security_group.uuid - WHEN async_job.instance_type = 'PhysicalNetwork' 
THEN physical_network.uuid - WHEN async_job.instance_type = 'TrafficType' THEN physical_network_traffic_types.uuid - WHEN async_job.instance_type = 'PhysicalNetworkServiceProvider' THEN physical_network_service_providers.uuid - WHEN async_job.instance_type = 'FirewallRule' THEN firewall_rules.uuid - WHEN async_job.instance_type = 'Account' THEN acct.uuid - WHEN async_job.instance_type = 'User' THEN us.uuid - WHEN async_job.instance_type = 'StaticRoute' THEN static_routes.uuid - WHEN async_job.instance_type = 'PrivateGateway' THEN vpc_gateways.uuid - WHEN async_job.instance_type = 'Counter' THEN counter.uuid - WHEN async_job.instance_type = 'Condition' THEN conditions.uuid - WHEN async_job.instance_type = 'AutoScalePolicy' THEN autoscale_policies.uuid - WHEN async_job.instance_type = 'AutoScaleVmProfile' THEN autoscale_vmprofiles.uuid - WHEN async_job.instance_type = 'AutoScaleVmGroup' THEN autoscale_vmgroups.uuid - ELSE null - END instance_uuid - from - `cloud`.`async_job` - left join - `cloud`.`account` ON async_job.account_id = account.id - left join - `cloud`.`domain` ON domain.id = account.domain_id - left join - `cloud`.`user` ON async_job.user_id = user.id - left join - `cloud`.`volumes` ON async_job.instance_id = volumes.id - left join - `cloud`.`vm_template` ON async_job.instance_id = vm_template.id - left join - `cloud`.`vm_instance` ON async_job.instance_id = vm_instance.id - left join - `cloud`.`snapshots` ON async_job.instance_id = snapshots.id - left join - `cloud`.`host` ON async_job.instance_id = host.id - left join - `cloud`.`storage_pool` ON async_job.instance_id = storage_pool.id - left join - `cloud`.`user_ip_address` ON async_job.instance_id = user_ip_address.id - left join - `cloud`.`security_group` ON async_job.instance_id = security_group.id - left join - `cloud`.`physical_network` ON async_job.instance_id = physical_network.id - left join - `cloud`.`physical_network_traffic_types` ON async_job.instance_id = physical_network_traffic_types.id 
- left join - `cloud`.`physical_network_service_providers` ON async_job.instance_id = physical_network_service_providers.id - left join - `cloud`.`firewall_rules` ON async_job.instance_id = firewall_rules.id - left join - `cloud`.`account` acct ON async_job.instance_id = acct.id - left join - `cloud`.`user` us ON async_job.instance_id = us.id - left join - `cloud`.`static_routes` ON async_job.instance_id = static_routes.id - left join - `cloud`.`vpc_gateways` ON async_job.instance_id = vpc_gateways.id - left join - `cloud`.`counter` ON async_job.instance_id = counter.id - left join - `cloud`.`conditions` ON async_job.instance_id = conditions.id - left join - `cloud`.`autoscale_policies` ON async_job.instance_id = autoscale_policies.id - left join - `cloud`.`autoscale_vmprofiles` ON async_job.instance_id = autoscale_vmprofiles.id - left join - `cloud`.`autoscale_vmgroups` ON async_job.instance_id = autoscale_vmgroups.id; - -DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; -CREATE VIEW `cloud`.`storage_pool_view` AS - select - storage_pool.id, - storage_pool.uuid, - storage_pool.name, - storage_pool.status, - storage_pool.path, - storage_pool.pool_type, - storage_pool.host_address, - storage_pool.created, - storage_pool.removed, - storage_pool.capacity_bytes, - cluster.id cluster_id, - cluster.uuid cluster_uuid, - cluster.name cluster_name, - cluster.cluster_type, - data_center.id data_center_id, - data_center.uuid data_center_uuid, - data_center.name data_center_name, - host_pod_ref.id pod_id, - host_pod_ref.uuid pod_uuid, - host_pod_ref.name pod_name, - storage_pool_details.name tag, - op_host_capacity.used_capacity disk_used_capacity, - op_host_capacity.reserved_capacity disk_reserved_capacity, - async_job.id job_id, - async_job.uuid job_uuid, - async_job.job_status job_status, - async_job.account_id job_account_id - from - `cloud`.`storage_pool` - left join - `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id - left join - `cloud`.`data_center` ON 
storage_pool.data_center_id = data_center.id - left join - `cloud`.`host_pod_ref` ON storage_pool.pod_id = host_pod_ref.id - left join - `cloud`.`storage_pool_details` ON storage_pool_details.pool_id = storage_pool.id - and storage_pool_details.value = 'true' - left join - `cloud`.`op_host_capacity` ON storage_pool.id = op_host_capacity.host_id - and op_host_capacity.capacity_type = 3 - left join - `cloud`.`async_job` ON async_job.instance_id = storage_pool.id - and async_job.instance_type = 'StoragePool' - and async_job.job_status = 0; - -DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; -CREATE VIEW `cloud`.`disk_offering_view` AS - select - disk_offering.id, - disk_offering.uuid, - disk_offering.name, - disk_offering.display_text, - disk_offering.disk_size, - disk_offering.created, - disk_offering.tags, - disk_offering.customized, - disk_offering.removed, - disk_offering.use_local_storage, - disk_offering.system_use, - disk_offering.sort_key, - disk_offering.type, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`disk_offering` - left join - `cloud`.`domain` ON disk_offering.domain_id = domain.id; - -DROP VIEW IF EXISTS `cloud`.`service_offering_view`; -CREATE VIEW `cloud`.`service_offering_view` AS - select - service_offering.id, - disk_offering.uuid, - disk_offering.name, - disk_offering.display_text, - disk_offering.created, - disk_offering.tags, - disk_offering.removed, - disk_offering.use_local_storage, - disk_offering.system_use, - service_offering.cpu, - service_offering.speed, - service_offering.ram_size, - service_offering.nw_rate, - service_offering.mc_rate, - service_offering.ha_enabled, - service_offering.limit_cpu_use, - service_offering.host_tag, - service_offering.default_use, - service_offering.vm_type, - service_offering.sort_key, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`service_offering` - inner 
join - `cloud`.`disk_offering` ON service_offering.id = disk_offering.id - left join - `cloud`.`domain` ON disk_offering.domain_id = domain.id; - -DROP VIEW IF EXISTS `cloud`.`data_center_view`; -CREATE VIEW `cloud`.`data_center_view` AS - select - data_center.id, - data_center.uuid, - data_center.name, - data_center.is_security_group_enabled, - data_center.is_local_storage_enabled, - data_center.description, - data_center.dns1, - data_center.dns2, - data_center.internal_dns1, - data_center.internal_dns2, - data_center.guest_network_cidr, - data_center.domain, - data_center.networktype, - data_center.allocation_state, - data_center.zone_token, - data_center.dhcp_provider, - data_center.removed, - domain.id domain_id, - domain.uuid domain_uuid, - domain.name domain_name, - domain.path domain_path - from - `cloud`.`data_center` - left join - `cloud`.`domain` ON data_center.domain_id = domain.id; \ No newline at end of file diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 11ae26745e8..7361681da47 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -145,12 +145,12 @@ DROP TABLE IF EXISTS `cloud`.`region`; DROP TABLE IF EXISTS `cloud`.`s2s_customer_gateway`; DROP TABLE IF EXISTS `cloud`.`s2s_vpn_gateway`; DROP TABLE IF EXISTS `cloud`.`s2s_vpn_connection`; -DROP TABLE IF EXISTS `cloud`,`external_nicira_nvp_devices`; -DROP TABLE IF EXISTS `cloud`,`nicira_nvp_nic_map`; -DROP TABLE IF EXISTS `cloud`,`s3`; -DROP TABLE IF EXISTS `cloud`,`template_s3_ref`; -DROP TABLE IF EXISTS `cloud`,`nicira_nvp_router_map`; -DROP TABLE IF EXISTS `cloud`,`external_bigswitch_vns_devices`; +DROP TABLE IF EXISTS `cloud`.`external_nicira_nvp_devices`; +DROP TABLE IF EXISTS `cloud`.`nicira_nvp_nic_map`; +DROP TABLE IF EXISTS `cloud`.`s3`; +DROP TABLE IF EXISTS `cloud`.`template_s3_ref`; +DROP TABLE IF EXISTS `cloud`.`nicira_nvp_router_map`; +DROP TABLE IF EXISTS `cloud`.`external_bigswitch_vns_devices`; DROP TABLE IF EXISTS 
`cloud`.`autoscale_vmgroup_policy_map`; DROP TABLE IF EXISTS `cloud`.`autoscale_policy_condition_map`; DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroups`; @@ -207,7 +207,7 @@ CREATE TABLE `cloud`.`version` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT INTO `version` (`version`, `updated`, `step`) VALUES('4.1.0', now(), 'Complete'); +INSERT INTO `version` (`version`, `updated`, `step`) VALUES('4.0.0', now(), 'Complete'); CREATE TABLE `cloud`.`op_it_work` ( `id` char(40) COMMENT 'reservation id', @@ -247,8 +247,6 @@ CREATE TABLE `cloud`.`networks` ( `broadcast_uri` varchar(255) COMMENT 'broadcast domain specifier', `gateway` varchar(15) COMMENT 'gateway for this network configuration', `cidr` varchar(18) COMMENT 'network cidr', - `ip6_gateway` varchar(50) COMMENT 'IPv6 gateway for this network', - `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network', `mode` varchar(32) COMMENT 'How to retrieve ip address in this network', `network_offering_id` bigint unsigned NOT NULL COMMENT 'network offering id that this configuration is created from', `physical_network_id` bigint unsigned COMMENT 'physical network id that this configuration is based on', @@ -312,8 +310,6 @@ CREATE TABLE `cloud`.`nics` ( `update_time` timestamp NOT NULL COMMENT 'time the state was changed', `isolation_uri` varchar(255) COMMENT 'id for isolation', `ip6_address` char(40) COMMENT 'ip6 address', - `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address', - `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address', `default_nic` tinyint NOT NULL COMMENT "None", `vm_type` varchar(32) COMMENT 'type of vm: System or User vm', `created` datetime NOT NULL COMMENT 'date created', @@ -352,8 +348,6 @@ CREATE TABLE `cloud`.`network_offerings` ( `elastic_ip_service` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides elastic ip service', `elastic_lb_service` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides elastic lb service', 
`specify_ip_ranges` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides an ability to define ip ranges', - `inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'Is this network offering LB provider is in inline mode', - `is_persistent` int(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the network offering provides an ability to create persistent networks', PRIMARY KEY (`id`), INDEX `i_network_offerings__system_only`(`system_only`), INDEX `i_network_offerings__removed`(`removed`), @@ -528,14 +522,12 @@ CREATE TABLE `cloud`.`snapshots` ( `removed` datetime COMMENT 'Date removed. not null if removed', `backup_snap_id` varchar(255) COMMENT 'Back up uuid of the snapshot', `swift_id` bigint unsigned COMMENT 'which swift', - `s3_id` bigint unsigned COMMENT 'S3 to which this snapshot will be stored', `sechost_id` bigint unsigned COMMENT 'secondary storage host id', `prev_snap_id` bigint unsigned COMMENT 'Id of the most recent snapshot', `hypervisor_type` varchar(32) NOT NULL COMMENT 'hypervisor that the snapshot was taken under', `version` varchar(32) COMMENT 'snapshot version', PRIMARY KEY (`id`), CONSTRAINT `uc_snapshots__uuid` UNIQUE (`uuid`), - CONSTRAINT `fk_snapshots__s3_id` FOREIGN KEY `fk_snapshots__s3_id` (`s3_id`) REFERENCES `s3` (`id`), INDEX `i_snapshots__removed`(`removed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -545,10 +537,7 @@ CREATE TABLE `cloud`.`vlan` ( `vlan_id` varchar(255), `vlan_gateway` varchar(255), `vlan_netmask` varchar(255), - `ip6_gateway` varchar(255), - `ip6_cidr` varchar(255), `description` varchar(255), - `ip6_range` varchar(255), `vlan_type` varchar(255), `data_center_id` bigint unsigned NOT NULL, `network_id` bigint unsigned NOT NULL COMMENT 'id of corresponding network offering', @@ -711,7 +700,7 @@ CREATE TABLE `cloud`.`op_dc_vnet_alloc` ( PRIMARY KEY (`id`), UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id__account_id`(`vnet`, `data_center_id`, `account_id`), INDEX 
`i_op_dc_vnet_alloc__dc_taken`(`data_center_id`, `taken`), - UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`), + UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `data_center_id`), CONSTRAINT `fk_op_dc_vnet_alloc__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_op_dc_vnet_alloc__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -794,10 +783,12 @@ CREATE TABLE `cloud`.`load_balancer_stickiness_policies` ( CREATE TABLE `cloud`.`inline_load_balancer_nic_map` ( `id` bigint unsigned NOT NULL auto_increment, + `load_balancer_id` bigint unsigned NOT NULL, `public_ip_address` char(40) NOT NULL, `nic_id` bigint unsigned NULL COMMENT 'nic id', PRIMARY KEY (`id`), UNIQUE KEY (`nic_id`), + CONSTRAINT `fk_inline_load_balancer_nic_map__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, CONSTRAINT `fk_inline_load_balancer_nic_map__nic_id` FOREIGN KEY(`nic_id`) REFERENCES `nics`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -950,7 +941,6 @@ CREATE TABLE `cloud`.`user` ( `timezone` varchar(30) default NULL, `registration_token` varchar(255) default NULL, `is_registered` tinyint NOT NULL DEFAULT 0 COMMENT '1: yes, 0: no', - `region_id` int unsigned NOT NULL, `incorrect_login_attempts` integer unsigned NOT NULL DEFAULT 0, PRIMARY KEY (`id`), INDEX `i_user__removed`(`removed`), @@ -1079,7 +1069,6 @@ CREATE TABLE `cloud`.`vm_instance` ( `uuid` varchar(40), `instance_name` varchar(255) NOT NULL COMMENT 'name of the vm instance running on the hosts', `state` varchar(32) NOT NULL, - `desired_state` varchar(32) NULL, `vm_template_id` bigint unsigned, `guest_os_id` bigint unsigned NOT NULL, `private_mac_address` varchar(17), @@ -1169,7 +1158,6 @@ CREATE TABLE `cloud`.`upload` ( `id` bigint 
unsigned NOT NULL auto_increment, `host_id` bigint unsigned NOT NULL, `type_id` bigint unsigned NOT NULL, - `uuid` varchar(40), `type` varchar(255), `mode` varchar(255), `created` DATETIME NOT NULL, @@ -1302,7 +1290,6 @@ CREATE TABLE `cloud`.`domain` ( `state` char(32) NOT NULL default 'Active' COMMENT 'state of the domain', `network_domain` varchar(255), `type` varchar(255) NOT NULL DEFAULT 'Normal' COMMENT 'type of the domain - can be Normal or Project', - `region_id` int unsigned NOT NULL, PRIMARY KEY (`id`), UNIQUE (parent, name, removed), INDEX `i_domain__path`(`path`), @@ -1321,7 +1308,6 @@ CREATE TABLE `cloud`.`account` ( `cleanup_needed` tinyint(1) NOT NULL default '0', `network_domain` varchar(255), `default_zone_id` bigint unsigned, - `region_id` int unsigned NOT NULL, PRIMARY KEY (`id`), INDEX i_account__removed(`removed`), CONSTRAINT `fk_account__default_zone_id` FOREIGN KEY `fk_account__default_zone_id`(`default_zone_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, @@ -1384,7 +1370,6 @@ CREATE TABLE `cloud`.`alert` ( `last_sent` DATETIME NULL COMMENT 'Last time the alert was sent', `resolved` DATETIME NULL COMMENT 'when the alert status was resolved (available memory no longer at critical level, etc.)', PRIMARY KEY (`id`), - INDEX `last_sent` (`last_sent` DESC), CONSTRAINT `uc_alert__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1395,7 +1380,7 @@ CREATE TABLE `cloud`.`async_job` ( `session_key` varchar(64) COMMENT 'all async-job manage to apply session based security enforcement', `instance_type` varchar(64) COMMENT 'instance_type and instance_id work together to allow attaching an instance object to a job', `instance_id` bigint unsigned, - `job_cmd` varchar(255) NOT NULL COMMENT 'command name', + `job_cmd` varchar(64) NOT NULL COMMENT 'command name', `job_cmd_originator` varchar(64) COMMENT 'command originator', `job_cmd_info` text COMMENT 'command parameter info', `job_cmd_ver` int(1) COMMENT 'command version', @@ -1428,15 
+1413,16 @@ CREATE TABLE `cloud`.`sync_queue` ( `id` bigint unsigned NOT NULL auto_increment, `sync_objtype` varchar(64) NOT NULL, `sync_objid` bigint unsigned NOT NULL, + `queue_proc_msid` bigint, `queue_proc_number` bigint COMMENT 'process number, increase 1 for each iteration', + `queue_proc_time` datetime COMMENT 'last time to process the queue', `created` datetime COMMENT 'date created', `last_updated` datetime COMMENT 'date created', - `queue_size` smallint DEFAULT 0 COMMENT 'number of items being processed by the queue', - `queue_size_limit` smallint DEFAULT 1 COMMENT 'max number of items the queue can process concurrently', PRIMARY KEY (`id`), UNIQUE `i_sync_queue__objtype__objid`(`sync_objtype`, `sync_objid`), INDEX `i_sync_queue__created`(`created`), - INDEX `i_sync_queue__last_updated`(`last_updated`) + INDEX `i_sync_queue__last_updated`(`last_updated`), + INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`stack_maid` ( @@ -1457,15 +1443,13 @@ CREATE TABLE `cloud`.`sync_queue_item` ( `content_id` bigint, `queue_proc_msid` bigint COMMENT 'owner msid when the queue item is being processed', `queue_proc_number` bigint COMMENT 'used to distinguish raw items and items being in process', - `queue_proc_time` datetime COMMENT 'when processing started for the item', `created` datetime COMMENT 'time created', PRIMARY KEY (`id`), CONSTRAINT `fk_sync_queue_item__queue_id` FOREIGN KEY `fk_sync_queue_item__queue_id` (`queue_id`) REFERENCES `sync_queue` (`id`) ON DELETE CASCADE, INDEX `i_sync_queue_item__queue_id`(`queue_id`), INDEX `i_sync_queue_item__created`(`created`), INDEX `i_sync_queue_item__queue_proc_number`(`queue_proc_number`), - INDEX `i_sync_queue_item__queue_proc_msid`(`queue_proc_msid`), - INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`) + INDEX `i_sync_queue_item__queue_proc_msid`(`queue_proc_msid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`disk_offering` ( @@ 
-1907,37 +1891,6 @@ CREATE TABLE `cloud`.`swift` ( CONSTRAINT `uc_swift__uuid` UNIQUE (`uuid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE `cloud`.`s3` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `access_key` varchar(20) NOT NULL COMMENT ' The S3 access key', - `secret_key` varchar(40) NOT NULL COMMENT ' The S3 secret key', - `end_point` varchar(1024) COMMENT ' The S3 host', - `bucket` varchar(63) NOT NULL COMMENT ' The S3 host', - `https` tinyint unsigned DEFAULT NULL COMMENT ' Flag indicating whether or not to connect over HTTPS', - `connection_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.', - `max_error_retry` integer COMMENT ' The maximum number of retry attempts for failed retryable requests (ex: 5xx error responses from services).', - `socket_timeout` integer COMMENT ' The amount of time to wait (in milliseconds) for data to be transfered over an established, open connection before the connection times out and is closed.', - `created` datetime COMMENT 'date the s3 first signed on', - PRIMARY KEY (`id`), - CONSTRAINT `uc_s3__uuid` UNIQUE (`uuid`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`template_s3_ref` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `s3_id` bigint unsigned NOT NULL COMMENT ' Associated S3 instance id', - `template_id` bigint unsigned NOT NULL COMMENT ' Associated template id', - `created` DATETIME NOT NULL COMMENT ' The creation timestamp', - `size` bigint unsigned COMMENT ' The size of the object', - `physical_size` bigint unsigned DEFAULT 0 COMMENT ' The physical size of the object', - PRIMARY KEY (`id`), - CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`), - CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_template_s3_ref__template_id` 
FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`), - INDEX `i_template_s3_ref__s3_id`(`s3_id`), - INDEX `i_template_s3_ref__template_id`(`template_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE `cloud`.`op_host_transfer` ( `id` bigint unsigned UNIQUE NOT NULL COMMENT 'Id of the host', `initial_mgmt_server_id` bigint unsigned COMMENT 'management server the host is transfered from', @@ -2128,6 +2081,7 @@ CREATE TABLE `cloud`.`external_load_balancer_devices` ( `device_state` varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'state (enabled/disabled/shutdown) of the device', `allocation_state` varchar(32) NOT NULL DEFAULT 'Free' COMMENT 'Allocation state (Free/Shared/Dedicated/Provider) of the device', `is_dedicated` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if device/appliance is provisioned for dedicated use only', + `is_inline` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer will be used in in-line configuration with firewall', `is_managed` int(1) unsigned NOT NULL DEFAULT 0 COMMENT '1 if load balancer appliance is provisioned and its life cycle is managed by by cloudstack', `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external load balancer device', `parent_host_id` bigint unsigned COMMENT 'if the load balancer appliance is cloudstack managed, then host id on which this appliance is provisioned', @@ -2304,16 +2258,6 @@ CREATE TABLE `cloud`.`netscaler_pod_ref` ( CONSTRAINT `fk_ns_pod_ref__device_id` FOREIGN KEY (`external_load_balancer_device_id`) REFERENCES `external_load_balancer_devices`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`region` ( - `id` int unsigned NOT NULL UNIQUE, - `name` varchar(255) NOT NULL UNIQUE, - `end_point` varchar(255) NOT NULL, - `api_key` varchar(255), - `secret_key` varchar(255), - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - CREATE TABLE `cloud`.`vpc` ( `id` bigint unsigned NOT 
NULL auto_increment COMMENT 'id', `uuid` varchar(40) NOT NULL, @@ -2475,211 +2419,5 @@ CREATE TABLE `cloud`.`nicira_nvp_nic_map` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE `cloud`.`nicira_nvp_router_map` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `logicalrouter_uuid` varchar(255) NOT NULL UNIQUE COMMENT 'nicira uuid of logical router', - `network_id` bigint unsigned NOT NULL UNIQUE COMMENT 'cloudstack id of the network', - PRIMARY KEY (`id`), - CONSTRAINT `fk_nicira_nvp_router_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(255) UNIQUE, - `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', - `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', - `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', - `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', - PRIMARY KEY (`id`), - CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`counter` ( - `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `source` varchar(255) NOT NULL COMMENT 'source e.g. 
netscaler, snmp', - `name` varchar(255) NOT NULL COMMENT 'Counter name', - `value` varchar(255) NOT NULL COMMENT 'Value in case of source=snmp', - `removed` datetime COMMENT 'date removed if not null', - `created` datetime NOT NULL COMMENT 'date created', - PRIMARY KEY (`id`), - CONSTRAINT `uc_counter__uuid` UNIQUE (`uuid`), - INDEX `i_counter__removed`(`removed`), - INDEX `i_counter__source`(`source`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`conditions` ( - `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', - `uuid` varchar(40), - `counter_id` bigint unsigned NOT NULL COMMENT 'Counter Id', - `threshold` bigint unsigned NOT NULL COMMENT 'threshold value for the given counter', - `relational_operator` char(2) COMMENT 'relational operator to be used upon the counter and condition', - `domain_id` bigint unsigned NOT NULL COMMENT 'domain the Condition belongs to', - `account_id` bigint unsigned NOT NULL COMMENT 'owner of this Condition', - `removed` datetime COMMENT 'date removed if not null', - `created` datetime NOT NULL COMMENT 'date created', - PRIMARY KEY (`id`), - CONSTRAINT `fk_conditions__counter_id` FOREIGN KEY `fk_condition__counter_id`(`counter_id`) REFERENCES `counter`(`id`), - CONSTRAINT `fk_conditions__account_id` FOREIGN KEY `fk_condition__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_conditions__domain_id` FOREIGN KEY `fk_condition__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_conditions__uuid` UNIQUE (`uuid`), - INDEX `i_conditions__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmprofiles` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `zone_id` bigint unsigned NOT NULL, - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `autoscale_user_id` bigint unsigned NOT NULL, - `service_offering_id` bigint unsigned NOT NULL, - 
`template_id` bigint unsigned NOT NULL, - `other_deploy_params` varchar(1024) COMMENT 'other deployment parameters that is in addition to zoneid,serviceofferingid,domainid', - `destroy_vm_grace_period` int unsigned COMMENT 'the time allowed for existing connections to get closed before a vm is destroyed', - `counter_params` varchar(1024) COMMENT 'the parameters for the counter to be used to get metric information from VMs', - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmprofiles__domain_id` FOREIGN KEY `fk_autoscale_vmprofiles__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmprofiles__account_id` FOREIGN KEY `fk_autoscale_vmprofiles__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmprofiles__autoscale_user_id` FOREIGN KEY `fk_autoscale_vmprofiles__autoscale_user_id` (`autoscale_user_id`) REFERENCES `user`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_autoscale_vmprofiles__uuid` UNIQUE (`uuid`), - INDEX `i_autoscale_vmprofiles__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_policies` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `duration` int unsigned NOT NULL, - `quiet_time` int unsigned NOT NULL, - `action` varchar(15), - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_policies__domain_id` FOREIGN KEY `fk_autoscale_policies__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_policies__account_id` FOREIGN KEY `fk_autoscale_policies__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_autoscale_policies__uuid` 
UNIQUE (`uuid`), - INDEX `i_autoscale_policies__removed`(`removed`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmgroups` ( - `id` bigint unsigned NOT NULL auto_increment, - `uuid` varchar(40), - `zone_id` bigint unsigned NOT NULL, - `domain_id` bigint unsigned NOT NULL, - `account_id` bigint unsigned NOT NULL, - `load_balancer_id` bigint unsigned NOT NULL, - `min_members` int unsigned DEFAULT 1, - `max_members` int unsigned NOT NULL, - `member_port` int unsigned NOT NULL, - `interval` int unsigned NOT NULL, - `profile_id` bigint unsigned NOT NULL, - `state` varchar(255) NOT NULL COMMENT 'enabled or disabled, a vmgroup is disabled to stop autoscaling activity', - `created` datetime NOT NULL COMMENT 'date created', - `removed` datetime COMMENT 'date removed if not null', - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmgroup__autoscale_vmprofile_id` FOREIGN KEY(`profile_id`) REFERENCES `autoscale_vmprofiles`(`id`), - CONSTRAINT `fk_autoscale_vmgroup__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__domain_id` FOREIGN KEY `fk_autoscale_vmgroups__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__account_id` FOREIGN KEY `fk_autoscale_vmgroups__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroups__zone_id` FOREIGN KEY `fk_autoscale_vmgroups__zone_id`(`zone_id`) REFERENCES `data_center`(`id`), - CONSTRAINT `uc_autoscale_vmgroups__uuid` UNIQUE (`uuid`), - INDEX `i_autoscale_vmgroups__removed`(`removed`), - INDEX `i_autoscale_vmgroups__load_balancer_id`(`load_balancer_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_policy_condition_map` ( - `id` bigint unsigned NOT NULL auto_increment, - `policy_id` bigint unsigned NOT NULL, - `condition_id` bigint unsigned NOT NULL, - PRIMARY KEY (`id`), - CONSTRAINT 
`fk_autoscale_policy_condition_map__policy_id` FOREIGN KEY `fk_autoscale_policy_condition_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_policy_condition_map__condition_id` FOREIGN KEY `fk_autoscale_policy_condition_map__condition_id` (`condition_id`) REFERENCES `conditions` (`id`), - INDEX `i_autoscale_policy_condition_map__policy_id`(`policy_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`autoscale_vmgroup_policy_map` ( - `id` bigint unsigned NOT NULL auto_increment, - `vmgroup_id` bigint unsigned NOT NULL, - `policy_id` bigint unsigned NOT NULL, - PRIMARY KEY (`id`), - CONSTRAINT `fk_autoscale_vmgroup_policy_map__vmgroup_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__vmgroup_id` (`vmgroup_id`) REFERENCES `autoscale_vmgroups` (`id`) ON DELETE CASCADE, - CONSTRAINT `fk_autoscale_vmgroup_policy_map__policy_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`), - INDEX `i_autoscale_vmgroup_policy_map__vmgroup_id`(`vmgroup_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `cloud`.`region_sync` ( - `id` bigint unsigned NOT NULL auto_increment, - `region_id` int unsigned NOT NULL, - `api` varchar(1024) NOT NULL, - `created` datetime NOT NULL COMMENT 'date created', - `processed` tinyint NOT NULL default '0', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now()); -INSERT INTO `cloud`.`counter` (id, uuid, source, name, 
value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now()); -CREATE TABLE `cloud`.`vm_snapshots` ( - `id` bigint(20) unsigned NOT NULL auto_increment COMMENT 'Primary Key', - `uuid` varchar(40) NOT NULL, - `name` varchar(255) NOT NULL, - `display_name` varchar(255) default NULL, - `description` varchar(255) default NULL, - `vm_id` bigint(20) unsigned NOT NULL, - `account_id` bigint(20) unsigned NOT NULL, - `domain_id` bigint(20) unsigned NOT NULL, - `vm_snapshot_type` varchar(32) default NULL, - `state` varchar(32) NOT NULL, - `parent` bigint unsigned default NULL, - `current` int(1) unsigned default NULL, - `update_count` bigint unsigned NOT NULL DEFAULT 0, - `updated` datetime default NULL, - `created` datetime default NULL, - `removed` datetime default NULL, - PRIMARY KEY (`id`), - CONSTRAINT UNIQUE KEY `uc_vm_snapshots_uuid` (`uuid`), - INDEX `vm_snapshots_name` (`name`), - INDEX `vm_snapshots_vm_id` (`vm_id`), - INDEX `vm_snapshots_account_id` (`account_id`), - INDEX `vm_snapshots_display_name` (`display_name`), - INDEX `vm_snapshots_removed` (`removed`), - INDEX `vm_snapshots_parent` (`parent`), - CONSTRAINT `fk_vm_snapshots_vm_id__vm_instance_id` FOREIGN KEY `fk_vm_snapshots_vm_id__vm_instance_id` (`vm_id`) REFERENCES `vm_instance` (`id`), - CONSTRAINT `fk_vm_snapshots_account_id__account_id` FOREIGN KEY `fk_vm_snapshots_account_id__account_id` (`account_id`) REFERENCES `account` (`id`), - CONSTRAINT `fk_vm_snapshots_domain_id__domain_id` FOREIGN KEY `fk_vm_snapshots_domain_id__domain_id` (`domain_id`) REFERENCES `domain` (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -CREATE TABLE `cloud`.`user_ipv6_address` ( - `id` bigint unsigned NOT NULL UNIQUE auto_increment, - `uuid` varchar(40), - `account_id` bigint unsigned NULL, - `domain_id` bigint unsigned NULL, - `ip_address` char(50) NOT NULL, - `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', - `vlan_id` bigint unsigned NOT NULL, - `state` 
char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', - `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', - `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', - `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', - `physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', - `created` datetime NULL COMMENT 'Date this ip was allocated to someone', - PRIMARY KEY (`id`), - UNIQUE (`ip_address`, `source_network_id`), - CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), - CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), - CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, - CONSTRAINT `fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, - CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), - CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - SET foreign_key_checks = 1; diff --git a/setup/db/db/schema-40to410-cleanup.sql b/setup/db/db/schema-40to410-cleanup.sql new file mode 100644 index 00000000000..411b568de4a --- /dev/null +++ b/setup/db/db/schema-40to410-cleanup.sql @@ -0,0 +1,21 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. 
The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema cleanup from 4.0.0 to 4.1.0; +--; + diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql index 7f0044127f6..b9bfe1aae4f 100644 --- a/setup/db/db/schema-40to410.sql +++ b/setup/db/db/schema-40to410.sql @@ -19,6 +19,93 @@ -- Schema upgrade from 4.0.0 to 4.1.0; --; +use cloud; +SET foreign_key_checks = 0; + +alter table vm_template add size bigint unsigned; +alter table vm_template add state varchar(255); +alter table vm_template add update_count bigint unsigned; +alter table vm_template add updated datetime; +alter table storage_pool add storage_provider_id bigint unsigned; +alter table storage_pool add scope varchar(255); +alter table storage_pool modify id bigint unsigned AUTO_INCREMENT UNIQUE NOT NULL; +alter table template_spool_ref add state varchar(255); +alter table template_spool_ref add update_count bigint unsigned; +alter table volumes add disk_type varchar(255); +alter table volumes drop foreign key `fk_volumes__account_id`; +alter table vm_instance add column disk_offering_id bigint unsigned; +alter table vm_instance add column cpu int(10) unsigned; +alter table vm_instance add column ram bigint unsigned; +alter table vm_instance add column owner varchar(255); +alter table vm_instance add column speed int(10) unsigned; +alter table vm_instance add column host_name varchar(255); +alter table vm_instance add column display_name 
varchar(255); +alter table vm_instance add column `desired_state` varchar(32) NULL; + +alter table data_center add column owner varchar(255); +alter table data_center add column created datetime COMMENT 'date created'; +alter table data_center add column lastUpdated datetime COMMENT 'last updated'; +alter table data_center add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; +alter table host_pod_ref add column owner varchar(255); +alter table host_pod_ref add column created datetime COMMENT 'date created'; +alter table host_pod_ref add column lastUpdated datetime COMMENT 'last updated'; +alter table host_pod_ref add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; +alter table host add column owner varchar(255); +alter table host add column lastUpdated datetime COMMENT 'last updated'; +alter table host add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; + +alter table cluster add column owner varchar(255); +alter table cluster add column created datetime COMMENT 'date created'; +alter table cluster add column lastUpdated datetime COMMENT 'last updated'; +alter table cluster add column engine_state varchar(32) NOT NULL DEFAULT 'Disabled' COMMENT 'the engine state of the zone'; + +CREATE TABLE `cloud`.`vm_compute_tags` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `compute_tag` varchar(255) NOT NULL COMMENT 'name of tag', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`vm_root_disk_tags` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `root_disk_tag` varchar(255) NOT NULL COMMENT 'name of tag', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE `cloud`.`vm_network_map` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT 
COMMENT 'id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `network_id` bigint unsigned NOT NULL COMMENT 'network id', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +CREATE TABLE `cloud`.`vm_reservation` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) NOT NULL COMMENT 'reservation id', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone id', + `pod_id` bigint unsigned NOT NULL COMMENT 'pod id', + `cluster_id` bigint unsigned NOT NULL COMMENT 'cluster id', + `host_id` bigint unsigned NOT NULL COMMENT 'host id', + `created` datetime COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + CONSTRAINT `uc_vm_reservation__uuid` UNIQUE (`uuid`), + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`volume_reservation` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `vm_reservation_id` bigint unsigned NOT NULL COMMENT 'id of the vm reservation', + `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', + `volume_id` bigint unsigned NOT NULL COMMENT 'volume id', + `pool_id` bigint unsigned NOT NULL COMMENT 'pool assigned to the volume', + CONSTRAINT `fk_vm_pool_reservation__vm_reservation_id` FOREIGN KEY (`vm_reservation_id`) REFERENCES `vm_reservation`(`id`) ON DELETE CASCADE, + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + CREATE TABLE `cloud`.`s3` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', `uuid` varchar(40), @@ -46,7 +133,7 @@ CREATE TABLE `cloud`.`template_s3_ref` ( CONSTRAINT `uc_template_s3_ref__template_id` UNIQUE (`template_id`), CONSTRAINT `fk_template_s3_ref__s3_id` FOREIGN KEY `fk_template_s3_ref__s3_id` (`s3_id`) REFERENCES `s3` (`id`) ON DELETE CASCADE, CONSTRAINT `fk_template_s3_ref__template_id` FOREIGN KEY `fk_template_s3_ref__template_id` (`template_id`) REFERENCES `vm_template` (`id`), - INDEX `i_template_s3_ref__swift_id`(`s3_id`), + INDEX 
`i_template_s3_ref__s3_id`(`s3_id`), INDEX `i_template_s3_ref__template_id`(`template_id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -64,11 +151,19 @@ ALTER TABLE `cloud`.`external_load_balancer_devices` DROP COLUMN `is_inline`; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Network','DEFAULT','NetworkManager','network.dhcp.nondefaultnetwork.setgateway.guestos','Windows','The guest OS\'s name start with this fields would result in DHCP server response gateway information even when the network it\'s on is not default network. Names are separated by comma.'); -ALTER TABLE `sync_queue` ADD `queue_size` SMALLINT NOT NULL DEFAULT '0' COMMENT 'number of items being processed by the queue'; +ALTER TABLE `cloud`.`sync_queue` ADD `queue_size` SMALLINT NOT NULL DEFAULT '0' COMMENT 'number of items being processed by the queue'; -ALTER TABLE `sync_queue` ADD `queue_size_limit` SMALLINT NOT NULL DEFAULT '1' COMMENT 'max number of items the queue can process concurrently'; +ALTER TABLE `cloud`.`sync_queue` ADD `queue_size_limit` SMALLINT NOT NULL DEFAULT '1' COMMENT 'max number of items the queue can process concurrently'; -ALTER TABLE `sync_queue_item` ADD `queue_proc_time` DATETIME NOT NULL COMMENT 'when processing started for the item' AFTER `queue_proc_number`; +ALTER TABLE `cloud`.`sync_queue` DROP INDEX `i_sync_queue__queue_proc_time`; + +ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_time`; + +ALTER TABLE `cloud`.`sync_queue` DROP COLUMN `queue_proc_msid`; + +ALTER TABLE `cloud`.`sync_queue_item` ADD `queue_proc_time` DATETIME COMMENT 'when processing started for the item' AFTER `queue_proc_number`; + +ALTER TABLE `cloud`.`sync_queue_item` ADD INDEX `i_sync_queue__queue_proc_time`(`queue_proc_time`); ALTER TABLE `cloud`.`inline_load_balancer_nic_map` DROP FOREIGN KEY fk_inline_load_balancer_nic_map__load_balancer_id; @@ -128,7 +223,7 @@ UPDATE `cloud`.`swift` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`upload` set uuid=id WHERE uuid is NULL; UPDATE 
`cloud`.`user` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`user_ip_address` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`user_vm_temp` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`user_vm_temp` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`virtual_router_providers` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`virtual_supervisor_module` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`vlan` set uuid=id WHERE uuid is NULL; @@ -139,17 +234,260 @@ UPDATE `cloud`.`vpc_gateways` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`vpc_offerings` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`vpn_users` set uuid=id WHERE uuid is NULL; UPDATE `cloud`.`volumes` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_vmgroups` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_vmprofiles` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`autoscale_policies` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`counter` set uuid=id WHERE uuid is NULL; -UPDATE `cloud`.`conditions` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_vmgroups` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_vmprofiles` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`autoscale_policies` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`counter` set uuid=id WHERE uuid is NULL; +-- UPDATE `cloud`.`conditions` set uuid=id WHERE uuid is NULL; INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'detail.batch.query.size', '2000', 'Default entity detail batch query size for listing'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.enabled', 'false', 'Enable/Disable Api rate limit'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.interval', '1', 'Time interval (in seconds) to reset API count'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 
'management-server', 'api.throttling.max', '25', 'Max allowed number of APIs within fixed interval'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'api.throttling.cachesize', '50000', 'Account based API count cache size'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'direct.agent.pool.size', '500', 'Default size for DirectAgentPool'); + +ALTER TABLE `cloud`.`op_dc_vnet_alloc` DROP INDEX i_op_dc_vnet_alloc__vnet__data_center_id; + +ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`); + +ALTER TABLE `cloud`.`op_dc_vnet_alloc` DROP INDEX i_op_dc_vnet_alloc__vnet__data_center_id__account_id; + +CREATE TABLE `cloud`.`region` ( + `id` int unsigned NOT NULL UNIQUE, + `name` varchar(255) NOT NULL UNIQUE, + `end_point` varchar(255) NOT NULL, + `api_key` varchar(255), + `secret_key` varchar(255), + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api','',''); + +ALTER TABLE `cloud`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; +ALTER TABLE `cloud`.`user` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; +ALTER TABLE `cloud`.`domain` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.memory', '40960', 'The default maximum memory (in MB) that can be used for an account'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.cpus', '40', 'The default maximum number 
of cpu cores that can be used for a project'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.memory', '40960', 'The default maximum memory (in MB) that can be used for a project'); + +ALTER TABLE `cloud_usage`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; + +CREATE TABLE `cloud`.`nicira_nvp_router_map` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `logicalrouter_uuid` varchar(255) NOT NULL UNIQUE COMMENT 'nicira uuid of logical router', + `network_id` bigint unsigned NOT NULL UNIQUE COMMENT 'cloudstack id of the network', + PRIMARY KEY (`id`), + CONSTRAINT `fk_nicira_nvp_router_map__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`external_bigswitch_vns_devices` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(255) UNIQUE, + `physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which bigswitch vns device is added', + `provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this bigswitch vns device', + `device_name` varchar(255) NOT NULL COMMENT 'name of the bigswitch vns device', + `host_id` bigint unsigned NOT NULL COMMENT 'host id coresponding to the external bigswitch vns device', + PRIMARY KEY (`id`), + CONSTRAINT `fk_external_bigswitch_vns_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_external_bigswitch_vns_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`counter` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40), + `source` varchar(255) NOT NULL COMMENT 'source e.g. 
netscaler, snmp', + `name` varchar(255) NOT NULL COMMENT 'Counter name', + `value` varchar(255) NOT NULL COMMENT 'Value in case of source=snmp', + `removed` datetime COMMENT 'date removed if not null', + `created` datetime NOT NULL COMMENT 'date created', + PRIMARY KEY (`id`), + CONSTRAINT `uc_counter__uuid` UNIQUE (`uuid`), + INDEX `i_counter__removed`(`removed`), + INDEX `i_counter__source`(`source`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`conditions` ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40), + `counter_id` bigint unsigned NOT NULL COMMENT 'Counter Id', + `threshold` bigint unsigned NOT NULL COMMENT 'threshold value for the given counter', + `relational_operator` char(2) COMMENT 'relational operator to be used upon the counter and condition', + `domain_id` bigint unsigned NOT NULL COMMENT 'domain the Condition belongs to', + `account_id` bigint unsigned NOT NULL COMMENT 'owner of this Condition', + `removed` datetime COMMENT 'date removed if not null', + `created` datetime NOT NULL COMMENT 'date created', + PRIMARY KEY (`id`), + CONSTRAINT `fk_conditions__counter_id` FOREIGN KEY `fk_condition__counter_id`(`counter_id`) REFERENCES `counter`(`id`), + CONSTRAINT `fk_conditions__account_id` FOREIGN KEY `fk_condition__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_conditions__domain_id` FOREIGN KEY `fk_condition__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_conditions__uuid` UNIQUE (`uuid`), + INDEX `i_conditions__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmprofiles` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `zone_id` bigint unsigned NOT NULL, + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `autoscale_user_id` bigint unsigned NOT NULL, + `service_offering_id` bigint unsigned NOT NULL, + 
`template_id` bigint unsigned NOT NULL, + `other_deploy_params` varchar(1024) COMMENT 'other deployment parameters that is in addition to zoneid,serviceofferingid,domainid', + `destroy_vm_grace_period` int unsigned COMMENT 'the time allowed for existing connections to get closed before a vm is destroyed', + `counter_params` varchar(1024) COMMENT 'the parameters for the counter to be used to get metric information from VMs', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmprofiles__domain_id` FOREIGN KEY `fk_autoscale_vmprofiles__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmprofiles__account_id` FOREIGN KEY `fk_autoscale_vmprofiles__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmprofiles__autoscale_user_id` FOREIGN KEY `fk_autoscale_vmprofiles__autoscale_user_id` (`autoscale_user_id`) REFERENCES `user`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_autoscale_vmprofiles__uuid` UNIQUE (`uuid`), + INDEX `i_autoscale_vmprofiles__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_policies` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `duration` int unsigned NOT NULL, + `quiet_time` int unsigned NOT NULL, + `action` varchar(15), + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_policies__domain_id` FOREIGN KEY `fk_autoscale_policies__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_policies__account_id` FOREIGN KEY `fk_autoscale_policies__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_autoscale_policies__uuid` 
UNIQUE (`uuid`), + INDEX `i_autoscale_policies__removed`(`removed`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmgroups` ( + `id` bigint unsigned NOT NULL auto_increment, + `uuid` varchar(40), + `zone_id` bigint unsigned NOT NULL, + `domain_id` bigint unsigned NOT NULL, + `account_id` bigint unsigned NOT NULL, + `load_balancer_id` bigint unsigned NOT NULL, + `min_members` int unsigned DEFAULT 1, + `max_members` int unsigned NOT NULL, + `member_port` int unsigned NOT NULL, + `interval` int unsigned NOT NULL, + `profile_id` bigint unsigned NOT NULL, + `state` varchar(255) NOT NULL COMMENT 'enabled or disabled, a vmgroup is disabled to stop autoscaling activity', + `created` datetime NOT NULL COMMENT 'date created', + `removed` datetime COMMENT 'date removed if not null', + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmgroup__autoscale_vmprofile_id` FOREIGN KEY(`profile_id`) REFERENCES `autoscale_vmprofiles`(`id`), + CONSTRAINT `fk_autoscale_vmgroup__load_balancer_id` FOREIGN KEY(`load_balancer_id`) REFERENCES `load_balancing_rules`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__domain_id` FOREIGN KEY `fk_autoscale_vmgroups__domain_id` (`domain_id`) REFERENCES `domain`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__account_id` FOREIGN KEY `fk_autoscale_vmgroups__account_id` (`account_id`) REFERENCES `account`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroups__zone_id` FOREIGN KEY `fk_autoscale_vmgroups__zone_id`(`zone_id`) REFERENCES `data_center`(`id`), + CONSTRAINT `uc_autoscale_vmgroups__uuid` UNIQUE (`uuid`), + INDEX `i_autoscale_vmgroups__removed`(`removed`), + INDEX `i_autoscale_vmgroups__load_balancer_id`(`load_balancer_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_policy_condition_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `policy_id` bigint unsigned NOT NULL, + `condition_id` bigint unsigned NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT 
`fk_autoscale_policy_condition_map__policy_id` FOREIGN KEY `fk_autoscale_policy_condition_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_policy_condition_map__condition_id` FOREIGN KEY `fk_autoscale_policy_condition_map__condition_id` (`condition_id`) REFERENCES `conditions` (`id`), + INDEX `i_autoscale_policy_condition_map__policy_id`(`policy_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`autoscale_vmgroup_policy_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `vmgroup_id` bigint unsigned NOT NULL, + `policy_id` bigint unsigned NOT NULL, + PRIMARY KEY (`id`), + CONSTRAINT `fk_autoscale_vmgroup_policy_map__vmgroup_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__vmgroup_id` (`vmgroup_id`) REFERENCES `autoscale_vmgroups` (`id`) ON DELETE CASCADE, + CONSTRAINT `fk_autoscale_vmgroup_policy_map__policy_id` FOREIGN KEY `fk_autoscale_vmgroup_policy_map__policy_id` (`policy_id`) REFERENCES `autoscale_policies` (`id`), + INDEX `i_autoscale_vmgroup_policy_map__vmgroup_id`(`vmgroup_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now()); +INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now()); +CREATE TABLE `cloud`.`vm_snapshots` ( + `id` bigint(20) unsigned NOT NULL auto_increment COMMENT 'Primary Key', + `uuid` varchar(40) NOT NULL, + `name` varchar(255) NOT NULL, + `display_name` varchar(255) default 
NULL, + `description` varchar(255) default NULL, + `vm_id` bigint(20) unsigned NOT NULL, + `account_id` bigint(20) unsigned NOT NULL, + `domain_id` bigint(20) unsigned NOT NULL, + `vm_snapshot_type` varchar(32) default NULL, + `state` varchar(32) NOT NULL, + `parent` bigint unsigned default NULL, + `current` int(1) unsigned default NULL, + `update_count` bigint unsigned NOT NULL DEFAULT 0, + `updated` datetime default NULL, + `created` datetime default NULL, + `removed` datetime default NULL, + PRIMARY KEY (`id`), + CONSTRAINT UNIQUE KEY `uc_vm_snapshots_uuid` (`uuid`), + INDEX `vm_snapshots_name` (`name`), + INDEX `vm_snapshots_vm_id` (`vm_id`), + INDEX `vm_snapshots_account_id` (`account_id`), + INDEX `vm_snapshots_display_name` (`display_name`), + INDEX `vm_snapshots_removed` (`removed`), + INDEX `vm_snapshots_parent` (`parent`), + CONSTRAINT `fk_vm_snapshots_vm_id__vm_instance_id` FOREIGN KEY `fk_vm_snapshots_vm_id__vm_instance_id` (`vm_id`) REFERENCES `vm_instance` (`id`), + CONSTRAINT `fk_vm_snapshots_account_id__account_id` FOREIGN KEY `fk_vm_snapshots_account_id__account_id` (`account_id`) REFERENCES `account` (`id`), + CONSTRAINT `fk_vm_snapshots_domain_id__domain_id` FOREIGN KEY `fk_vm_snapshots_domain_id__domain_id` (`domain_id`) REFERENCES `domain` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`user_ipv6_address` ( + `id` bigint unsigned NOT NULL UNIQUE auto_increment, + `uuid` varchar(40), + `account_id` bigint unsigned NULL, + `domain_id` bigint unsigned NULL, + `ip_address` char(50) NOT NULL, + `data_center_id` bigint unsigned NOT NULL COMMENT 'zone that it belongs to', + `vlan_id` bigint unsigned NOT NULL, + `state` char(32) NOT NULL default 'Free' COMMENT 'state of the ip address', + `mac_address` varchar(40) NOT NULL COMMENT 'mac address of this ip', + `source_network_id` bigint unsigned NOT NULL COMMENT 'network id ip belongs to', + `network_id` bigint unsigned COMMENT 'network this public ip address is associated with', + 
`physical_network_id` bigint unsigned NOT NULL COMMENT 'physical network id that this configuration is based on', + `created` datetime NULL COMMENT 'Date this ip was allocated to someone', + PRIMARY KEY (`id`), + UNIQUE (`ip_address`, `source_network_id`), + CONSTRAINT `fk_user_ipv6_address__source_network_id` FOREIGN KEY (`source_network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__network_id` FOREIGN KEY (`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `fk_user_ipv6_address__account_id` FOREIGN KEY (`account_id`) REFERENCES `account`(`id`), + CONSTRAINT `fk_user_ipv6_address__vlan_id` FOREIGN KEY (`vlan_id`) REFERENCES `vlan`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_user_ipv6_address__data_center_id` FOREIGN KEY (`data_center_id`) REFERENCES `data_center`(`id`) ON DELETE CASCADE, + CONSTRAINT `uc_user_ipv6_address__uuid` UNIQUE (`uuid`), + CONSTRAINT `fk_user_ipv6_address__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'IPv6 gateway for this network'; +ALTER TABLE `cloud`.`networks` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'IPv6 cidr for this network'; + +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_gateway` varchar(50) COMMENT 'gateway for ip6 address'; +ALTER TABLE `cloud`.`nics` ADD COLUMN `ip6_cidr` varchar(50) COMMENT 'cidr for ip6 address'; + +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_gateway` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_cidr` varchar(255); +ALTER TABLE `cloud`.`vlan` ADD COLUMN `ip6_range` varchar(255); + +ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns1` varchar(255); +ALTER TABLE `cloud`.`data_center` ADD COLUMN `ip6_dns2` varchar(255); -- DB views for list api @@ -229,6 +567,9 @@ CREATE VIEW `cloud`.`user_vm_view` AS nics.uuid nic_uuid, nics.network_id network_id, nics.ip4_address ip_address, + 
nics.ip6_address ip6_address, + nics.ip6_gateway ip6_gateway, + nics.ip6_cidr ip6_cidr, nics.default_nic is_default_nic, nics.gateway gateway, nics.netmask netmask, @@ -346,6 +687,8 @@ CREATE VIEW `cloud`.`domain_router_view` AS data_center.name data_center_name, data_center.dns1 dns1, data_center.dns2 dns2, + data_center.ip6_dns1 ip6_dns1, + data_center.ip6_dns2 ip6_dns2, host.id host_id, host.uuid host_uuid, host.name host_name, @@ -358,6 +701,9 @@ CREATE VIEW `cloud`.`domain_router_view` AS nics.uuid nic_uuid, nics.network_id network_id, nics.ip4_address ip_address, + nics.ip6_address ip6_address, + nics.ip6_gateway ip6_gateway, + nics.ip6_cidr ip6_cidr, nics.default_nic is_default_nic, nics.gateway gateway, nics.netmask netmask, @@ -1269,6 +1615,8 @@ CREATE VIEW `cloud`.`data_center_view` AS data_center.description, data_center.dns1, data_center.dns2, + data_center.ip6_dns1, + data_center.ip6_dns2, data_center.internal_dns1, data_center.internal_dns2, data_center.guest_network_cidr, @@ -1287,39 +1635,50 @@ CREATE VIEW `cloud`.`data_center_view` AS left join `cloud`.`domain` ON data_center.domain_id = domain.id; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'direct.agent.pool.size', '500', 'Default size for DirectAgentPool'); -ALTER TABLE `cloud`.`op_dc_vnet_alloc` DROP INDEX i_op_dc_vnet_alloc__vnet__data_center_id; - -ALTER TABLE `cloud`.`op_dc_vnet_alloc` ADD CONSTRAINT UNIQUE `i_op_dc_vnet_alloc__vnet__data_center_id`(`vnet`, `physical_network_id`, `data_center_id`); - -CREATE TABLE `cloud`.`region` ( - `id` int unsigned NOT NULL UNIQUE, - `name` varchar(255) NOT NULL UNIQUE, - `end_point` varchar(255) NOT NULL, - `api_key` varchar(255), - `secret_key` varchar(255), +CREATE TABLE `cloud`.`baremetal_dhcp_devices` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) UNIQUE, + `nsp_id` bigint unsigned DEFAULT NULL COMMENT 'Network Service Provider ID', + `pod_id` bigint unsigned 
DEFAULT NULL COMMENT 'Pod id where this dhcp server in', + `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the external device', + `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external dhcp device is added', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external dhcp device', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -CREATE TABLE `cloud`.`region_sync` ( - `id` bigint unsigned NOT NULL auto_increment, - `region_id` int unsigned NOT NULL, - `api` varchar(1024) NOT NULL, - `created` datetime NOT NULL COMMENT 'date created', - `processed` tinyint NOT NULL default '0', +CREATE TABLE `cloud`.`baremetal_pxe_devices` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) UNIQUE, + `nsp_id` bigint unsigned DEFAULT NULL COMMENT 'Network Service Provider ID', + `pod_id` bigint unsigned DEFAULT NULL COMMENT 'Pod id where this pxe server in, for pxe per zone this field is null', + `device_type` varchar(255) DEFAULT NULL COMMENT 'type of the pxe device', + `physical_network_id` bigint unsigned DEFAULT NULL COMMENT 'id of the physical network in to which external pxe device is added', + `host_id` bigint unsigned DEFAULT NULL COMMENT 'host id coresponding to the external pxe device', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT INTO `cloud`.`region` values ('1','Local','http://localhost:8080/client/api','',''); -ALTER TABLE `cloud`.`account` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; -ALTER TABLE `cloud`.`user` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; -ALTER TABLE `cloud`.`domain` ADD COLUMN `region_id` int unsigned NOT NULL DEFAULT '1'; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.cpus', '40', 'The default maximum number of cpu cores that can be used for an account'); +CREATE TABLE `cloud`.`ucs_blade` ( + `id` bigint 
unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) UNIQUE, + `ucs_manager_id` bigint unsigned NOT NULL, + `host_id` bigint unsigned DEFAULT NULL, + `dn` varchar(512) NOT NULL, + `profile_dn` varchar(512) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Account Defaults', 'DEFAULT', 'management-server', 'max.account.memory', '40960', 'The default maximum memory (in MB) that can be used for an account'); +CREATE TABLE `cloud`.`ucs_manager` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `uuid` varchar(40) UNIQUE, + `zone_id` bigint unsigned NOT NULL, + `name` varchar(128) DEFAULT NULL, + `url` varchar(255) NOT NULL, + `username` varchar(255) NOT NULL, + `password` varchar(255) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.cpus', '40', 'The default maximum number of cpu cores that can be used for a project'); -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Project Defaults', 'DEFAULT', 'management-server', 'max.project.memory', '40960', 'The default maximum memory (in MB) that can be used for a project'); +SET foreign_key_checks = 1; diff --git a/setup/db/db/schema-410to420-cleanup.sql b/setup/db/db/schema-410to420-cleanup.sql new file mode 100644 index 00000000000..51970b21b89 --- /dev/null +++ b/setup/db/db/schema-410to420-cleanup.sql @@ -0,0 +1,21 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema cleanup from 4.1.0 to 4.2.0; +--; + diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql new file mode 100644 index 00000000000..ca15bdaf781 --- /dev/null +++ b/setup/db/db/schema-410to420.sql @@ -0,0 +1,187 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.1.0 to 4.2.0; +--; + +-- Disable foreign key checking +SET foreign_key_checks = 0; + +ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. 
hosts in cluster supported by hypervisor'; +UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware'; +INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32); +DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max'; +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen'); + +alter table template_host_ref add state varchar(255); +alter table template_host_ref add update_count bigint unsigned; +alter table template_host_ref add updated datetime; +alter table volume_host_ref add state varchar(255); +alter table volume_host_ref add update_count bigint unsigned; +alter table volume_host_ref add updated datetime; +alter table template_spool_ref add updated datetime; +CREATE TABLE `cloud`.`object_datastore_ref` ( + `id` bigint unsigned NOT NULL auto_increment, + `datastore_uuid` varchar(255) NOT NULL, + `datastore_role` varchar(255) NOT NULL, + `object_uuid` varchar(255) NOT NULL, + `object_type` varchar(255) NOT NULL, + `created` DATETIME NOT NULL, + `last_updated` DATETIME, + `job_id` varchar(255), + `download_pct` int(10) unsigned, + `download_state` varchar(255), + `url` varchar(255), + `format` varchar(255), + `checksum` varchar(255), + `error_str` varchar(255), + `local_path` varchar(255), + `install_path` varchar(255), + `size` bigint unsigned COMMENT 'the size of the template on the pool', + `state` varchar(255) NOT NULL, + `update_count` bigint unsigned NOT NULL, + `updated` DATETIME, + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`data_store_provider` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of primary data store provider', + `uuid` 
varchar(255) NOT NULL COMMENT 'uuid of primary data store provider', + PRIMARY KEY(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +CREATE TABLE `cloud`.`image_data_store` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', + `name` varchar(255) NOT NULL COMMENT 'name of data store', + `image_provider_id` bigint unsigned NOT NULL COMMENT 'id of image_data_store_provider', + `protocol` varchar(255) NOT NULL COMMENT 'protocol of data store', + `data_center_id` bigint unsigned COMMENT 'datacenter id of data store', + `scope` varchar(255) COMMENT 'scope of data store', + `uuid` varchar(255) COMMENT 'uuid of data store', + PRIMARY KEY(`id`), + CONSTRAINT `fk_tags__image_data_store_provider_id` FOREIGN KEY(`image_provider_id`) REFERENCES `data_store_provider`(`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`vm_template` ADD COLUMN `image_data_store_id` bigint unsigned; + +ALTER TABLE `cloud`.`service_offering` ADD COLUMN `is_volatile` tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT 'true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk'; + +ALTER TABLE `cloud`.`networks` ADD COLUMN `network_cidr` VARCHAR(18) COMMENT 'The network cidr for the isolated guest network which uses IP Reservation facility.For networks not using IP reservation, network_cidr is always null.'; +ALTER TABLE `cloud`.`networks` CHANGE `cidr` `cidr` varchar(18) COMMENT 'CloudStack managed vms get IP address from cidr.In general this cidr also serves as the network CIDR. 
But in case IP reservation feature is being used by a Guest network, networkcidr is the Effective network CIDR for that network'; + + +CREATE TABLE `vpc_service_map` ( + `id` bigint unsigned NOT NULL auto_increment, + `vpc_id` bigint unsigned NOT NULL COMMENT 'vpc_id', + `service` varchar(255) NOT NULL COMMENT 'service', + `provider` varchar(255) COMMENT 'service provider', + `created` datetime COMMENT 'date created', + PRIMARY KEY (`id`), + CONSTRAINT `fk_vpc_service_map__vpc_id` FOREIGN KEY(`vpc_id`) REFERENCES `vpc`(`id`) ON DELETE CASCADE, + UNIQUE (`vpc_id`, `service`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vm.instancename.flag', 'false', 'Append guest VM display Name (if set) to the internal name of the VM'); + +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (208, UUID(), 6, 'Windows 8'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (209, UUID(), 6, 'Windows 8 (64 bit)'); +INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (210, UUID(), 6, 'Windows 8 Server (64 bit)'); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8', 208); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 (64 bit)', 209); +INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 Server (64 bit)', 210); + +CREATE TABLE `cloud`.`user_vm_clone_setting` ( + `vm_id` bigint unsigned NOT NULL COMMENT 'guest VM id', + `clone_type` varchar(10) NOT NULL COMMENT 'Full or Linked Clone (applicable to VMs on ESX)', + PRIMARY KEY (`vm_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'UserVmManager', 'vmware.create.full.clone' , 'false', 'If set to true, 
creates VMs as full clones on ESX hypervisor'); + +-- Re-enable foreign key checking, at the end of the upgrade path +SET foreign_key_checks = 1; + + +CREATE TABLE nic_secondary_ips ( + `id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT, + `uuid` varchar(40), + `vmId` bigint unsigned COMMENT 'vm instance id', + `nicId` bigint unsigned NOT NULL, + `ip4_address` char(40) COMMENT 'ip4 address', + `ip6_address` char(40) COMMENT 'ip6 address', + `network_id` bigint unsigned NOT NULL COMMENT 'network configuration id', + `created` datetime NOT NULL COMMENT 'date created', + `account_id` bigint unsigned NOT NULL COMMENT 'owner. foreign key to account table', + `domain_id` bigint unsigned NOT NULL COMMENT 'the domain that the owner belongs to', + PRIMARY KEY (`id`), + CONSTRAINT `fk_nic_secondary_ip__vmId` FOREIGN KEY `fk_nic_secondary_ip__vmId`(`vmId`) REFERENCES `vm_instance`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_nic_secondary_ip__networks_id` FOREIGN KEY `fk_nic_secondary_ip__networks_id`(`network_id`) REFERENCES `networks`(`id`), + CONSTRAINT `uc_nic_secondary_ip__uuid` UNIQUE (`uuid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +ALTER TABLE `cloud`.`nics` ADD COLUMN secondary_ip SMALLINT DEFAULT '0' COMMENT 'secondary ips configured for the nic'; +ALTER TABLE `cloud`.`user_ip_address` ADD COLUMN dnat_vmip VARCHAR(40); + +ALTER TABLE `cloud`.`alert` ADD COLUMN `archived` tinyint(1) unsigned NOT NULL DEFAULT 0; +ALTER TABLE `cloud`.`event` ADD COLUMN `archived` tinyint(1) unsigned NOT NULL DEFAULT 0; +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'alert.purge.interval', '86400', 'The interval (in seconds) to wait before running the alert purge thread'); +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'alert.purge.delay', '0', 'Alerts older than specified number days will be purged. 
Set this value to 0 to never delete alerts'); + +DROP VIEW IF EXISTS `cloud`.`event_view`; +CREATE VIEW `cloud`.`event_view` AS + select + event.id, + event.uuid, + event.type, + event.state, + event.description, + event.created, + event.level, + event.parameters, + event.start_id, + eve.uuid start_uuid, + event.user_id, + event.archived, + user.username user_name, + account.id account_id, + account.uuid account_uuid, + account.account_name account_name, + account.type account_type, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + projects.id project_id, + projects.uuid project_uuid, + projects.name project_name + from + `cloud`.`event` + inner join + `cloud`.`account` ON event.account_id = account.id + inner join + `cloud`.`domain` ON event.domain_id = domain.id + inner join + `cloud`.`user` ON event.user_id = user.id + left join + `cloud`.`projects` ON projects.project_account_id = event.account_id + left join + `cloud`.`event` eve ON event.start_id = eve.id; diff --git a/setup/db/deploy-db-dev.sh b/setup/db/deploy-db-dev.sh index a40e278b002..29ec4db6050 100755 --- a/setup/db/deploy-db-dev.sh +++ b/setup/db/deploy-db-dev.sh @@ -55,11 +55,6 @@ if [ ! -f create-index-fk.sql ]; then exit 6; fi -if [ ! -f create-schema-view.sql ]; then - printf "Error: Unable to find create-schema-view.sql\n" - exit 7 -fi - PATHSEP=':' if [[ $OSTYPE == "cygwin" ]] ; then export CATALINA_HOME=`cygpath -m $CATALINA_HOME` @@ -105,12 +100,6 @@ if [ $? -ne 0 ]; then exit 11 fi -mysql --user=cloud --password=cloud cloud < create-schema-view.sql -if [ $? -ne 0 ]; then - printf "Error: Cannot execute create-schema-view.sql\n" - exit 11 -fi - CP=./ CP=${CP}$PATHSEP$CATALINA_HOME/conf diff --git a/setup/db/deploy-db-simulator.sh b/setup/db/deploy-db-simulator.sh index c918df43009..20e12d37794 100644 --- a/setup/db/deploy-db-simulator.sh +++ b/setup/db/deploy-db-simulator.sh @@ -55,11 +55,6 @@ if [ ! 
-f create-index-fk.sql ]; then exit 6; fi -if [ ! -f create-schema-view.sql ]; then - printf "Error: Unable to find create-schema-view.sql\n" - exit 7 -fi - PATHSEP=':' if [[ $OSTYPE == "cygwin" ]] ; then @@ -109,12 +104,6 @@ if [ $? -ne 0 ]; then exit 11 fi -mysql --user=cloud --password=cloud cloud < create-schema-view.sql -if [ $? -ne 0 ]; then - printf "Error: Cannot execute create-schema-view.sql\n" - exit 11 -fi - mysql --user=cloud --password=cloud cloud < create-schema-simulator.sql if [ $? -ne 0 ]; then printf "Error: Cannot execute create-schema-simulator.sql\n" diff --git a/setup/db/templates.sql b/setup/db/templates.sql index 9980b159630..2f95f1e00f8 100755 --- a/setup/db/templates.sql +++ b/setup/db/templates.sql @@ -16,12 +16,13 @@ -- under the License. INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (1, UUID(), 'routing-1', 'SystemVM Template (XenServer)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (XenServer)', 'VHD', 133, 0, 1, 'XenServer'); + VALUES (1, UUID(), 'routing-1', 'SystemVM Template (XenServer)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (XenServer)', 'VHD', 133, 0, 1, 'XenServer' ); + INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, removed, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) VALUES (2, UUID(), 'centos53-x86_64', 'CentOS 5.3(64-bit) no GUI (XenServer)', 1, now(), now(), 'BUILTIN', 0, 64, 1, 
'http://download.cloud.com/templates/builtin/f59f18fb-ae94-4f97-afd2-f84755767aca.vhd.bz2', 'b63d854a9560c013142567bbae8d98cf', 0, 'CentOS 5.3(64-bit) no GUI (XenServer)', 'VHD', 12, 1, 1, 'XenServer', 1); INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (3, UUID(), 'routing-3', 'SystemVM Template (KVM)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2', '2755de1f9ef2ce4d6f2bee2efbb4da92', 0, 'SystemVM Template (KVM)', 'QCOW2', 15, 0, 1, 'KVM'); + VALUES (3, UUID(), 'routing-3', 'SystemVM Template (KVM)', 0, now(), 'SYSTEM', 0, 64, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2', '2755de1f9ef2ce4d6f2bee2efbb4da92', 0, 'SystemVM Template (KVM)', 'QCOW2', 15, 0, 1, 'KVM' ); INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, display_text, enable_password, format, guest_os_id, featured, cross_zones, hypervisor_type, extractable) VALUES (4, UUID(), 'centos55-x86_64', 'CentOS 5.5(64-bit) no GUI (KVM)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2', 'ed0e788280ff2912ea40f7f91ca7a249', 'CentOS 5.5(64-bit) no GUI (KVM)', 0, 'QCOW2', 112, 1, 1, 'KVM', 1); @@ -33,10 +34,10 @@ INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, VALUES (7, UUID(), 'centos53-x64', 'CentOS 5.3(64-bit) no GUI (vSphere)', 1, now(), 'BUILTIN', 0, 64, 1, 'http://download.cloud.com/releases/2.2.0/CentOS5.3-x86_64.ova', 'f6f881b7f2292948d8494db837fe0f47', 0, 'CentOS 5.3(64-bit) no GUI (vSphere)', 'OVA', 12, 1, 1, 'VMware', 1); INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, 
enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (8, UUID(), 'routing-8', 'SystemVM Template (vSphere)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova', '7137e453f950079ea2ba6feaafd939e8', 0, 'SystemVM Template (vSphere)', 'OVA', 15, 0, 1, 'VMware'); + VALUES (8, UUID(), 'routing-8', 'SystemVM Template (vSphere)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova', '7137e453f950079ea2ba6feaafd939e8', 0, 'SystemVM Template (vSphere)', 'OVA', 15, 0, 1, 'VMware' ); INSERT INTO `cloud`.`vm_template` (id, uuid, unique_name, name, public, created, type, hvm, bits, account_id, url, checksum, enable_password, display_text, format, guest_os_id, featured, cross_zones, hypervisor_type) - VALUES (9, UUID(), 'routing-9', 'SystemVM Template (HyperV)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (HyperV)', 'VHD', 15, 0, 1, 'Hyperv'); + VALUES (9, UUID(), 'routing-9', 'SystemVM Template (HyperV)', 0, now(), 'SYSTEM', 0, 32, 1, 'http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2', 'f613f38c96bf039f2e5cbf92fa8ad4f8', 0, 'SystemVM Template (HyperV)', 'VHD', 15, 0, 1, 'Hyperv' ); INSERT INTO `cloud`.`guest_os_category` (id, uuid, name) VALUES (1, UUID(), 'CentOS'); INSERT INTO `cloud`.`guest_os_category` (id, uuid, name) VALUES (2, UUID(), 'Debian'); diff --git a/test/integration/component/test_accounts.py b/test/integration/component/test_accounts.py index d0b4434bb4e..cdb3e58dd94 100644 --- a/test/integration/component/test_accounts.py +++ b/test/integration/component/test_accounts.py @@ -23,6 +23,7 @@ from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * from marvin import remoteSSHClient +from 
nose.plugins.attrib import attr import datetime @@ -90,7 +91,7 @@ class Services: "privateport": 22, "protocol": 'TCP', }, - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -112,7 +113,7 @@ class TestAccounts(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id diff --git a/test/integration/component/test_allocation_states.py b/test/integration/component/test_allocation_states.py index 006f8795fbf..103cb10c0cc 100644 --- a/test/integration/component/test_allocation_states.py +++ b/test/integration/component/test_allocation_states.py @@ -22,6 +22,7 @@ from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * +from nose.plugins.attrib import attr import datetime @@ -74,7 +75,7 @@ class Services: "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', "templatefilter": 'self', }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -87,7 +88,7 @@ class TestAllocationState(cloudstackTestCase): @classmethod def setUpClass(cls): cls.api_client = super( - TestResources, + TestAllocationState, cls ).getClsTestClient().getApiClient() cls.services = Services().services diff --git a/test/integration/component/test_blocker_bugs.py b/test/integration/component/test_blocker_bugs.py index cc5da0ace4a..33e4a73f712 100644 --- a/test/integration/component/test_blocker_bugs.py +++ b/test/integration/component/test_blocker_bugs.py @@ -76,7 +76,7 @@ class Services: "templates": { "displaytext": 'Template from snapshot', "name": 'Template from snapshot', - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + 
"ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", "hypervisor": 'XenServer', @@ -98,7 +98,7 @@ class Services: "endport": 22, "protocol": "TCP" }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "mode": 'advanced', @@ -122,7 +122,7 @@ class TestSnapshots(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["volume"]["zoneid"] = cls.zone.id @@ -546,7 +546,7 @@ class TestNATRules(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) #Create an account, network, VM and IP addresses cls.account = Account.create( @@ -740,7 +740,7 @@ class TestRouters(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) # Create an account, domain etc @@ -866,7 +866,7 @@ class TestRouterRestart(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -996,7 +996,7 @@ class TestTemplates(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.account = Account.create( diff --git a/test/integration/component/test_egress_rules.py b/test/integration/component/test_egress_rules.py index 73a91f41bfe..6b9cd4f25fc 100644 --- a/test/integration/component/test_egress_rules.py +++ b/test/integration/component/test_egress_rules.py @@ -118,7 +118,7 @@ class Services: "password": "password", "ipaddress": "192.168.100.21" }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', 
+ "ostype": 'CentOS 5.3 (64-bit)', # CentOS 5.3 (64-bit) "sleep": 60, "timeout": 10, @@ -160,7 +160,7 @@ class TestDefaultSecurityGroupEgress(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -318,7 +318,7 @@ class TestAuthorizeIngressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -475,7 +475,7 @@ class TestDefaultGroupEgress(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -675,7 +675,7 @@ class TestDefaultGroupEgressAfterDeploy(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -857,7 +857,7 @@ class TestRevokeEgressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1118,7 +1118,7 @@ class TestInvalidAccountAuthroize(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1239,7 +1239,7 @@ class TestMultipleAccountsEgressRuleNeg(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id 
cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1486,7 +1486,7 @@ class TestMultipleAccountsEgressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1782,7 +1782,7 @@ class TestStartStopVMWithEgressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1993,7 +1993,7 @@ class TestInvalidParametersForEgress(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -2179,7 +2179,7 @@ class TestEgressAfterHostMaintainance(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id diff --git a/test/integration/component/test_eip_elb.py b/test/integration/component/test_eip_elb.py index 4c8dcbe2c07..89fdd25fb25 100644 --- a/test/integration/component/test_eip_elb.py +++ b/test/integration/component/test_eip_elb.py @@ -81,7 +81,7 @@ class Services: "username": 'nsroot', "password": 'nsroot' }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -100,7 +100,7 @@ class TestEIP(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id @@ -922,7 +922,7 @@ class TestELB(cloudstackTestCase): 
cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id diff --git a/test/integration/component/test_network_offering.py b/test/integration/component/test_network_offering.py index c1a518b458a..0de03aa0938 100644 --- a/test/integration/component/test_network_offering.py +++ b/test/integration/component/test_network_offering.py @@ -138,7 +138,7 @@ class Services: "publicport": 22, "protocol": 'TCP', }, - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -161,7 +161,7 @@ class TestNOVirtualRouter(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id @@ -739,7 +739,7 @@ class TestNOWithNetscaler(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id @@ -1374,7 +1374,7 @@ class TestNetworkUpgrade(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id @@ -1830,7 +1830,7 @@ class TestSharedNetworkWithoutIp(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.services["virtual_machine"]["template"] = cls.template.id diff --git a/test/integration/component/test_project_configs.py 
b/test/integration/component/test_project_configs.py index d5ce9d6024d..854b5a42853 100644 --- a/test/integration/component/test_project_configs.py +++ b/test/integration/component/test_project_configs.py @@ -87,7 +87,7 @@ class Services: "template": { "displaytext": "Public Template", "name": "Public template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", "hypervisor": 'XenServer', "format": 'VHD', @@ -98,7 +98,7 @@ class Services: "configs": { "project.invite.timeout": 300, }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, diff --git a/test/integration/component/test_project_limits.py b/test/integration/component/test_project_limits.py index 8f7c1281b23..afae1808d04 100644 --- a/test/integration/component/test_project_limits.py +++ b/test/integration/component/test_project_limits.py @@ -86,7 +86,7 @@ class Services: "template": { "displaytext": "Cent OS Template", "name": "Cent OS Template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', }, "network_offering": { @@ -112,7 +112,7 @@ class Services: "name": "Test Network", "displaytext": "Test Network", }, - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -485,7 +485,7 @@ class TestResourceLimitsProject(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -924,7 +924,7 @@ class TestMaxProjectNetworks(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.service_offering = ServiceOffering.create( cls.api_client, diff --git 
a/test/integration/component/test_project_resources.py b/test/integration/component/test_project_resources.py index 27452be811f..24091d2f89c 100644 --- a/test/integration/component/test_project_resources.py +++ b/test/integration/component/test_project_resources.py @@ -87,7 +87,7 @@ class Services: "template": { "displaytext": "Cent OS Template", "name": "Cent OS Template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', "ispublic": False, }, @@ -130,7 +130,7 @@ class Services: "endport": 22, "cidrlist": '0.0.0.0/0', }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -152,7 +152,7 @@ class TestOfferings(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -332,7 +332,7 @@ class TestNetwork(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -520,7 +520,7 @@ class TestTemplates(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -749,7 +749,7 @@ class TestSnapshots(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -895,7 +895,7 @@ class TestPublicIpAddress(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1194,7 +1194,7 @@ class TestSecurityGroup(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + 
cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["server"]["zoneid"] = cls.zone.id diff --git a/test/integration/component/test_project_usage.py b/test/integration/component/test_project_usage.py index 4561576543c..16d51068deb 100644 --- a/test/integration/component/test_project_usage.py +++ b/test/integration/component/test_project_usage.py @@ -75,7 +75,7 @@ class Services: "templates": { "displaytext": 'Template', "name": 'Template', - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2" }, @@ -87,7 +87,7 @@ class Services: "isextractable": True, "isfeatured": True, "ispublic": True, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', }, "lbrule": { "name": "SSH", @@ -105,7 +105,7 @@ class Services: "username": "test", "password": "test", }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -129,7 +129,7 @@ class TestVmUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -323,7 +323,7 @@ class TestPublicIPUsage(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -497,7 +497,7 @@ class TestVolumeUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id cls.services["server"]["diskoffering"] = cls.disk_offering.id @@ -678,7 +678,7 @@ class TestTemplateUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) 
cls.services["server"]["zoneid"] = cls.zone.id cls.account = Account.create( @@ -996,7 +996,7 @@ class TestLBRuleUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1179,7 +1179,7 @@ class TestSnapshotUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1355,7 +1355,7 @@ class TestNatRuleUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1537,7 +1537,7 @@ class TestVpnUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id diff --git a/test/integration/component/test_projects.py b/test/integration/component/test_projects.py index 811d092d18e..3e45cae6651 100644 --- a/test/integration/component/test_projects.py +++ b/test/integration/component/test_projects.py @@ -92,7 +92,7 @@ class Services: "publicport": 22, "protocol": 'TCP', }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -1451,7 +1451,7 @@ class TestProjectSuspendActivate(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) configs = Configurations.list( cls.api_client, diff --git a/test/integration/component/test_resource_limits.py b/test/integration/component/test_resource_limits.py index cd007f1595e..c20770ab24f 100644 --- a/test/integration/component/test_resource_limits.py +++ b/test/integration/component/test_resource_limits.py @@ -75,7 +75,7 @@ class Services: "template": { "displaytext": "Cent OS Template", 
"name": "Cent OS Template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', }, "network_offering": { @@ -101,7 +101,7 @@ class Services: "name": "test network", "displaytext": "test network" }, - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -122,7 +122,7 @@ class TestResourceLimitsAccount(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -885,7 +885,7 @@ class TestResourceLimitsDomain(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1341,7 +1341,7 @@ class TestMaxAccountNetworks(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.service_offering = ServiceOffering.create( diff --git a/test/integration/component/test_routers.py b/test/integration/component/test_routers.py index a65c5c34005..02a08dee91c 100644 --- a/test/integration/component/test_routers.py +++ b/test/integration/component/test_routers.py @@ -89,7 +89,7 @@ class Services: "cidr": '55.55.0.0/11', # Any network (For creating FW rule }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Used for Get_Template : CentOS 5.3 (64 bit) "mode": 'advanced', # Networking mode: Advanced, basic } @@ -108,7 +108,7 @@ class TestRouterServices(cloudstackTestCase): cls.template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -595,7 +595,7 @@ class TestRouterStopCreatePF(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - 
cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -806,7 +806,7 @@ class TestRouterStopCreateLB(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1017,7 +1017,7 @@ class TestRouterStopCreateFW(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["virtual_machine"]["zoneid"] = cls.zone.id diff --git a/test/integration/component/test_security_groups.py b/test/integration/component/test_security_groups.py index 13a87b67c83..39f6d6fcd26 100644 --- a/test/integration/component/test_security_groups.py +++ b/test/integration/component/test_security_groups.py @@ -90,7 +90,7 @@ class Services: "endport": -1, "cidrlist": '0.0.0.0/0', }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # CentOS 5.3 (64-bit) "sleep": 60, "timeout": 10, @@ -129,7 +129,7 @@ class TestDefaultSecurityGroup(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -401,7 +401,7 @@ class TestAuthorizeIngressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -536,7 +536,7 @@ class TestRevokeIngressRule(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -694,7 +694,7 @@ class TestDhcpOnlyRouter(cloudstackTestCase): template = get_template( cls.api_client, 
cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id @@ -830,7 +830,7 @@ class TestdeployVMWithUserData(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["domainid"] = cls.domain.id @@ -989,7 +989,7 @@ class TestDeleteSecurityGroup(cloudstackTestCase): template = get_template( self.apiclient, self.zone.id, - self.services["ostypeid"] + self.services["ostype"] ) self.services["domainid"] = self.domain.id @@ -1234,7 +1234,7 @@ class TestIngressRule(cloudstackTestCase): template = get_template( self.apiclient, self.zone.id, - self.services["ostypeid"] + self.services["ostype"] ) self.services["domainid"] = self.domain.id diff --git a/test/integration/component/test_templates.py b/test/integration/component/test_templates.py index e9be63d66b8..65d9fe0a764 100644 --- a/test/integration/component/test_templates.py +++ b/test/integration/component/test_templates.py @@ -77,7 +77,7 @@ class Services: 0: { "displaytext": "Public Template", "name": "Public template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", "hypervisor": 'XenServer', "format": 'VHD', @@ -89,12 +89,12 @@ class Services: "template": { "displaytext": "Cent OS Template", "name": "Cent OS Template", - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', }, "templatefilter": 'self', "destzoneid": 2, # For Copy template (Destination zone) - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "sleep": 60, "timeout": 10, "mode": 'advanced', # Networking mode: Advanced, basic @@ -294,7 +294,7 @@ class TestTemplates(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) 
cls.services["virtual_machine"]["zoneid"] = cls.zone.id cls.account = Account.create( diff --git a/test/integration/component/test_usage.py b/test/integration/component/test_usage.py index 34dbc3780d0..4251eab9555 100644 --- a/test/integration/component/test_usage.py +++ b/test/integration/component/test_usage.py @@ -71,7 +71,7 @@ class Services: "templates": { "displaytext": 'Template', "name": 'Template', - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.qcow2.bz2" }, @@ -83,7 +83,7 @@ class Services: "isextractable": True, "isfeatured": True, "ispublic": True, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', }, "lbrule": { "name": "SSH", @@ -101,7 +101,7 @@ class Services: "username": "test", "password": "test", }, - "ostypeid": '01853327-513e-4508-9628-f1f55db1946f', + "ostype": 'CentOS 5.3 (64-bit)', # Cent OS 5.3 (64 bit) "sleep": 60, "timeout": 10, @@ -122,7 +122,7 @@ class TestVmUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -305,7 +305,7 @@ class TestPublicIPUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -459,7 +459,7 @@ class TestVolumeUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id cls.services["server"]["diskoffering"] = cls.disk_offering.id @@ -629,7 +629,7 @@ class TestTemplateUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id cls.account = 
Account.create( @@ -928,7 +928,7 @@ class TestLBRuleUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1090,7 +1090,7 @@ class TestSnapshotUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1255,7 +1255,7 @@ class TestNatRuleUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id @@ -1416,7 +1416,7 @@ class TestVpnUsage(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["server"]["zoneid"] = cls.zone.id diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py index 5819001e1b9..0a7813065ae 100644 --- a/test/integration/component/test_volumes.py +++ b/test/integration/component/test_volumes.py @@ -79,14 +79,14 @@ class Services: "name": "testISO", "url": "http://iso.linuxquestions.org/download/504/1819/http/gd4.tuwien.ac.at/dsl-4.4.10.iso", # Source URL where ISO is located - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', }, "custom_volume": { "customdisksize": 2, "diskname": "Custom disk", }, "sleep": 50, - "ostypeid": 'bc66ada0-99e7-483b-befc-8fb0c2129b70', + "ostype": 'CentOS 5.3 (64-bit)', "mode": 'advanced', } @@ -108,7 +108,7 @@ class TestAttachVolume(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["zoneid"] = cls.zone.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -378,7 +378,7 @@ class TestAttachDetachVolume(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - 
cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["zoneid"] = cls.zone.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -623,7 +623,7 @@ class TestAttachVolumeISO(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["zoneid"] = cls.zone.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -814,7 +814,7 @@ class TestVolumes(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["zoneid"] = cls.zone.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id @@ -1054,7 +1054,7 @@ class TestDeployVmWithCustomDisk(cloudstackTestCase): template = get_template( cls.api_client, cls.zone.id, - cls.services["ostypeid"] + cls.services["ostype"] ) cls.services["zoneid"] = cls.zone.id cls.services["virtual_machine"]["zoneid"] = cls.zone.id diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py index 3596ca244ba..8d65c00c896 100644 --- a/test/integration/smoke/test_vm_life_cycle.py +++ b/test/integration/smoke/test_vm_life_cycle.py @@ -854,9 +854,6 @@ class TestVMLifeCycle(cloudstackTestCase): ) expunge_delay = int(config[0].value) - if expunge_delay < 600: - expunge_delay = 600 - # Wait for some time more than expunge.delay time.sleep(expunge_delay * 2) #VM should be destroyed unless expunge thread hasn't run @@ -866,9 +863,6 @@ class TestVMLifeCycle(cloudstackTestCase): name='expunge.interval' ) expunge_cycle = int(config[0].value) - if expunge_cycle < 600: - expunge_cycle = 600 - wait_time = expunge_cycle * 2 while wait_time >= 0: list_vm_response = list_virtual_machines( diff --git a/test/pom.xml b/test/pom.xml index 4507e8cc916..d4b88326fa2 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.1.0-SNAPSHOT + 4.2.0-SNAPSHOT diff --git a/test/selenium/ReadMe.txt 
b/test/selenium/ReadMe.txt new file mode 100644 index 00000000000..30b0e0df7a0 --- /dev/null +++ b/test/selenium/ReadMe.txt @@ -0,0 +1,52 @@ +############################################## +This files contains following: + +1) Installation requirements +2) Test Pre requisites +3) Running the Test and Generating the report +############################################## + + + +########################################################################################################################################## + +1) Installtion Requirements + + +1)Firefox depending on your OS (Good to have Firebug and Selenium IDE for troubleshooting and dev work) + + +2)Install Python 2.7. Recommend to use Active State Python + + +3) Now Open CMD/Terminal and type all of following + +- pypm install pycrypto (Installs Pycrypto) +- pypm install paramiko (Install paramiko) +- pip install unittest-xml-reporting (Install XML Test Runner) +- pip install -U selenium (Installs Selenium) + + +5) Now get the HTMLTestRunner for nice looking report generation. +- http://tungwaiyip.info/software/HTMLTestRunner.html +- Download and put this file into Lib of your python installation. + + +########################################################################################################################################## + +2) Test Prerequisites + +- Download and install CS +- Log into the management server nad Add a Zone. (Must be Advance Zone and Hypervisor type must be Xen) + +########################################################################################################################################## + +3) Running the Test and Generating the report + +- Folder smoke contains main.py +- main.py is the file where all the tests are serialized. +- main.py supports HTML and XML reporting. Please refer to end of file to choose either. +- Typical usage is: python main.py for XML Reporting +- And python main.py >> results.html for HTML Reporting. 
+ +########################################################################################################################################## diff --git a/test/selenium/lib/Global_Locators.py b/test/selenium/lib/Global_Locators.py new file mode 100644 index 00000000000..b2d93cd997e --- /dev/null +++ b/test/selenium/lib/Global_Locators.py @@ -0,0 +1,224 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +''' +Variable Names are as follows +Logical Page Descriptor_____What Element Represents and/or where it is_____LocatorType + + +For Example :: + +instances_xpath = "//div[@id='navigation']/ul/li[2]/span[2]" + +Means this is:: xpath link for Instances which is present on Dashboard. +Any test cases that requires to go into Instances from Dashboard can use this variable now. + +This may not be intuitive as you go deep into the tree. + + + +for example + +stopinstanceforce_id + +The best way to know what this represents is to track by variable name +Under Instances / any instance is click on any instance (applies to any instance) / stop instance has a force stop check box when you click. +This link represents that. + + +Steps below do not have global locators. + +PF rule steps including and after filling port numbers. 
(Refer to vmLifeAndNetwork.py / def test_PF) +FW rule steps including and after filling port numbers. (Refer to vmLifeAndNetwork.py / def test_PF) +ADD Disk Offering page has Names, description, storage type etc etc +ADD Compute Offering page has Names, description, CPU Cores, CPU clocks type etc etc + +Create Acc, Delete Acc, Login and Logout are for test flow and are not test cases. They do not have global Locators. + +Such and many more data entry points that appear only once and hence we do not need glonal names for them. They are hard coded as and when needed in the scripts. + + +''' + +################################################################################################################################################################################################ + +## Links on the Main UI page (Dash board). Listed in the order they appear on screen +dashboard_xpath = "//div[@id='navigation']/ul/li" +instances_xpath = "//div[@id='navigation']/ul/li[2]/span[2]" # Link for Instance and following as self explanatory +storage_xpath = "//div[@id='navigation']/ul/li[3]/span[2]" +network_xpath = "//div[@id='navigation']/ul/li[4]/span[2]" +templates_xpath = "//div[@id='navigation']/ul/li[5]/span[2]" +events_xpath = "//div[@id='navigation']/ul/li[6]/span[2]" +projects_xpath = "//div[@id='navigation']/ul/li[7]/span[2]" +accounts_xpath = "//div[@id='navigation']/ul/li[8]/span[2]" +domains_xpath = "//div[@id='navigation']/ul/li[9]/span[2]" +infrastructure_xpath = "//div[@id='navigation']/ul/li[10]/span[2]" +globalSettings_xpath = "//div[@id='navigation']/ul/li[11]/span[2]" +serviceOfferings_xpath = "//div[@id='navigation']/ul/li[12]/span[2]" + +################################################################################################################################################################################################ + +## Instances Page +## Instances Main page + + +# Add Instance Button on top right corner of Instances page 
+add_instance_xpath = "//div[2]/div/div[2]/div/div[2]/span" + +# Add Instance Wizard next button +add_instance_next_xpath = "//div[4]/div[2]/div[3]/div[3]/span" + +# Table that lists all VM's under Instances page; General usage is to traverse through this table and search for the VM we are interested in. +instances_table_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + + +# Click any instance and following are available + +# Click ok on confirmation pop-up box for most actions listed below +actionconfirm_xpath = ("//button[@type='button']") + +# status of VM running. Click on VM > 3rd row in table +state_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[2]/div[2]/div/div/div[2]/div/table/tbody/tr[3]/td[2]/span" + +# Stop instance icon +stopinstance_css = "a[alt=\"Stop Instance\"] > span.icon" + +# stop instance forcefully check box available after stop instance is executed in separate pop up +stopinstanceforce_id = ("force_stop") + +# start instance icon +startinstance_css = "a[alt=\"Start Instance\"] > span.icon" + +yesconfirmation_xapth = "(//button[@type='button'])[2]" + + +# Destroy instance icon +destroyinstance_css = "a[alt=\"Destroy Instance\"] > span.icon" + +#Restore Instance icon +restoreinstance_css = "a[alt=\"Restore Instance\"] > span.icon" + +# Reboot instance +rebootinstance_css = "a[alt=\"Reboot Instance\"] > span.icon" + +################################################################################################################################################################################################ + + +## Network Page + +# Table that lists all Networks under Network page; General usage is to traverse through this table and search for the network we are interested in. 
+network_networktable_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/table/tbody/tr/td/span" + +# View IP addresses button on each network page +viewIp_css="div.view-all > a > span" + +# Acquire a new ip +acquireIP_xpath="//div[2]/div/div/div[2]/span" +# List of IP's within a netork table +network_iptables_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div[3]/div[2]/div/div[2]/table/tbody/tr/td/span" +# Configuration tab for each IP +ipConfiguration_text="Configuration" +# PF under configuration for each IP +ip_PF = "li.portForwarding > div.view-details" + + +################################################################################################################################################################################################ + + +## Servivce Offering Page + +# Selects Compute offering from drop down menu +Offering_compute_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[1]" + +# Selects System offering from drop down menu +Offering_system_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[2]" + +# Selects Disk offering from drop down menu +Offering_disk_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[3]" + +# Selects Network offering from drop down menu +Offering_network_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[4]" + +# Add Offering +Offering_add_xpath ="//div[3]/span" + +# Points to tbale that lists Offerings +Offering_table_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + +# Edit Button +Offering_edit_css = "a[alt=\"Edit\"] > span.icon" + +# Edit name box +Offering_editname_name = "name" + +# Edit description box +Offering_editdescription_name = "displaytext" + +# Edit finished click ok +Offering_editdone_css="div.button.done" + +# delete offering button for Disk only +Offering_delete_css = "a[alt=\"Delete Disk 
Offering\"] > span.icon" + +# delete offering button for Compute only +Offering_deletecompute_css = "a[alt=\"Delete Service Offering\"] > span.icon" + + + + +################################################################################################################################################################################################ + + +#### Templates Page + +# Selects Templates from drop down +template_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[1]" + +# Selects ISO from drop down +iso_xpath = "/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div/div/div/select/option[2]" + +# Add Template +AddTemplate_xpath = "//div[3]/span" + +# Points to table where all templates are +template_table_xpath ="/html/body/div/div/div[2]/div[2]/div[2]/div/div[2]/div[2]/table/tbody/tr/td/span" + +# Edit Template Button +template_edit_css = "a[alt=\"Edit\"] > span.icon" + +# Edit finished click OK +template_editdone_css = "div.button.done" + +# Delete Template button +template_delete_css = "a[alt=\"Delete Template\"] > span.icon" + + +################################################################################################################################################################################################ + + +## Login Page + +# Username box +login_username_css = "body.login > div.login > form > div.fields > div.field.username > input[name=\"username\"]" # Login>Username TextBox + +# Password Box +login_password_css = "body.login > div.login > form > div.fields > div.field.password > input[name=\"password\"]" # LoginPassword TextBox + +# Click ok to login +login_submit_css = "body.login > div.login > form > div.fields > input[type=\"submit\"]" # Login>Login Button (Submit button) + + diff --git a/debian/cloud-client-ui.install b/test/selenium/lib/initialize.py similarity index 76% rename from debian/cloud-client-ui.install rename to test/selenium/lib/initialize.py index ba1408afcc5..e8cc49adff4 100644 --- 
a/debian/cloud-client-ui.install +++ b/test/selenium/lib/initialize.py @@ -5,15 +5,27 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +''' +This will help pass webdriver (Browser instance) across our test cases. +''' -/usr/share/cloud/management/webapps/client/* + +from selenium import webdriver + +DRIVER = None + +def getOrCreateWebdriver(): + global DRIVER + DRIVER = DRIVER or webdriver.Firefox() + return DRIVER + diff --git a/test/selenium/smoke/Login_and_Accounts.py b/test/selenium/smoke/Login_and_Accounts.py new file mode 100644 index 00000000000..c5132d9754c --- /dev/null +++ b/test/selenium/smoke/Login_and_Accounts.py @@ -0,0 +1,253 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import Global_Locators +import initialize + + + +class login(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.base_url = "http://10.223.49.206:8080/" # Your management Server IP goes here + self.verificationErrors = [] + + + def test_login(self): + + # Here we will clear the test box for Username and Password and fill them with actual login data. + # After that we will click Login (Submit button) + driver = self.driver + driver.maximize_window() + driver.get(self.base_url + "client/") + driver.find_element_by_css_selector(Global_Locators.login_username_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("admin") + driver.find_element_by_css_selector(Global_Locators.login_password_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password") + driver.find_element_by_css_selector(Global_Locators.login_submit_css).click() + time.sleep(5) + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + + +class logout(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.driver.implicitly_wait(100) + self.verificationErrors = [] + + + + def test_logout(self): + + # Here we will clear the test box for Username and Password and fill them with 
actual login data. + # After that we will click Login (Submit button) + driver = self.driver + driver.find_element_by_xpath("//div[@id='navigation']/ul/li").click() + driver.find_element_by_css_selector("div.icon.options").click() + driver.find_element_by_link_text("Logout").click() + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + + +class login_test(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_logintest(self): + + # Here we will clear the test box for Username and Password and fill them with actual login data. + # After that we will click Login (Submit button) + driver = self.driver + driver.find_element_by_css_selector(Global_Locators.login_username_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_username_css).send_keys("test") + driver.find_element_by_css_selector(Global_Locators.login_password_css).clear() + driver.find_element_by_css_selector(Global_Locators.login_password_css).send_keys("password") + driver.find_element_by_css_selector(Global_Locators.login_submit_css).click() + time.sleep(5) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + +################################################################################################################################################ + + +class createAcc(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() 
+ self.verificationErrors = [] + + + + def test_createacc(self): + + driver = self.driver + self.driver.implicitly_wait(100) + driver.find_element_by_xpath("//div[@id='navigation']/ul/li[8]/span[2]").click() + driver.find_element_by_xpath("//div[3]/span").click() + driver.find_element_by_id("label_username").clear() + driver.find_element_by_id("label_username").send_keys("test") + driver.find_element_by_id("password").clear() + driver.find_element_by_id("password").send_keys("password") + driver.find_element_by_id("label_confirm_password").clear() + driver.find_element_by_id("label_confirm_password").send_keys("password") + driver.find_element_by_id("label_email").clear() + driver.find_element_by_id("label_email").send_keys("test@citrix.com") + driver.find_element_by_id("label_first_name").clear() + driver.find_element_by_id("label_first_name").send_keys("test") + driver.find_element_by_id("label_last_name").clear() + driver.find_element_by_id("label_last_name").send_keys("test") + driver.find_element_by_id("label_domain").click() + Select(driver.find_element_by_id("label_type")).select_by_visible_text("Admin") + Select(driver.find_element_by_id("label_timezone")).select_by_visible_text("[UTC-08:00] Pacific Standard Time") + driver.find_element_by_xpath("//button[@type='button']").click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ + + +class tearAcc(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_tearacc(self): + + 
driver = self.driver + driver.find_element_by_css_selector("li.navigation-item.accounts").click() + driver.find_element_by_css_selector("tr.odd > td.name.first").click() + driver.find_element_by_css_selector("a[alt=\"Delete account\"] > span.icon").click() + driver.find_element_by_xpath("(//button[@type='button'])[2]").click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + + self.driver.quit() + self.assertEqual([], self.verificationErrors) + + + +################################################################################################################################################ diff --git a/test/selenium/smoke/Service_Offering.py b/test/selenium/smoke/Service_Offering.py new file mode 100644 index 00000000000..66478e60414 --- /dev/null +++ b/test/selenium/smoke/Service_Offering.py @@ -0,0 +1,426 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + + +class Disk_offering_Add(unittest.TestCase): + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_diskadd(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + # Add offering + driver.find_element_by_xpath(Global_Locators.Offering_add_xpath).click() + + # Following have names.. so they do not have their global entries. + driver.find_element_by_name("name").clear() + driver.find_element_by_name("name").send_keys("Test Disk Name") + driver.find_element_by_name("description").clear() + driver.find_element_by_name("description").send_keys("Test Disk Description") + driver.find_element_by_name("disksize").clear() + driver.find_element_by_name("disksize").send_keys("1") + driver.find_element_by_xpath("//button[@type='button']").click() + time.sleep(20) + + ##Verification will be if this offering shows up into table and we can actually edit it. 
+ + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + +class Disk_offering_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_diskedit(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Disk Name": + link.click() + + time.sleep(2) + + # Click Edit + driver.find_element_by_css_selector(Global_Locators.Offering_edit_css).click() + + #Change name + driver.find_element_by_name(Global_Locators.Offering_editname_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editname_name).send_keys("Test Name") + + # Change Description + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).send_keys("Test Description") + + #Click Done + driver.find_element_by_css_selector(Global_Locators.Offering_editdone_css).click() + time.sleep(10) + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + self.assertEqual([], 
self.verificationErrors) + + # Now we will find this offering and delete it!! + + + + + + +class Disk_offering_Delete(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_diskdelete(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Disk offering + driver.find_element_by_xpath(Global_Locators.Offering_disk_xpath).click() + + ## Action part + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Name": + link.click() + + time.sleep(2) + + # Click Delete + driver.find_element_by_css_selector(Global_Locators.Offering_delete_css).click() + time.sleep(2) + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(20) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + + +class Compute_offering_Add(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computeadd(self): + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + 
driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + ## Action part + + # Add offering + driver.find_element_by_xpath(Global_Locators.Offering_add_xpath).click() + + # Following do not have Global locators + driver.find_element_by_id("label_name").clear() + driver.find_element_by_id("label_name").send_keys("Test Compute Name") + driver.find_element_by_id("label_description").clear() + driver.find_element_by_id("label_description").send_keys("Test Compute Description") + driver.find_element_by_id("label_num_cpu_cores").clear() + driver.find_element_by_id("label_num_cpu_cores").send_keys("2") + driver.find_element_by_id("label_cpu_mhz").clear() + driver.find_element_by_id("label_cpu_mhz").send_keys("2000") + driver.find_element_by_id("label_memory_mb").clear() + driver.find_element_by_id("label_memory_mb").send_keys("2048") + driver.find_element_by_id("label_network_rate").clear() + driver.find_element_by_id("label_network_rate").send_keys("10") + driver.find_element_by_id("label_offer_ha").click() + driver.find_element_by_xpath("//button[@type='button']").click() + + time.sleep(2) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + time.sleep(30) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + +class Compute_offering_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computeedit(self): + + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + ## Action part + # Go to Service Offerings + 
driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + # We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Compute Name": + link.click() + + time.sleep(2) + + + # Click Edit + driver.find_element_by_css_selector(Global_Locators.Offering_edit_css).click() + + #Change name + driver.find_element_by_name(Global_Locators.Offering_editname_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editname_name).send_keys("Test Name") + + # Change Description + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).clear() + driver.find_element_by_name(Global_Locators.Offering_editdescription_name).send_keys("Test Description") + + #Click Done + driver.find_element_by_css_selector(Global_Locators.Offering_editdone_css).click() + time.sleep(10) + + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + + +class Compute_offering_Delete(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_computedelete(self): + + + driver = self.driver + self.driver.implicitly_wait(200) + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Service Offerings + driver.find_element_by_xpath(Global_Locators.serviceOfferings_xpath).click() + + #Select Compute offering + driver.find_element_by_xpath(Global_Locators.Offering_compute_xpath).click() + + ## Action part + 
# We will be searching for our disk offering into the table + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.Offering_table_xpath) # This returns a list of all Offerings in table + + for link in linkclass: + + if link.text == "Test Name": + link.click() + + time.sleep(2) + + # Click Delete + + driver.find_element_by_css_selector(Global_Locators.Offering_deletecompute_css).click() + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + + time.sleep(20) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) diff --git a/test/selenium/smoke/TemplatesAndISO.py b/test/selenium/smoke/TemplatesAndISO.py new file mode 100644 index 00000000000..120c8d10d9f --- /dev/null +++ b/test/selenium/smoke/TemplatesAndISO.py @@ -0,0 +1,244 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +''' +ISO PART YET TO BE ADDED:: remove this after adding it. 
+''' + +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + + +class Template_Add(unittest.TestCase): + + + + def setUp(self): + + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_templateadd(self): + + + driver = self.driver + + ## Action part + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Templates + driver.find_element_by_xpath(Global_Locators.templates_xpath).click() + + #Select Template from drop down list + driver.find_element_by_xpath(Global_Locators.template_xpath).click() + + # Add Template + driver.find_element_by_xpath(Global_Locators.AddTemplate_xpath).click() + + # Following have names.. so they do not have their global entries. 
+ driver.find_element_by_id("label_name").clear() + driver.find_element_by_id("label_name").send_keys("Test Template Ubuntu") + driver.find_element_by_id("label_description").clear() + driver.find_element_by_id("label_description").send_keys("Ubuntu 10.04") + driver.find_element_by_id("URL").clear() + driver.find_element_by_id("URL").send_keys("http://nfs1.lab.vmops.com/templates/Ubuntu/Ubuntuu-10-04-64bit-server.vhd") + Select(driver.find_element_by_id("label_os_type")).select_by_visible_text("Ubuntu 10.04 (64-bit)") + driver.find_element_by_id("label_public").click() + driver.find_element_by_id("label_featured").click() + driver.find_element_by_xpath("//button[@type='button']").click() + + time.sleep(2) + + # Go to Dash Board + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + + time.sleep(600) + + ##Verification will be if this offering shows up into table and we can actually edit it. + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + + + +class Template_Edit(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_templateedit(self): + + driver = self.driver + + ## Action part + + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Templates + driver.find_element_by_xpath(Global_Locators.templates_xpath).click() + + #Select Template from drop down list + driver.find_element_by_xpath(Global_Locators.template_xpath).click() + + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list + + for link in linkclass: + + if link.text == "Test Template Ubuntu": # We will search for our VM in this table + link.click() + 
+ time.sleep(2) + + # Change name + driver.find_element_by_name("name").clear() + driver.find_element_by_name("name").send_keys("Test template") + + + # Change Description + driver.find_element_by_name("displaytext").clear() + driver.find_element_by_name("displaytext").send_keys("ubuntu") + + driver.find_element_by_css_selector(Global_Locators.template_editdone_css).click() + time.sleep(2) + + #Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(10) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + +# Now we will find this offering and delete it!! + + + + + + +class Template_Delete(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + + def test_templatedelete(self): + + driver = self.driver + + ## Action part + #Make sure you are on Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + # Go to Templates + driver.find_element_by_xpath(Global_Locators.templates_xpath).click() + + #Select Template from drop down list + driver.find_element_by_xpath(Global_Locators.template_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.template_table_xpath) # This returns a list + + for link in linkclass: + + if link.text == "Test Template": # We will search for our VM in this table + link.click() + + time.sleep(2) + + driver.find_element_by_css_selector(Gloabl_Locators.template_delete_css).click() + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + + time.sleep(2) + + #Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + time.sleep(20) + + + + def is_element_present(self, how, what): + + try: 
self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) diff --git a/test/selenium/smoke/VM_lifeCycle.py b/test/selenium/smoke/VM_lifeCycle.py new file mode 100644 index 00000000000..845a5cb316f --- /dev/null +++ b/test/selenium/smoke/VM_lifeCycle.py @@ -0,0 +1,613 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import sys, os +sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/'+'../lib')) + + + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import Select +from selenium.common.exceptions import NoSuchElementException +import unittest, time +import initialize +import Global_Locators + + + +class deployVM(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_deployvm(self): + + + ## Action Part + # VM will be named Auto-VM and this VM will be used in all subsequent tests. + # Deploy an Instance named Auto-VM Default CentOS no GUI Template + + driver = self.driver + self.driver.implicitly_wait(30) + driver.refresh() ## Most Important step. 
Failure to do this will change XPATH location and Scripts will fail. + + + # Click on Instances link + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + # Click on add Instance on Instances page + driver.find_element_by_xpath(Global_Locators.add_instance_xpath).click() + + # Following select template action will fire automatically... ignore it. And leave following commented. + # driver.find_element_by_xpath("(//input[@name='select-template'])[3]").click() + #Click on Next button on Instances Wizard. + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here as we will be using all default settings. (Default CentOS no GUI template should be highlighted here. Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here. Medium Instance compute offering should be selected here. Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Nothing to do here. Data Disk Offering : No Thanks!!. Click Next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Since this is our first instance; we must provide a network name. We will use Test-Network as out network name. + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").click() + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").clear() + driver.find_element_by_xpath("(//input[@name='new-network-name'])[2]").send_keys("Test-Network") + + #Click next + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + # Give our VM a name here. Use Auto-VM as name + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").click() + + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").clear() + + driver.find_element_by_xpath("(//input[@name='displayname'])[2]").send_keys("Auto-VM") + + # All data filled. Click Launch VM. 
(It has the same xpath as Next button. So we will use Next Variable here. + driver.find_element_by_xpath(Global_Locators.add_instance_next_xpath).click() + + print '\n' + '\n' + "VM Deployment is complete... wait for 5 mins to check deployment status" + '\n' + '\n' + + + + ## Verification Part + + + ## Now we must wait for some random time (Educated guess based on experience) and check if VM has been deployed and if it is in running state. + ## Should take about 4 min to deploy VM.. but we will wait 5 mins and check the status , we will do this twice. So total 2 check within 10 mins with first check occuring at 5th min. + + + driver.refresh() # Refresh UI Page; This polls latest status. + + # Click on Instances link + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + count = 1 + + while (count > 0): + + time.sleep(300) + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Running" : + print "VM is in running state... 
continuing with other tests."+ '\n' + '\n' + break + else: + print "Need to check one more time after 5 mins" + continue + count = count - 1 + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + + + + +################################################################################################################################################################################################ + + + +class destroyVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_destroyvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + # Click on Destroy Instance button and confirm + time.sleep(2) + driver.find_element_by_css_selector(Global_Locators.destroyinstance_css).click() + time.sleep(2) + + # Click ok on confirmation + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + # driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + ## Verification part + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + 
for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + if status == "Destroyed" : + print "VM is Destroyed...."+ '\n' + '\n' + else: + print "Something went wrong" + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + + +################################################################################################################################################################################################ + + + + +class rebootVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_rebootvm(self): + + driver = self.driver + self.driver.implicitly_wait(30) + print "Verify this test manually for now" + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(vmLifeAndNetwork.Server_Ip, username='root', password='password') + print '\n' + '\n' + "Before Reboot ...Executing command date ... " + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('date') + print stdout.readlines() + print '\n' + '\n' + "Before Reboot ...Executing command last reboot | head -1 ..." + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('last reboot | head -1') + print '\n' + '\n' + "Before Reboot ...Executing command uptime..." 
+ '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('uptime') + print stdout.readlines() + ssh.close() + + + driver.refresh() + + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + count = 1 + + while (count > 0): + + #time.sleep(300) + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. Rebooting now..." + '\n' + '\n' + link.click() + + driver.find_element_by_css_selector(Global_Locators.rebootinstance_css).click() + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + + # Sleep for 5 mins to ensure system gets rebooted. + time.sleep(300) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(vmLifeAndNetwork.Server_Ip, username='root', password='password') + print '\n' + '\n' + "After Reboot ...Executing command date ... " + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('date') + print stdout.readlines() + print '\n' + '\n' + "After Reboot ...Executing command last reboot | head -1 ..." + '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('last reboot | head -1') + print '\n' + '\n' + "After Reboot ...Executing command uptime..." 
+ '\n' + '\n' + stdin, stdout, stderr = ssh.exec_command('uptime') + print stdout.readlines() + ssh.close() + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + self.assertEqual([], self.verificationErrors) + + +######################################################################################################################################################### + + + +class restoreVM(unittest.TestCase): + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_restorevm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + + link.click() + + # Click on Destroy Instance button and confirm + driver.find_element_by_css_selector(Global_Locators.restoreinstance_css).click() + + # Click ok on confirmation + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + + + ## Verification part + + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = 
driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Stopped" : + print "VM is Restored. but in stopped state.. will start now."+ '\n' + '\n' + + else: + print "Something went wrong" + + + + + #VM will be in stop state so we must start it now + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + # Click on Start Instance. + driver.find_element_by_css_selector(Global_Locators.startinstance_css).click() + time.sleep(2) + + # Dismiss confirmation by clicking Yes + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + time.sleep(2) + + print "VM is Started."+ '\n' + '\n' + + # status = None + time.sleep(60) + + # Dismiss the Start Instance information box. + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + +######################################################################################################################################################### + + + +class startVM(unittest.TestCase): + + + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_startvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + #driver.refresh() ## Most Important step. 
Failure to do this will change XPATH location and Scripts will fail. + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + + + # Click on Start Instance. + driver.find_element_by_css_selector(Global_Locators.startinstance_css).click() + time.sleep(2) + + # Dismiss confirmation by clicking Yes + driver.find_element_by_xpath(Global_Locators.yesconfirmation_xapth).click() + time.sleep(2) + + # Go to Dashboard + #driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + + ## Verification part + # status = None + time.sleep(60) + + # Dismiss the Start Instance information box. 
+ driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + time.sleep(2) + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Running" : + print "VM is in Running state..."+ '\n' + '\n' + + else: + print "Something went wrong" + + # Go to Dashboard + driver.refresh() + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + + + +######################################################################################################################################################### + + + +class stopVM(unittest.TestCase): + + def setUp(self): + + self.driver = initialize.getOrCreateWebdriver() + self.verificationErrors = [] + + + def test_stopvm(self): + + driver = self.driver + self.driver.implicitly_wait(100) + + ## Action part + driver.refresh() ## Important step. + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + print "found VM in table .. checking status..." + '\n' + '\n' + link.click() + + + # HWe are on our VM information page. 
+ driver.find_element_by_css_selector(Global_Locators.stopinstance_css).click() + time.sleep(2) + + # a Pop up must appear; below we will check the force stop check box and then we will click ok. + driver.find_element_by_id(Global_Locators.stopinstanceforce_id).click() + driver.find_element_by_xpath(Global_Locators.actionconfirm_xpath).click() + time.sleep(2) + + # Go to Dahsboard + #driver.find_element_by_xpath(Global_Locators.dashboard_xpath).click() + driver.refresh() + + # Should take less than min to stop the instance. We will check twice at interval of 45 seconds o be safe. + ## Verification part + time.sleep(60) + + # Click on Instances link and find our instance + driver.find_element_by_xpath(Global_Locators.instances_xpath).click() + + linkclass = None + linkclass = driver.find_elements_by_xpath(Global_Locators.instances_table_xpath) # This returns a list of all VM names in tables + + for link in linkclass: + + if link.text == "Auto-VM": # We will search for our VM in this table + link.click() + + + status = driver.find_element_by_xpath(Global_Locators.state_xpath).text ## get the status of our VM + + if status == "Stopped" : + print "VM is in Stopped state...."+ '\n' + '\n' + else: + print "Something went wrong" + + + + def is_element_present(self, how, what): + + try: self.driver.find_element(by=how, value=what) + except NoSuchElementException, e: return False + return True + + + + def tearDown(self): + + self.assertEqual([], self.verificationErrors) + + +######################################################################################################################################################### diff --git a/test/selenium/smoke/main.py b/test/selenium/smoke/main.py new file mode 100644 index 00000000000..86bb9308c2f --- /dev/null +++ b/test/selenium/smoke/main.py @@ -0,0 +1,145 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import unittest +import HTMLTestRunner +import xmlrunner + + +global DRIVER + + +# Import test cases + +################################## +from Login_and_Accounts import * +from Service_Offering import * + +from TemplatesAndISO import * +from VM_lifeCycle import * + +################################### + + +# Following are BVT Tests +# serialize the test cases + + +suite = unittest.TestSuite() # setup new test suite + + +#################################################################################################### + +# Following logs admin user in and creates test account then logs admin user out and logs in as test to run tests. +# You should leave this as is for all the tests. + +suite.addTest(unittest.makeSuite(login)) #Login Admin + +time.sleep(5) +suite.addTest(unittest.makeSuite(createAcc)) # Create an Account test. 
We will use test account for all our tests + +time.sleep(5) +suite.addTest(unittest.makeSuite(logout)) #Logout Admin + +time.sleep(5) +suite.addTest(unittest.makeSuite(login_test)) # Login Test + + + +#################################################################################################### + + + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Add)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Edit)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Disk_offering_Delete)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Add)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Edit)) + +time.sleep(5) +suite.addTest(unittest.makeSuite(Compute_offering_Delete)) + + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(deployVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(stopVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(startVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(destroyVM)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(restoreVM)) + + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(Template_Add)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(Template_Edit)) + +# time.sleep(5) +# suite.addTest(unittest.makeSuite(Template_Delete)) + + +#################################################################################################### + +# Following logs test user out and logs back in as Admin and tears down the test account. +# You should leave this as is for all the tests. + +suite.addTest(unittest.makeSuite(logout)) #Logout test +time.sleep(5) +suite.addTest(unittest.makeSuite(login)) #Login Admin +time.sleep(5) +suite.addTest(unittest.makeSuite(tearAcc)) # Delete Account test + +#################################################################################################### + + + +# If XML reports compatible with junit's XML output are desired then leave folowing code as is. 
+# If HTML reports are desired follow instructions + + +#Comment following line for HTML and uncomment for XML +runner = xmlrunner.XMLTestRunner(output='test-reports') + +#Comment following line for XML and uncomment for HTML +#runner = HTMLTestRunner.HTMLTestRunner() + +#header is required for displaying the website +#Comment following line for XML and uncomment for HTML +#print "Content-Type: text/html\n" + +# Leave following as is for either XML or HTML +runner.run(suite) + + + diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index 2fd855d75ad..6292c536a9d 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -132,6 +132,9 @@ known_categories = { 'Condition': 'AutoScale', 'Api': 'API Discovery', 'Region': 'Region', + 'addIpToNic': 'Nic', + 'removeIpFromNic': 'Nic', + 'listNics':'Nic', } diff --git a/tools/apidoc/generateadmincommands.xsl b/tools/apidoc/generateadmincommands.xsl index 3e9c6c598fa..a33e7baf20d 100644 --- a/tools/apidoc/generateadmincommands.xsl +++ b/tools/apidoc/generateadmincommands.xsl @@ -138,8 +138,16 @@ version="1.0"> +