diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateCacheStoreCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateCacheStoreCmd.java index ff01a40c1fa..f94207fda91 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/CreateCacheStoreCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/CreateCacheStoreCmd.java @@ -18,22 +18,22 @@ */ package org.apache.cloudstack.api.command.admin.storage; -import java.util.Map; - +import com.cloud.storage.ImageStore; +import com.cloud.user.Account; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; -import com.cloud.exception.DiscoveryException; -import com.cloud.storage.ImageStore; -import com.cloud.user.Account; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; @APICommand(name = "createCacheStore", description="create cache store.", responseObject=ImageStoreResponse.class) public class CreateCacheStoreCmd extends BaseCmd { @@ -76,7 +76,19 @@ public class CreateCacheStoreCmd extends BaseCmd { } public Map getDetails() { - return details; + Map detailsMap = null; + if (details != null && !details.isEmpty()) { + detailsMap = new HashMap(); + Collection props = details.values(); + Iterator iter = props.iterator(); + while (iter.hasNext()) { + HashMap detail = (HashMap) iter.next(); + String key = detail.get("key"); + String value = detail.get("value"); + detailsMap.put(key, value); + } + } + return detailsMap; } public String getScope() { diff --git a/docs/en-US/CloudStack_GSoC_Guide.xml b/docs/en-US/CloudStack_GSoC_Guide.xml index cd8205d34ba..2f537d40cef 100644 --- a/docs/en-US/CloudStack_GSoC_Guide.xml +++ b/docs/en-US/CloudStack_GSoC_Guide.xml @@ -46,10 +46,7 @@ - - - - - + + diff --git a/docs/en-US/add-loadbalancer-rule-vpc.xml b/docs/en-US/add-loadbalancer-rule-vpc.xml index 82e870243d1..0f2a83dcbfd 100644 --- a/docs/en-US/add-loadbalancer-rule-vpc.xml +++ b/docs/en-US/add-loadbalancer-rule-vpc.xml @@ -35,113 +35,216 @@ received at a public IP to one or more VMs that belong to a network tier that provides load balancing service in a VPC. A user creates a rule, specifies an algorithm, and assigns the rule to a set of VMs within a tier. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC, for which you want to configure load balancing - rules. - The VPC page is displayed where all the tiers you created listed in a diagram. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - In the Router node, select Public IP Addresses. - The IP Addresses page is displayed. 
- - - Click the IP address for which you want to create the rule, then click the - Configuration tab. - - - In the Load Balancing node of the diagram, click View All. - - - Select the tier to which you want to apply the rule. - - - Specify the following: - - - Name: A name for the load balancer rule. - - - Public Port: The port that receives the incoming - traffic to be balanced. - - - Private Port: The port that the VMs will use to - receive the traffic. - - - Algorithm. Choose the load balancing algorithm - you want &PRODUCT; to use. &PRODUCT; supports the following well-known - algorithms: - - - Round-robin - - - Least connections - - - Source - - - - - Stickiness. (Optional) Click Configure and choose - the algorithm for the stickiness policy. See Sticky Session Policies for Load Balancer - Rules. - - - Add VMs: Click Add VMs, then select two or more - VMs that will divide the load of incoming traffic, and click Apply. - - - - - The new load balancing rule appears in the list. You can repeat these steps to add more - load balancing rules for this IP address. +
+ Enabling NetScaler as the LB Provider on a VPC Tier + + + Add and enable NetScaler VPX in dedicated mode. + NetScaler can be used in a VPC environment only if it is in dedicated mode. + + + Create a network offering, as given in . + + + Create a VPC with NetScaler as the Public LB provider. + For more information, see . + + + For the VPC, acquire an IP. + + + Create an external load balancing rule and apply it, as given in . + + +
+
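For admins who script this setup, step 1 of the procedure above can also be done through the API. The following is a minimal sketch using the auto-generated Marvin bindings, assuming a Marvin test client named apiclient and a previously looked-up physical_network; the device address, credentials, and the url query string (including the dedicated-mode flag) are illustrative values to verify against the addNetscalerLoadBalancer reference for your release.

# Hedged sketch: register a NetScaler VPX in dedicated mode via the API.
# All concrete values (IP, interfaces, credentials) are placeholders.
from marvin.cloudstackAPI import addNetscalerLoadBalancer

cmd = addNetscalerLoadBalancer.addNetscalerLoadBalancerCmd()
cmd.physicalnetworkid = physical_network.id    # hypothetical lookup result
cmd.username = "nsroot"
cmd.password = "nsroot"
cmd.networkdevicetype = "NetscalerVPXLoadBalancer"
cmd.url = ("https://10.147.40.100?publicinterface=1/1"
           "&privateinterface=1/2&lbdevicededicated=true")
netscaler = apiclient.addNetscalerLoadBalancer(cmd)

Keeping the device dedicated rather than shared is what makes it usable by a VPC, per the note in step 1.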
+ Creating a Network Offering for External LB + To have external LB support on a VPC, create a network offering as follows: + + + Log in to the &PRODUCT; UI as a user or admin. + + + From the Select Offering drop-down, choose Network Offering. + + + Click Add Network Offering. + + + In the dialog, make the following choices: + + + Name: Any desired name for the network + offering. + + + Description: A short description of the + offering that can be displayed to users. + + + Network Rate: Allowed data transfer rate in MB + per second. + + + Traffic Type: The type of network traffic that + will be carried on the network. + + + Guest Type: Choose whether the guest network is + isolated or shared. + + + Persistent: Indicate whether the guest network + is persistent or not. A network that you can provision without having to deploy a + VM on it is termed a persistent network. + + + VPC: This option indicates whether the guest + network is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a + private, isolated part of &PRODUCT;. A VPC can have its own virtual network topology + that resembles a traditional physical network. For more information on VPCs, see + . + + + Specify VLAN: (Isolated guest networks only) + Indicate whether a VLAN should be specified when this offering is used. + + + Supported Services: Select Load Balancer. + Select Netscaler from the provider list. + + + Load Balancer Type: Select External LB from the + drop-down, and use NetScaler as the provider. + + + System Offering: Choose the system service + offering that you want virtual routers to use in this network. + + + Conserve mode: Indicate whether to use conserve + mode. In this mode, network resources are allocated only when the first virtual + machine starts in the network. + + + + + Click OK and the network offering is created. + + +
+
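The dialog above maps naturally onto a Marvin-style services dictionary, in the same style as the test code added later in this patch. The sketch below is illustrative, not authoritative: it assumes a test client named apiclient, that the public LB provider is registered under the name Netscaler, and that the remaining field values are adapted to your zone.

# Hedged sketch: a network offering with NetScaler as the public LB provider.
from marvin.integration.lib.base import NetworkOffering

offering_services = {
    "name": "VPC tier with external LB",
    "displaytext": "VPC tier with NetScaler public LB",
    "guestiptype": "Isolated",
    "traffictype": "GUEST",
    "availability": "Optional",
    "useVpc": "on",                       # assumption: VPC-enabled offering
    "supportedservices": "Dhcp,Dns,SourceNat,Lb",
    "serviceProviderList": {
        "Dhcp": "VpcVirtualRouter",
        "Dns": "VpcVirtualRouter",
        "SourceNat": "VpcVirtualRouter",
        "Lb": "Netscaler",                # the external LB choice above
    },
}

network_offering = NetworkOffering.create(apiclient, offering_services,
                                          conservemode=False)
network_offering.update(apiclient, state="Enabled")  # offerings start Disabled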
+ Creating an External LB Rule + + + Log in to the &PRODUCT; UI as an administrator or end user. + + + In the left navigation, choose Network. + + + In the Select view, select VPC. + All the VPCs that you have created for the account are listed on the page. + + + Click the Configure button of the VPC for which you want to configure load + balancing rules. + The VPC page is displayed, where all the tiers you created are listed in a + diagram. + For each tier, the following options are displayed: + + + Internal LB + + + Public LB IP + + + Static NAT + + + Virtual Machines + + + CIDR + + + The following router information is displayed: + + + Private Gateways + + + Public IP Addresses + + + Site-to-Site VPNs + + + Network ACL Lists + + + + + In the Router node, select Public IP Addresses. + The IP Addresses page is displayed. + + + Click the IP address for which you want to create the rule, then click the + Configuration tab. + + + In the Load Balancing node of the diagram, click View All. + + + Select the tier to which you want to apply the rule. + + + Specify the following: + + + Name: A name for the load balancer rule. + + + Public Port: The port that receives the + incoming traffic to be balanced. + + + Private Port: The port that the VMs will use to + receive the traffic. + + + Algorithm: Choose the load balancing algorithm + you want &PRODUCT; to use. &PRODUCT; supports the following well-known + algorithms: + + + Round-robin + + + Least connections + + + Source + + + + + Stickiness: (Optional) Click Configure and + choose the algorithm for the stickiness policy. See Sticky Session Policies for Load + Balancer Rules. + + + Add VMs: Click Add VMs, then select two or more + VMs that will divide the load of incoming traffic, and click Apply. + + + + + The new load balancing rule appears in the list. You can repeat these steps to add more + load balancing rules for this IP address.
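The same rule can be created in a few lines of Marvin code, reusing the LoadBalancerRule pattern from the test_loadbalance.py test added later in this patch. Treat this as a sketch: public_ip, tier_network, account, and the VMs are placeholders, and the networkid argument for pinning the rule to a tier is an assumption to verify against your Marvin version.

# Hedged sketch: an external LB rule on a VPC public IP, balancing two VMs.
from marvin.integration.lib.base import LoadBalancerRule

lb_services = {"name": "SSH", "alg": "roundrobin",
               "privateport": 22, "publicport": 2222, "protocol": "TCP"}

lb_rule = LoadBalancerRule.create(
    apiclient,
    lb_services,
    ipaddressid=public_ip.ipaddress.id,   # IP acquired for the VPC
    accountid=account.name,
    networkid=tier_network.id             # assumed way to select the tier
)
lb_rule.assign(apiclient, [vm_1, vm_2])   # round-robin across both VMs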
Load Balancing Across Tiers diff --git a/docs/en-US/add-vm-tier-sharednw.xml b/docs/en-US/add-vm-tier-sharednw.xml new file mode 100644 index 00000000000..a68860419eb --- /dev/null +++ b/docs/en-US/add-vm-tier-sharednw.xml @@ -0,0 +1,62 @@ + + +%BOOK_ENTITIES; +]> + +
+ Deploying VMs to VPC Tier and Shared Networks &PRODUCT; allows you to deploy VMs on a VPC tier and one or more shared networks. With this + feature, VMs deployed in a multi-tier application can receive monitoring services via a shared + network provided by a service provider. + + + Log in to the &PRODUCT; UI as an administrator. + + + In the left navigation, choose Instances. + + + Click Add Instance. + + + Select a zone. + + + Select a template or ISO, then follow the steps in the wizard. + + + Ensure that the hardware you have allows starting the selected service offering. + + + Under Networks, select the desired networks for the VM you are launching. + You can deploy a VM to a VPC tier and multiple shared networks. + + + + + + addvm-tier-sharednw.png: adding a VM to a VPC tier and shared network. + + + + + Click Next, review the configuration and click Launch. + Your VM will be deployed to the selected VPC tier and shared network. + + +
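In API terms, the wizard's network selection is just a list of network ids passed at deploy time. A minimal sketch, assuming the VPC tier and the provider's shared network already exist and that apiclient, template, account, and service_offering were set up as in the tests elsewhere in this patch:

# Hedged sketch: one NIC on the VPC tier, one on the shared monitoring network.
from marvin.integration.lib.base import VirtualMachine

vm = VirtualMachine.create(
    apiclient,
    services["server"],                    # same services-dict style as the tests
    templateid=template.id,
    accountid=account.name,
    domainid=account.domainid,
    serviceofferingid=service_offering.id,
    networkids=[tier_network.id, shared_network.id]  # tier first, then shared
)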
diff --git a/docs/en-US/add-vpc.xml b/docs/en-US/add-vpc.xml index 8c088a0e1fd..b8034c4b4c8 100644 --- a/docs/en-US/add-vpc.xml +++ b/docs/en-US/add-vpc.xml @@ -67,9 +67,14 @@ assign a special domain name, specify the DNS suffix. This parameter is applied to all the tiers within the VPC. That implies, all the tiers you create in the VPC belong to the same DNS domain. If the parameter is not specified, a DNS domain name is generated - automatically. + automatically. + + + Public Load Balancer Provider: You have two + options: VPC Virtual Router and NetScaler. + Click OK.
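A note for API users: createVPC itself takes no load balancer provider parameter, so the new Public Load Balancer Provider choice above presumably resolves to which VPC offering backs the VPC, that is, which provider its Lb service names. The sketch below illustrates that mapping under stated assumptions; the offering fields, names, and CIDR are placeholders.

# Hedged sketch: choose NetScaler as the public LB provider via the VPC offering.
from marvin.integration.lib.base import VpcOffering, VPC

vpc_off_services = {
    "name": "VPC offering - NetScaler public LB",
    "displaytext": "VPC offering - NetScaler public LB",
    "supportedservices": "Dhcp,Dns,SourceNat,StaticNat,NetworkACL,Lb",
    "serviceProviderList": {
        "Dhcp": "VpcVirtualRouter",
        "Dns": "VpcVirtualRouter",
        "SourceNat": "VpcVirtualRouter",
        "StaticNat": "VpcVirtualRouter",
        "NetworkACL": "VpcVirtualRouter",
        "Lb": "Netscaler",                 # the dialog's provider choice
    },
}
vpc_offering = VpcOffering.create(apiclient, vpc_off_services)
vpc_offering.update(apiclient, state="Enabled")

vpc = VPC.create(apiclient,
                 {"name": "vpc-lb", "displaytext": "vpc-lb", "cidr": "10.0.0.0/16"},
                 vpcofferingid=vpc_offering.id,
                 zoneid=zone.id,
                 account=account.name,
                 domainid=account.domainid)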
diff --git a/docs/en-US/configure-vpc.xml b/docs/en-US/configure-vpc.xml index 45237d21cbb..e0e2ee93f19 100644 --- a/docs/en-US/configure-vpc.xml +++ b/docs/en-US/configure-vpc.xml @@ -26,6 +26,7 @@ + diff --git a/docs/en-US/creating-vms.xml b/docs/en-US/creating-vms.xml index 18995979a80..86d05d3f7bc 100644 --- a/docs/en-US/creating-vms.xml +++ b/docs/en-US/creating-vms.xml @@ -21,35 +21,70 @@ under the License. -->
- Creating VMs - Virtual machines are usually created from a template. Users can also create blank virtual machines. A blank virtual machine is a virtual machine without an OS template. Users can attach an ISO file and install the OS from the CD/DVD-ROM. - You can create a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A request parameter, startVM, in the deployVm API provides this feature. For more information, see the Developer's Guide - To create a VM from a template: - - Log in to the &PRODUCT; UI as an administrator or user. - In the left navigation bar, click Instances. - - Click Add Instance. - - - Select a zone. - - Select a template, then follow the steps in the wizard. For more information about how the templates came to be in this list, see . - Be sure that the hardware you have allows starting the selected service offering. - Click Submit and your VM will be created and started. - For security reason, the internal name of the VM is visible only to the root admin. - - - To create a VM from an ISO: - (XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in the template or added after the VM is created. The PV drivers are necessary for essential management functions such as mounting additional volumes and ISO images, live migration, and graceful shutdown. - - Log in to the &PRODUCT; UI as an administrator or user. - In the left navigation bar, click Instances. - Click Add Instance. - Select a zone. - Select ISO Boot, and follow the steps in the wizard. - Click Submit and your VM will be created and started. - - + Creating VMs + Virtual machines are usually created from a template. Users can also create blank virtual + machines. A blank virtual machine is a virtual machine without an OS template. Users can attach + an ISO file and install the OS from the CD/DVD-ROM. + + You can create a VM without starting it. You can determine whether the VM needs to be + started as part of the VM deployment. A request parameter, startVM, in the deployVm API + provides this feature. For more information, see the Developer's Guide. + + To create a VM from a template: + + + Log in to the &PRODUCT; UI as an administrator or user. + + + In the left navigation bar, click Instances. + + + Click Add Instance. + + + Select a zone. + + + Select a template, then follow the steps in the wizard. For more information about how + the templates came to be in this list, see . + + + Be sure that the hardware you have allows starting the selected service offering. + + + Click Submit and your VM will be created and started. + + For security reasons, the internal name of the VM is visible only to the root + admin. + + + + To create a VM from an ISO: + + (XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in + the template or added after the VM is created. The PV drivers are necessary for essential + management functions such as mounting additional volumes and ISO images, live migration, and + graceful shutdown. + + + + Log in to the &PRODUCT; UI as an administrator or user. + + + In the left navigation bar, click Instances. + + + Click Add Instance. + + + Select a zone. + + + Select ISO Boot, and follow the steps in the wizard. + + + Click Submit and your VM will be created and started. + + +
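The startVM parameter mentioned above is easy to exercise with the raw API bindings. A minimal sketch, assuming a Marvin apiclient and zone/template/offering ids resolved earlier; startvm is the documented deployVirtualMachine parameter:

# Hedged sketch: deploy a VM but leave it stopped (startvm=False).
from marvin.cloudstackAPI import deployVirtualMachine

cmd = deployVirtualMachine.deployVirtualMachineCmd()
cmd.zoneid = zone.id                         # ids assumed resolved earlier
cmd.templateid = template.id
cmd.serviceofferingid = service_offering.id
cmd.startvm = False                          # create the VM without starting it
vm = apiclient.deployVirtualMachine(cmd)     # expect vm.state == 'Stopped'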
- diff --git a/docs/en-US/gsoc-dharmesh.xml b/docs/en-US/gsoc-dharmesh.xml index 5e2bf734d7f..01a77c70ab0 100644 --- a/docs/en-US/gsoc-dharmesh.xml +++ b/docs/en-US/gsoc-dharmesh.xml @@ -22,7 +22,7 @@ under the License. --> - +
Dharmesh's 2013 GSoC Proposal This chapter describes Dharmesh's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy-paste of the submitted proposal.
@@ -146,4 +146,4 @@
- +
diff --git a/docs/en-US/gsoc-imduffy15.xml b/docs/en-US/gsoc-imduffy15.xml index 652152fcc4b..f78cb540704 100644 --- a/docs/en-US/gsoc-imduffy15.xml +++ b/docs/en-US/gsoc-imduffy15.xml @@ -22,7 +22,7 @@ under the License. --> - +
Ian's 2013 GSoC Proposal This chapter describes Ian's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy-paste of the submitted proposal.
@@ -392,4 +392,4 @@ cool. I’m excited at the opportunity and learning experience that cloudstack is offering with this project.
- +
diff --git a/docs/en-US/gsoc-meng.xml b/docs/en-US/gsoc-meng.xml index 1de259dcac1..8ea2b4cfda7 100644 --- a/docs/en-US/gsoc-meng.xml +++ b/docs/en-US/gsoc-meng.xml @@ -22,7 +22,7 @@ under the License. --> - +
Meng's 2013 GSoC Proposal This chapter describes Meng's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy-paste of the submitted proposal.
@@ -232,4 +232,4 @@ http://chriskleban-internet.blogspot.com/2012/03/build-cloud-cloudstack-instance
- +
diff --git a/docs/en-US/gsoc-midsummer-dharmesh.xml b/docs/en-US/gsoc-midsummer-dharmesh.xml new file mode 100644 index 00000000000..69e417aeac5 --- /dev/null +++ b/docs/en-US/gsoc-midsummer-dharmesh.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Mid-Summer Progress Updates + This section describes ... +
diff --git a/docs/en-US/gsoc-midsummer-ian.xml b/docs/en-US/gsoc-midsummer-ian.xml new file mode 100644 index 00000000000..348418f6969 --- /dev/null +++ b/docs/en-US/gsoc-midsummer-ian.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Mid-Summer Progress Updates + This section describes ... +
diff --git a/docs/en-US/gsoc-midsummer-meng.xml b/docs/en-US/gsoc-midsummer-meng.xml new file mode 100644 index 00000000000..1ab07cb93b8 --- /dev/null +++ b/docs/en-US/gsoc-midsummer-meng.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Mid-Summer Progress Updates + This section describes ... +
diff --git a/docs/en-US/gsoc-midsummer-nguyen.xml b/docs/en-US/gsoc-midsummer-nguyen.xml new file mode 100644 index 00000000000..96c2de10259 --- /dev/null +++ b/docs/en-US/gsoc-midsummer-nguyen.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Mid-Summer Progress Updates + This section describes ... +
diff --git a/docs/en-US/gsoc-midsummer-shiva.xml b/docs/en-US/gsoc-midsummer-shiva.xml new file mode 100644 index 00000000000..67755c46eb1 --- /dev/null +++ b/docs/en-US/gsoc-midsummer-shiva.xml @@ -0,0 +1,28 @@ + + +%BOOK_ENTITIES; +]> + + + +
+ Mid-Summer Progress Updates + This section describes ... +
diff --git a/docs/en-US/gsoc-midsummer.xml b/docs/en-US/gsoc-midsummer.xml new file mode 100644 index 00000000000..ffb031a90c7 --- /dev/null +++ b/docs/en-US/gsoc-midsummer.xml @@ -0,0 +1,35 @@ + + +%BOOK_ENTITIES; +]> + + + + + Mid-Summer Progress Updates + This chapter describes the progress of each &PRODUCT; Google Summer of Code project. + + + + + + + + diff --git a/docs/en-US/gsoc-proposals.xml b/docs/en-US/gsoc-proposals.xml new file mode 100644 index 00000000000..7c4b50c6511 --- /dev/null +++ b/docs/en-US/gsoc-proposals.xml @@ -0,0 +1,35 @@ + + +%BOOK_ENTITIES; +]> + + + + + Google Summer of Code Proposals + This chapter contains the five proposals awarded to &PRODUCT; for the 2013 Google Summer of Code project. + + + + + + + + diff --git a/docs/en-US/gsoc-shiva.xml b/docs/en-US/gsoc-shiva.xml index 400af3c82f6..fe36d8ef050 100644 --- a/docs/en-US/gsoc-shiva.xml +++ b/docs/en-US/gsoc-shiva.xml @@ -22,7 +22,7 @@ under the License. --> - +
Shiva Teja's 2013 GSoC Proposal This chapter describes Shiva Teja's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy-paste of the submitted proposal.
@@ -67,4 +67,4 @@ About Me I am a 2nd year computer science undergrad studying at IIT Mandi, India. I've been using Python for a year and a half now. I've used Django, Flask and Tornado for my small projects. Along with Python, I use C++ for competitive programming. Recently, I fell in love with Haskell. I've always been fascinated by web technologies.
- +
diff --git a/docs/en-US/gsoc-tuna.xml b/docs/en-US/gsoc-tuna.xml index 0988734a465..aa9726f095c 100644 --- a/docs/en-US/gsoc-tuna.xml +++ b/docs/en-US/gsoc-tuna.xml @@ -22,7 +22,7 @@ under the License. --> - +
Nguyen's 2013 GSoC Proposal This chapter describes Nguyen's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy-paste of the submitted proposal.
@@ -228,4 +228,4 @@
- +
diff --git a/docs/en-US/images/add-vpc.png b/docs/en-US/images/add-vpc.png index 6b9c6379d9f..f3348623416 100644 Binary files a/docs/en-US/images/add-vpc.png and b/docs/en-US/images/add-vpc.png differ diff --git a/docs/en-US/images/addvm-tier-sharednw.png b/docs/en-US/images/addvm-tier-sharednw.png new file mode 100644 index 00000000000..e60205f7219 Binary files /dev/null and b/docs/en-US/images/addvm-tier-sharednw.png differ diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java index a2d61f91fbe..a64114691c8 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java @@ -95,7 +95,7 @@ public class ImageStoreHelper { if (details != null) { Iterator keyIter = details.keySet().iterator(); while (keyIter.hasNext()) { - String key = keyIter.next(); + String key = keyIter.next().toString(); ImageStoreDetailVO detail = new ImageStoreDetailVO(); detail.setStoreId(store.getId()); detail.setName(key); diff --git a/patches/systemvm/debian/config/root/edithosts.sh b/patches/systemvm/debian/config/root/edithosts.sh index eaa82927803..513571e69d3 100755 --- a/patches/systemvm/debian/config/root/edithosts.sh +++ b/patches/systemvm/debian/config/root/edithosts.sh @@ -100,7 +100,7 @@ if [ $no_dhcp_release -eq 0 ] then #release previous dhcp lease if present logger -t cloud "edithosts: releasing $ipv4" - dhcp_release lo $ipv4 $(grep $ipv4 $DHCP_LEASES | awk '{print $2}') > /dev/null 2>&1 + dhcp_release eth0 $ipv4 $(grep $ipv4 $DHCP_LEASES | awk '{print $2}') > /dev/null 2>&1 logger -t cloud "edithosts: released $ipv4" fi diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index e07df0b2ab9..c8e00da1f19 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -650,7 +650,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Long newDynamicMemoryMin = vmSpec.getMinRam(); Long newDynamicMemoryMax = vmSpec.getMaxRam(); if (staticMemoryMin > newDynamicMemoryMin || newDynamicMemoryMax > staticMemoryMax) { - throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: 0 <= memory-static-min <= memory-dynamic-min <= memory-dynamic-max <= memory-static-max "); + throw new CloudRuntimeException("Cannot scale up the vm because of memory constraint violation: " + + "0 <= memory-static-min(" +staticMemoryMin+ ") <= memory-dynamic-min(" +newDynamicMemoryMin+ ") <= memory-dynamic-max(" +newDynamicMemoryMax+ ") <= memory-static-max(" +staticMemoryMax+ ")"); } vm.setMemoryDynamicRange(conn, newDynamicMemoryMin, newDynamicMemoryMax); diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index 108de3bb077..101902c9b74 100755 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -501,8 +501,8 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Override public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateForVmCreation){ - // Get size 
for all the volumes - Pair sizes = _volumeDao.getCountAndTotalByPool(pool.getId()); + // Get size for all the non-destroyed volumes + Pair sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId()); long totalAllocatedSize = sizes.second() + sizes.first() * _extraBytesPerVolume; // Get size for VM Snapshots diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index dac6a3a42e7..810c242689b 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -2976,7 +2976,7 @@ public class NetworkManagerImpl extends ManagerBase implements NetworkManager, L public void run() { try { List shutdownList = new ArrayList(); - long currentTime = System.currentTimeMillis() >> 10; + long currentTime = System.currentTimeMillis() / 1000 ; HashMap stillFree = new HashMap(); List networkIds = _networksDao.findNetworksToGarbageCollect(); diff --git a/server/src/com/cloud/network/NetworkModelImpl.java b/server/src/com/cloud/network/NetworkModelImpl.java index 21ef8bcf0ea..407bf705826 100755 --- a/server/src/com/cloud/network/NetworkModelImpl.java +++ b/server/src/com/cloud/network/NetworkModelImpl.java @@ -2143,6 +2143,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel { // network rules. So prevent network GC. if (secondaryIps != null && !secondaryIps.isEmpty() && networkIsConfiguredForExternalNetworking(network.getDataCenterId(), networkId)) { + return false; } return true; diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index 83468718fe6..05df742400f 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -3903,8 +3903,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { s_logger.debug("Created private network " + privateNetwork); } else { s_logger.debug("Private network already exists: " + privateNetwork); - throw new InvalidParameterValueException("Private network for the vlan: " + vlan + " and cidr "+ cidr +" already exists " + - " in zone " + _configMgr.getZone(pNtwk.getDataCenterId()).getName()); + //Do not allow multiple private gateways with same Vlan within a VPC + if(vpcId.equals(privateNetwork.getVpcId())){ + throw new InvalidParameterValueException("Private network for the vlan: " + vlan + " and cidr "+ cidr +" already exists " + + "for Vpc "+vpcId+" in zone " + _configMgr.getZone(pNtwk.getDataCenterId()).getName()); + } } //add entry to private_ip_address table diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 470d9b850b1..6e0d0d77ed3 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -503,8 +503,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements /* Validation : check for the multiple policies to the rule id */ List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( cmd.getLbRuleId(), false); - if (stickinessPolicies.size() > 0) { - throw new InvalidParameterValueException("Failed to create Stickiness policy: Already policy attached " + if (stickinessPolicies.size() > 1) { + throw new InvalidParameterValueException("Failed to create Stickiness policy: Already two policies attached " + cmd.getLbRuleId()); 
} return true; @@ -651,14 +651,25 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @ActionEvent(eventType = EventTypes.EVENT_LB_STICKINESSPOLICY_CREATE, eventDescription = "Apply Stickinesspolicy to load balancer ", async = true) public boolean applyLBStickinessPolicy(CreateLBStickinessPolicyCmd cmd) { boolean success = true; + FirewallRule.State backupState = null; + long oldStickinessPolicyId = 0; LoadBalancerVO loadBalancer = _lbDao.findById(cmd.getLbRuleId()); if (loadBalancer == null) { throw new InvalidParameterException("Invalid Load balancer Id:" + cmd.getLbRuleId()); } - FirewallRule.State backupState = loadBalancer.getState(); - loadBalancer.setState(FirewallRule.State.Add); - _lbDao.persist(loadBalancer); + List stickinessPolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(cmd.getLbRuleId(), false); + for (LBStickinessPolicyVO stickinessPolicy: stickinessPolicies) { + if (stickinessPolicy.getId() == cmd.getEntityId()) { + backupState = loadBalancer.getState(); + loadBalancer.setState(FirewallRule.State.Add); + _lbDao.persist(loadBalancer); + } else { + oldStickinessPolicyId = stickinessPolicy.getId(); + stickinessPolicy.setRevoke(true); + _lb2stickinesspoliciesDao.persist(stickinessPolicy); + } + } try { applyLoadBalancerConfig(cmd.getLbRuleId()); } catch (ResourceUnavailableException e) { @@ -667,10 +678,25 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (isRollBackAllowedForProvider(loadBalancer)) { loadBalancer.setState(backupState); _lbDao.persist(loadBalancer); + deleteLBStickinessPolicy(cmd.getEntityId(), false); s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " lb state rolback while creating sticky policy"); + } else { + deleteLBStickinessPolicy(cmd.getEntityId(), false); + if (oldStickinessPolicyId != 0) { + LBStickinessPolicyVO stickinessPolicy = _lb2stickinesspoliciesDao.findById(oldStickinessPolicyId); + stickinessPolicy.setRevoke(false); + _lb2stickinesspoliciesDao.persist(stickinessPolicy); + try { + if (backupState.equals(FirewallRule.State.Active)) + applyLoadBalancerConfig(cmd.getLbRuleId()); + } catch (ResourceUnavailableException e1) { + } finally { + loadBalancer.setState(backupState); + _lbDao.persist(loadBalancer); + } + } } - deleteLBStickinessPolicy(cmd.getEntityId(), false); success = false; } @@ -759,7 +785,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements success = false; } } else { - _lb2stickinesspoliciesDao.remove(stickinessPolicy.getLoadBalancerId()); + _lb2stickinesspoliciesDao.expunge(stickinessPolicyId); } return success; } @@ -1690,7 +1716,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override public List getStickinessPolicies(long lbId) { List stickinessPolicies = new ArrayList(); - List sDbpolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lbId); + List sDbpolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId(lbId, false); for (LBStickinessPolicyVO sDbPolicy : sDbpolicies) { LbStickinessPolicy sPolicy = new LbStickinessPolicy(sDbPolicy.getMethodName(), sDbPolicy.getParams(), diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 054ac40e01b..41c6ad74b96 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -472,7 +472,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, long clusterId = 0; ClusterVO 
cluster = new ClusterVO(dcId, podId, clusterName); - cluster.setHypervisorType(cmd.getHypervisor()); + cluster.setHypervisorType(hypervisorType.toString()); cluster.setClusterType(clusterType); cluster.setAllocationState(allocationState); diff --git a/test/integration/smoke/test_loadbalance.py b/test/integration/smoke/test_loadbalance.py new file mode 100644 index 00000000000..7bf560cd71a --- /dev/null +++ b/test/integration/smoke/test_loadbalance.py @@ -0,0 +1,598 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin import remoteSSHClient +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +from nose.plugins.attrib import attr +#Import System modules +import time + +_multiprocess_shared_ = True + +class Services: + """Test Network Services + """ + + def __init__(self): + self.services = { + "ostype": "CentOS 5.3 (64-bit)", + # Cent OS 5.3 (64 bit) + "lb_switch_wait": 10, + # Time interval after which LB switches the requests + "sleep": 60, + "timeout":10, + "network_offering": { + "name": 'Test Network offering', + "displaytext": 'Test Network offering', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding', + "traffictype": 'GUEST', + "availability": 'Optional', + "serviceProviderList" : { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "SourceNat": 'VirtualRouter', + "PortForwarding": 'VirtualRouter', + }, + }, + "network": { + "name": "Test Network", + "displaytext": "Test Network", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, + # in MHz + "memory": 256, + # In MBs + }, + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + "password": "password", + }, + "server": + { + "displayname": "Small Instance", + "username": "root", + "password": "password", + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "ssh_port": 22, + "protocol": 'TCP', + }, + "natrule": + { + "privateport": 22, + "publicport": 2222, + "protocol": "TCP" + }, + "lbrule": + { + "name": "SSH", + "alg": "roundrobin", + # Algorithm used for load balancing + "privateport": 22, + "publicport": 2222, + "protocol": 'TCP' + } + } + +class TestLoadBalance(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + + cls.api_client = super(TestLoadBalance, cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + template = get_template( + cls.api_client, + cls.zone.id, + 
cls.services["ostype"] + ) + cls.services["server"]["zoneid"] = cls.zone.id + + #Create an account, network, VM and IP addresses + cls.account = Account.create( + cls.api_client, + cls.services["account"], + admin=True, + domainid=cls.domain.id + ) + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.vm_1 = VirtualMachine.create( + cls.api_client, + cls.services["server"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id + ) + cls.vm_2 = VirtualMachine.create( + cls.api_client, + cls.services["server"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id + ) + cls.vm_3 = VirtualMachine.create( + cls.api_client, + cls.services["server"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id + ) + cls.non_src_nat_ip = PublicIPAddress.create( + cls.api_client, + cls.account.name, + cls.zone.id, + cls.account.domainid, + cls.services["server"] + ) + # Open up firewall port for SSH + cls.fw_rule = FireWallRule.create( + cls.api_client, + ipaddressid=cls.non_src_nat_ip.ipaddress.id, + protocol=cls.services["lbrule"]["protocol"], + cidrlist=['0.0.0.0/0'], + startport=cls.services["lbrule"]["publicport"], + endport=cls.services["lbrule"]["publicport"] + ) + cls._cleanup = [ + cls.account, + cls.service_offering + ] + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.cleanup = [] + return + + def tearDown(self): + cleanup_resources(self.apiclient, self.cleanup) + return + + @classmethod + def tearDownClass(cls): + cleanup_resources(cls.api_client, cls._cleanup) + return + + def try_ssh(self, src_nat_ip_addr, hostnames): + try: + self.debug( + "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" % + (self.vm_1.ipaddress, src_nat_ip_addr.ipaddress) + ) + # If Round Robin Algorithm is chosen, + # each ssh command should alternate between VMs + + ssh_1 = remoteSSHClient( + src_nat_ip_addr.ipaddress, + self.services['lbrule']["publicport"], + self.vm_1.username, + self.vm_1.password + ) + hostnames.append(ssh_1.execute("hostname")[0]) + self.debug(hostnames) + except Exception as e: + self.fail("%s: SSH failed for VM with IP Address: %s" % + (e, src_nat_ip_addr.ipaddress)) + time.sleep(self.services["lb_switch_wait"]) + return + + @attr(tags = ["advanced", "advancedns", "smoke", "needle"]) + def test_01_create_lb_rule_src_nat(self): + """Test to create Load balancing rule with source NAT""" + + # Validate the Following: + #1. listLoadBalancerRules should return the added rule + #2. attempt to ssh twice on the load balanced IP + #3. 
verify using the hostname of the VM + # that round robin is indeed happening as expected + src_nat_ip_addrs = PublicIPAddress.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) + self.assertEqual( + isinstance(src_nat_ip_addrs, list), + True, + "Check list response returns a valid list" + ) + src_nat_ip_addr = src_nat_ip_addrs[0] + + # Check if VM is in Running state before creating LB rule + vm_response = VirtualMachine.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) + + self.assertEqual( + isinstance(vm_response, list), + True, + "Check list VM returns a valid list" + ) + + self.assertNotEqual( + len(vm_response), + 0, + "Check Port Forwarding Rule is created" + ) + for vm in vm_response: + self.assertEqual( + vm.state, + 'Running', + "VM state should be Running before creating a NAT rule." + ) + + #Create Load Balancer rule and assign VMs to rule + lb_rule = LoadBalancerRule.create( + self.apiclient, + self.services["lbrule"], + src_nat_ip_addr.id, + accountid=self.account.name + ) + self.cleanup.append(lb_rule) + lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) + lb_rules = list_lb_rules( + self.apiclient, + id=lb_rule.id + ) + self.assertEqual( + isinstance(lb_rules, list), + True, + "Check list response returns a valid list" + ) + #verify listLoadBalancerRules lists the added load balancing rule + self.assertNotEqual( + len(lb_rules), + 0, + "Check Load Balancer Rule in its List" + ) + self.assertEqual( + lb_rules[0].id, + lb_rule.id, + "Check List Load Balancer Rules returns valid Rule" + ) + + # listLoadBalancerRuleInstances should list all + # instances associated with that LB rule + lb_instance_rules = list_lb_instances( + self.apiclient, + id=lb_rule.id + ) + self.assertEqual( + isinstance(lb_instance_rules, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(lb_instance_rules), + 0, + "Check Load Balancer instances Rule in its List" + ) + self.debug("lb_instance_rules Ids: %s, %s" % ( + lb_instance_rules[0].id, + lb_instance_rules[1].id + )) + self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id)) + + self.assertIn( + lb_instance_rules[0].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) + + self.assertIn( + lb_instance_rules[1].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) + + + hostnames = [] + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_2.name, + hostnames, + "Check if ssh succeeded for server2" + ) + + #SSH should pass till there is a last VM associated with LB rule + lb_rule.remove(self.apiclient, [self.vm_2]) + + # making hostnames list empty + hostnames[:] = [] + + try: + self.debug("SSHing into IP address: %s after removing VM (ID: %s)" % + ( + src_nat_ip_addr.ipaddress, + self.vm_2.id + )) + + self.try_ssh(src_nat_ip_addr, hostnames) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + except Exception as e: + self.fail("%s: SSH failed for VM with IP Address: %s" % + (e, src_nat_ip_addr.ipaddress)) + + lb_rule.remove(self.apiclient, [self.vm_1]) + + with 
self.assertRaises(Exception): + self.debug("Removed all VMs, trying to SSH") + self.try_ssh(src_nat_ip_addr, hostnames) + return + + @attr(tags = ["advanced", "advancedns", "smoke"]) + def test_02_create_lb_rule_non_nat(self): + """Test to create Load balancing rule with non source NAT""" + + # Validate the Following: + #1. listLoadBalancerRules should return the added rule + #2. attempt to ssh twice on the load balanced IP + #3. verify using the hostname of the VM that + # round robin is indeed happening as expected + + #Create Load Balancer rule and assign VMs to rule + lb_rule = LoadBalancerRule.create( + self.apiclient, + self.services["lbrule"], + self.non_src_nat_ip.ipaddress.id, + accountid=self.account.name + ) + self.cleanup.append(lb_rule) + lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) + lb_rules = list_lb_rules( + self.apiclient, + id=lb_rule.id + ) + self.assertEqual( + isinstance(lb_rules, list), + True, + "Check list response returns a valid list" + ) + #verify listLoadBalancerRules lists the added load balancing rule + self.assertNotEqual( + len(lb_rules), + 0, + "Check Load Balancer Rule in its List" + ) + self.assertEqual( + lb_rules[0].id, + lb_rule.id, + "Check List Load Balancer Rules returns valid Rule" + ) + # listLoadBalancerRuleInstances should list + # all instances associated with that LB rule + lb_instance_rules = list_lb_instances( + self.apiclient, + id=lb_rule.id + ) + self.assertEqual( + isinstance(lb_instance_rules, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + len(lb_instance_rules), + 0, + "Check Load Balancer instances Rule in its List" + ) + + self.assertIn( + lb_instance_rules[0].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) + + self.assertIn( + lb_instance_rules[1].id, + [self.vm_1.id, self.vm_2.id], + "Check List Load Balancer instances Rules returns valid VM ID" + ) + try: + hostnames = [] + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_2.name, + hostnames, + "Check if ssh succeeded for server2" + ) + + #SSH should pass till there is a last VM associated with LB rule + lb_rule.remove(self.apiclient, [self.vm_2]) + self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_2.id + )) + # Making host list empty + hostnames[:] = [] + + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.debug("Hostnames after removing VM2: %s" % str(hostnames)) + except Exception as e: + self.fail("%s: SSH failed for VM with IP Address: %s" % + (e, self.non_src_nat_ip.ipaddress.ipaddress)) + + lb_rule.remove(self.apiclient, [self.vm_1]) + with self.assertRaises(Exception): + self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % + ( + self.non_src_nat_ip.ipaddress.ipaddress, + self.vm_1.id + )) + self.try_ssh(self.non_src_nat_ip, hostnames) + return + + @attr(tags = ["advanced", "advancedns", "smoke"]) + def test_assign_and_removal_lb(self): + """Test for 
assign & removing load balancing rule""" + + # Validate: + #1. Verify list API - listLoadBalancerRules lists + # all the rules with the relevant ports + #2. listLoadBalancerInstances will list + # the instances associated with the corresponding rule. + #3. verify ssh attempts should pass as long as there + # is at least one instance associated with the rule + + # Check if VM is in Running state before creating LB rule + vm_response = VirtualMachine.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) + + self.assertEqual( + isinstance(vm_response, list), + True, + "Check list VM returns a valid list" + ) + + self.assertNotEqual( + len(vm_response), + 0, + "Check Port Forwarding Rule is created" + ) + for vm in vm_response: + self.assertEqual( + vm.state, + 'Running', + "VM state should be Running before creating a NAT rule." + ) + + lb_rule = LoadBalancerRule.create( + self.apiclient, + self.services["lbrule"], + self.non_src_nat_ip.ipaddress.id, + self.account.name + ) + lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) + + hostnames = [] + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_2.name, + hostnames, + "Check if ssh succeeded for server2" + ) + #Removing VM and assigning another VM to LB rule + lb_rule.remove(self.apiclient, [self.vm_2]) + + # making hostnames list empty + hostnames[:] = [] + + try: + self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" % + ( + self.non_src_nat_ip.ipaddress, + self.vm_1.id, + )) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + except Exception as e: + self.fail("SSH failed for VM with IP: %s" % + self.non_src_nat_ip.ipaddress) + + lb_rule.assign(self.apiclient, [self.vm_3]) + +# # Making hostnames list empty + hostnames[:] = [] + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_3.name, + hostnames, + "Check if ssh succeeded for server3" + ) + return diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py index ca671210c18..dad5630eccb 100644 --- a/test/integration/smoke/test_network.py +++ b/test/integration/smoke/test_network.py @@ -585,432 +585,7 @@ class TestPortForwarding(cloudstackTestCase): return -class TestLoadBalancingRule(cloudstackTestCase): - @classmethod - def setUpClass(cls): - - cls.api_client = super(TestLoadBalancingRule, cls).getClsTestClient().getApiClient() - cls.services = Services().services - # Get Zone, Domain and templates - cls.domain = get_domain(cls.api_client, cls.services) - cls.zone = get_zone(cls.api_client, cls.services) - template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) - 
cls.services["server"]["zoneid"] = cls.zone.id - - #Create an account, network, VM and IP addresses - cls.account = Account.create( - cls.api_client, - cls.services["account"], - admin=True, - domainid=cls.domain.id - ) - cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) - cls.vm_1 = VirtualMachine.create( - cls.api_client, - cls.services["server"], - templateid=template.id, - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.service_offering.id - ) - cls.vm_2 = VirtualMachine.create( - cls.api_client, - cls.services["server"], - templateid=template.id, - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.service_offering.id - ) - cls.non_src_nat_ip = PublicIPAddress.create( - cls.api_client, - cls.account.name, - cls.zone.id, - cls.account.domainid, - cls.services["server"] - ) - # Open up firewall port for SSH - cls.fw_rule = FireWallRule.create( - cls.api_client, - ipaddressid=cls.non_src_nat_ip.ipaddress.id, - protocol=cls.services["lbrule"]["protocol"], - cidrlist=['0.0.0.0/0'], - startport=cls.services["lbrule"]["publicport"], - endport=cls.services["lbrule"]["publicport"] - ) - cls._cleanup = [ - cls.account, - cls.service_offering - ] - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.cleanup = [] - return - - def tearDown(self): - cleanup_resources(self.apiclient, self.cleanup) - return - - @classmethod - def tearDownClass(cls): - cleanup_resources(cls.api_client, cls._cleanup) - return - - def try_ssh(self, src_nat_ip_addr, hostnames): - try: - self.debug( - "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" % - (self.vm_1.ipaddress, src_nat_ip_addr.ipaddress) - ) - - ssh_1 = remoteSSHClient( - src_nat_ip_addr.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) - - # If Round Robin Algorithm is chosen, - # each ssh command should alternate between VMs - # hostnames = [ssh_1.execute("hostname")[0]] - hostnames.append(ssh_1.execute("hostname")[0]) - - except Exception as e: - self.fail("%s: SSH failed for VM with IP Address: %s" % - (e, src_nat_ip_addr.ipaddress)) - - time.sleep(self.services["lb_switch_wait"]) - return - - @attr(tags = ["advanced", "advancedns", "smoke"]) - def test_01_create_lb_rule_src_nat(self): - """Test to create Load balancing rule with source NAT""" - - # Validate the Following: - #1. listLoadBalancerRules should return the added rule - #2. attempt to ssh twice on the load balanced IP - #3. verify using the hostname of the VM - # that round robin is indeed happening as expected - - src_nat_ip_addrs = list_publicIP( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - self.assertEqual( - isinstance(src_nat_ip_addrs, list), - True, - "Check list response returns a valid list" - ) - src_nat_ip_addr = src_nat_ip_addrs[0] - - # Check if VM is in Running state before creating LB rule - vm_response = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - - self.assertEqual( - isinstance(vm_response, list), - True, - "Check list VM returns a valid list" - ) - - self.assertNotEqual( - len(vm_response), - 0, - "Check Port Forwarding Rule is created" - ) - for vm in vm_response: - self.assertEqual( - vm.state, - 'Running', - "VM state should be Running before creating a NAT rule." 
- ) - - #Create Load Balancer rule and assign VMs to rule - lb_rule = LoadBalancerRule.create( - self.apiclient, - self.services["lbrule"], - src_nat_ip_addr.id, - accountid=self.account.name - ) - self.cleanup.append(lb_rule) - - lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) - - lb_rules = list_lb_rules( - self.apiclient, - id=lb_rule.id - ) - self.assertEqual( - isinstance(lb_rules, list), - True, - "Check list response returns a valid list" - ) - #verify listLoadBalancerRules lists the added load balancing rule - self.assertNotEqual( - len(lb_rules), - 0, - "Check Load Balancer Rule in its List" - ) - self.assertEqual( - lb_rules[0].id, - lb_rule.id, - "Check List Load Balancer Rules returns valid Rule" - ) - - # listLoadBalancerRuleInstances should list all - # instances associated with that LB rule - lb_instance_rules = list_lb_instances( - self.apiclient, - id=lb_rule.id - ) - self.assertEqual( - isinstance(lb_instance_rules, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - len(lb_instance_rules), - 0, - "Check Load Balancer instances Rule in its List" - ) - self.debug("lb_instance_rules Ids: %s, %s" % ( - lb_instance_rules[0].id, - lb_instance_rules[1].id - )) - self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id)) - - self.assertIn( - lb_instance_rules[0].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) - - self.assertIn( - lb_instance_rules[1].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) - - - hostnames = [] - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) - - #SSH should pass till there is a last VM associated with LB rule - lb_rule.remove(self.apiclient, [self.vm_2]) - - # making hostnames list empty - hostnames[:] = [] - - try: - self.debug("SSHing into IP address: %s after removing VM (ID: %s)" % - ( - src_nat_ip_addr.ipaddress, - self.vm_2.id - )) - - self.try_ssh(src_nat_ip_addr, hostnames) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - except Exception as e: - self.fail("%s: SSH failed for VM with IP Address: %s" % - (e, src_nat_ip_addr.ipaddress)) - - lb_rule.remove(self.apiclient, [self.vm_1]) - - with self.assertRaises(Exception): - self.debug("Removed all VMs, trying to SSH") - ssh_1 = remoteSSHClient( - src_nat_ip_addr.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) - ssh_1.execute("hostname")[0] - return - - @attr(tags = ["advanced", "advancedns", "smoke"]) - def test_02_create_lb_rule_non_nat(self): - """Test to create Load balancing rule with source NAT""" - - # Validate the Following: - #1. listLoadBalancerRules should return the added rule - #2. attempt to ssh twice on the load balanced IP - #3. 
verify using the hostname of the VM that - # round robin is indeed happening as expected - - # Check if VM is in Running state before creating LB rule - vm_response = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - - self.assertEqual( - isinstance(vm_response, list), - True, - "Check list VM returns a valid list" - ) - - self.assertNotEqual( - len(vm_response), - 0, - "Check Port Forwarding Rule is created" - ) - for vm in vm_response: - self.assertEqual( - vm.state, - 'Running', - "VM state should be Running before creating a NAT rule." - ) - - #Create Load Balancer rule and assign VMs to rule - lb_rule = LoadBalancerRule.create( - self.apiclient, - self.services["lbrule"], - self.non_src_nat_ip.ipaddress.id, - accountid=self.account.name - ) - self.cleanup.append(lb_rule) - - lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) - - lb_rules = list_lb_rules( - self.apiclient, - id=lb_rule.id - ) - - self.assertEqual( - isinstance(lb_rules, list), - True, - "Check list response returns a valid list" - ) - #verify listLoadBalancerRules lists the added load balancing rule - self.assertNotEqual( - len(lb_rules), - 0, - "Check Load Balancer Rule in its List" - ) - self.assertEqual( - lb_rules[0].id, - lb_rule.id, - "Check List Load Balancer Rules returns valid Rule" - ) - # listLoadBalancerRuleInstances should list - # all instances associated with that LB rule - lb_instance_rules = list_lb_instances( - self.apiclient, - id=lb_rule.id - ) - self.assertEqual( - isinstance(lb_instance_rules, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - len(lb_instance_rules), - 0, - "Check Load Balancer instances Rule in its List" - ) - - self.assertIn( - lb_instance_rules[0].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) - - self.assertIn( - lb_instance_rules[1].id, - [self.vm_1.id, self.vm_2.id], - "Check List Load Balancer instances Rules returns valid VM ID" - ) - try: - hostnames = [] - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) - - #SSH should pass till there is a last VM associated with LB rule - lb_rule.remove(self.apiclient, [self.vm_2]) - - self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % - ( - self.non_src_nat_ip.ipaddress.ipaddress, - self.vm_2.id - )) - # Making host list empty - hostnames[:] = [] - - self.try_ssh(self.non_src_nat_ip.ipaddress, hostnames) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.debug("Hostnames after removing VM2: %s" % str(hostnames)) - except Exception as e: - self.fail("%s: SSH failed for VM with IP Address: %s" % - (e, self.non_src_nat_ip.ipaddress.ipaddress)) - - lb_rule.remove(self.apiclient, [self.vm_1]) - with self.assertRaises(Exception): - self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" % - ( - self.non_src_nat_ip.ipaddress.ipaddress, - self.vm_1.id - )) - ssh_1 = remoteSSHClient( - self.non_src_nat_ip.ipaddress.ipaddress, - 
self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) - ssh_1.execute("hostname")[0] - return class TestRebootRouter(cloudstackTestCase): @@ -1173,226 +748,6 @@ class TestRebootRouter(cloudstackTestCase): return -class TestAssignRemoveLB(cloudstackTestCase): - - def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.services = Services().services - # Get Zone, Domain and templates - self.domain = get_domain(self.apiclient, self.services) - self.zone = get_zone(self.apiclient, self.services) - template = get_template( - self.apiclient, - self.zone.id, - self.services["ostype"] - ) - self.services["server"]["zoneid"] = self.zone.id - - #Create VMs, accounts - self.account = Account.create( - self.apiclient, - self.services["account"], - admin=True, - domainid=self.domain.id - ) - self.service_offering = ServiceOffering.create( - self.apiclient, - self.services["service_offering"] - ) - - self.vm_1 = VirtualMachine.create( - self.apiclient, - self.services["server"], - templateid=template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id - ) - - self.vm_2 = VirtualMachine.create( - self.apiclient, - self.services["server"], - templateid=template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id - ) - - self.vm_3 = VirtualMachine.create( - self.apiclient, - self.services["server"], - templateid=template.id, - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id - ) - - self.cleanup = [ - self.account, - self.service_offering - ] - return - - def tearDown(self): - cleanup_resources(self.apiclient, self.cleanup) - return - - def try_ssh(self, src_nat_ip_addr, hostnames): - try: - self.debug( - "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" % - (self.vm_1.ipaddress, src_nat_ip_addr.ipaddress) - ) - - ssh_1 = remoteSSHClient( - src_nat_ip_addr.ipaddress, - self.services['lbrule']["publicport"], - self.vm_1.username, - self.vm_1.password - ) - - # If Round Robin Algorithm is chosen, - # each ssh command should alternate between VMs - # hostnames = [ssh_1.execute("hostname")[0]] - hostnames.append(ssh_1.execute("hostname")[0]) - - except Exception as e: - self.fail("%s: SSH failed for VM with IP Address: %s" % - (e, src_nat_ip_addr.ipaddress)) - - time.sleep(self.services["lb_switch_wait"]) - return - - @attr(tags = ["advanced", "advancedns", "smoke"]) - def test_assign_and_removal_lb(self): - """Test for assign & removing load balancing rule""" - - # Validate: - #1. Verify list API - listLoadBalancerRules lists - # all the rules with the relevant ports - #2. listLoadBalancerInstances will list - # the instances associated with the corresponding rule. - #3. 
verify ssh attempts should pass as long as there - # is at least one instance associated with the rule - - src_nat_ip_addrs = list_publicIP( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - self.assertEqual( - isinstance(src_nat_ip_addrs, list), - True, - "Check list response returns a valid list" - ) - self.non_src_nat_ip = src_nat_ip_addrs[0] - - # Open up firewall port for SSH - fw_rule = FireWallRule.create( - self.apiclient, - ipaddressid=self.non_src_nat_ip.id, - protocol=self.services["lbrule"]["protocol"], - cidrlist=['0.0.0.0/0'], - startport=self.services["lbrule"]["publicport"], - endport=self.services["lbrule"]["publicport"] - ) - - # Check if VM is in Running state before creating LB rule - vm_response = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - - self.assertEqual( - isinstance(vm_response, list), - True, - "Check list VM returns a valid list" - ) - - self.assertNotEqual( - len(vm_response), - 0, - "Check Port Forwarding Rule is created" - ) - for vm in vm_response: - self.assertEqual( - vm.state, - 'Running', - "VM state should be Running before creating a NAT rule." - ) - - lb_rule = LoadBalancerRule.create( - self.apiclient, - self.services["lbrule"], - self.non_src_nat_ip.id, - self.account.name - ) - lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) - - hostnames = [] - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) - #Removing VM and assigning another VM to LB rule - lb_rule.remove(self.apiclient, [self.vm_2]) - - # making hostnames list empty - hostnames[:] = [] - - try: - self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" % - ( - self.non_src_nat_ip.ipaddress, - self.vm_1.id, - )) - self.try_ssh(self.non_src_nat_ip, hostnames) - - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - except Exception as e: - self.fail("SSH failed for VM with IP: %s" % - self.non_src_nat_ip.ipaddress) - - lb_rule.assign(self.apiclient, [self.vm_3]) - - # Making hostnames list empty - hostnames[:] = [] - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_3.name, - hostnames, - "Check if ssh succeeded for server3" - ) - return - class TestReleaseIP(cloudstackTestCase): def setUp(self): diff --git a/test/integration/smoke/test_routers.py b/test/integration/smoke/test_routers.py index 6fa08e0ba79..22de29bed72 100644 --- a/test/integration/smoke/test_routers.py +++ b/test/integration/smoke/test_routers.py @@ -850,6 +850,7 @@ class TestRouterServices(cloudstackTestCase): gcwait = config[0] total_wait = int(gcinterval.value) + int(gcwait.value) + self.debug("Waiting %s seconds for network cleanup" % str(total_wait * 2)) # Wait for wait_time * 2 time to cleanup all the 
resources time.sleep(total_wait * 2) diff --git a/test/integration/smoke/test_scale_vm.py b/test/integration/smoke/test_scale_vm.py index fd8d61e58f7..1d1726dc90b 100644 --- a/test/integration/smoke/test_scale_vm.py +++ b/test/integration/smoke/test_scale_vm.py @@ -20,15 +20,13 @@ import marvin from marvin.cloudstackTestCase import * from marvin.cloudstackAPI import * -from marvin.remoteSSHClient import remoteSSHClient from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * from nose.plugins.attrib import attr -#Import System modules -import time _multiprocess_shared_ = True + class Services: """Test VM Life Cycle Services """ @@ -36,17 +34,17 @@ class Services: def __init__(self): self.services = { - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended in create account to - # ensure unique username generated each time - "password": "password", - }, - "small": - # Create a small virtual machine instance with disk offering + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "small": + # Create a small virtual machine instance with disk offering { "displayname": "testserver", "username": "root", # VM creds for SSH @@ -57,34 +55,34 @@ class Services: "publicport": 22, "protocol": 'TCP', }, - "service_offerings": + "service_offerings": { - "small": - { - # Small service offering ID to for change VM - # service offering from medium to small - "name": "SmallInstance", - "displaytext": "SmallInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 256, - }, - "big": - { - # Big service offering ID to for change VM - "name": "BigInstance", - "displaytext": "BigInstance", - "cpunumber": 1, - "cpuspeed": 100, - "memory": 512, - } - }, - #Change this - "template": { - "displaytext": "xs", - "name": "xs", - "passwordenabled": False, + "small": + { + # Small service offering ID to for change VM + # service offering from medium to small + "name": "SmallInstance", + "displaytext": "SmallInstance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 256, + }, + "big": + { + # Big service offering ID to for change VM + "name": "BigInstance", + "displaytext": "BigInstance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 512, + } }, + #Change this + "template": { + "displaytext": "xs", + "name": "xs", + "passwordenabled": False, + }, "sleep": 60, "timeout": 10, #Migrate VM to hostid @@ -92,8 +90,8 @@ class Services: # CentOS 5.3 (64-bit) } -class TestScaleVm(cloudstackTestCase): +class TestScaleVm(cloudstackTestCase): @classmethod def setUpClass(cls): cls.api_client = super(TestScaleVm, cls).getClsTestClient().getApiClient() @@ -105,45 +103,44 @@ class TestScaleVm(cloudstackTestCase): cls.services['mode'] = zone.networktype template = get_template( - cls.api_client, - zone.id, - cls.services["ostype"] - ) + cls.api_client, + zone.id, + cls.services["ostype"] + ) # Set Zones and disk offerings ?? cls.services["small"]["zoneid"] = zone.id cls.services["small"]["template"] = template.id # Create account, service offerings, vm. 
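        # The class-level fixtures below follow Marvin's usual pattern: they
        # are created once here and torn down through the cls._cleanup list.
        # Note that this patch adds cls.account back into cls._cleanup (it
        # was previously commented out), so the account, and the VM created
        # under it, is now removed when the class finishes instead of
        # leaking into later runs.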
cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=domain.id - ) + cls.api_client, + cls.services["account"], + domainid=domain.id + ) cls.small_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offerings"]["small"] - ) - + cls.api_client, + cls.services["service_offerings"]["small"] + ) + cls.big_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offerings"]["big"] - ) + cls.api_client, + cls.services["service_offerings"]["big"] + ) #create a virtual machine cls.virtual_machine = VirtualMachine.create( - cls.api_client, - cls.services["small"], - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.small_offering.id, - mode=cls.services["mode"] - ) - #how does it work ?? + cls.api_client, + cls.services["small"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.small_offering.id, + mode=cls.services["mode"] + ) cls._cleanup = [ - cls.small_offering, - #cls.account - ] + cls.small_offering, + cls.account + ] @classmethod def tearDownClass(cls): @@ -161,65 +158,69 @@ class TestScaleVm(cloudstackTestCase): cleanup_resources(self.apiclient, self.cleanup) return - @attr(tags = ["xenserver", "advanced", "basic"]) + @attr(hypervisor="xenserver") + @attr(tags=["advanced", "basic"]) def test_01_scale_vm(self): """Test scale virtual machine """ # Validate the following # Scale up the vm and see if it scales to the new svc offering and is finally in running state - - - + self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % ( - self.virtual_machine.id, - self.big_offering.id, - self.virtual_machine.state - )) - + self.virtual_machine.id, + self.big_offering.id, + self.virtual_machine.state + )) + cmd = scaleVirtualMachine.scaleVirtualMachineCmd() cmd.serviceofferingid = self.big_offering.id cmd.id = self.virtual_machine.id - self.apiclient.scaleVirtualMachine(cmd) + self.apiclient.scaleVirtualMachine(cmd) - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.virtual_machine.id - ) + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id + ) self.assertEqual( - isinstance(list_vm_response, list), - True, - "Check list response returns a valid list" - ) + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list" + ) self.assertNotEqual( - list_vm_response, - None, - "Check virtual machine is listVirtualMachines" - ) + list_vm_response, + None, + "Check virtual machine is listVirtualMachines" + ) vm_response = list_vm_response[0] - self.assertEqual( - vm_response.id, - self.virtual_machine.id, - "Check virtual machine ID of scaled VM" - ) + vm_response.id, + self.virtual_machine.id, + "Check virtual machine ID of scaled VM" + ) + + # VirtualMachine should be updated to tell cloudstack it has PV tools + # available and successfully scaled. 
We will only mock that behaviour + # here but it is not expected in production since the VM scaling is not + # guaranteed until tools are installed, vm rebooted + self.virtual_machine.update(self.apiclient, isdynamicallyscalable='true') self.debug("Scaling VM-ID: %s from service offering: %s to new service offering %s and the response says %s" % ( - self.virtual_machine.id, - self.virtual_machine.serviceofferingid, - self.big_offering.id, - vm_response.serviceofferingid - )) + self.virtual_machine.id, + self.virtual_machine.serviceofferingid, + self.big_offering.id, + vm_response.serviceofferingid + )) self.assertEqual( - vm_response.serviceofferingid, - self.big_offering.id, - "Check service offering of the VM" - ) + vm_response.serviceofferingid, + self.big_offering.id, + "Check service offering of the VM" + ) self.assertEqual( - vm_response.state, - 'Running', - "Check the state of VM" - ) + vm_response.state, + 'Running', + "Check the state of VM" + ) return diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 0213c04cb02..22273d766e4 100644 --- a/test/integration/smoke/test_service_offerings.py +++ b/test/integration/smoke/test_service_offerings.py @@ -20,7 +20,7 @@ import marvin from marvin.cloudstackTestCase import * from marvin.cloudstackAPI import * -from marvin.integration.lib.utils import * +from marvin.integration.lib.utils import isAlmostEqual from marvin.integration.lib.base import * from marvin.integration.lib.common import * from nose.plugins.attrib import attr @@ -431,9 +431,11 @@ class TestServiceOfferings(cloudstackTestCase): self.small_offering.cpuspeed, "Check CPU Speed for small offering" ) - self.assertAlmostEqual( - int(total_mem) / 1024, # In MBs - int(self.small_offering.memory), + self.assertTrue( + isAlmostEqual(int(int(total_mem) / 1024), + int(self.small_offering.memory), + range=20 + ), "Check Memory(kb) for small offering" ) return diff --git a/tools/marvin/marvin/integration/lib/utils.py b/tools/marvin/marvin/integration/lib/utils.py index 839ec89f4fd..4eca8baab20 100644 --- a/tools/marvin/marvin/integration/lib/utils.py +++ b/tools/marvin/marvin/integration/lib/utils.py @@ -35,11 +35,11 @@ def restart_mgmt_server(server): try: # Get the SSH client ssh = is_server_ssh_ready( - server["ipaddress"], - server["port"], - server["username"], - server["password"], - ) + server["ipaddress"], + server["port"], + server["username"], + server["password"], + ) result = ssh.execute("/etc/init.d/cloud-management restart") res = str(result) # Server Stop - OK @@ -57,21 +57,21 @@ def fetch_latest_mail(services, from_mail): # Login to mail server to verify email mail = imaplib.IMAP4_SSL(services["server"]) mail.login( - services["email"], - services["password"] - ) + services["email"], + services["password"] + ) mail.list() mail.select(services["folder"]) date = (datetime.date.today() - datetime.timedelta(1)).strftime("%d-%b-%Y") result, data = mail.uid( - 'search', - None, - '(SENTSINCE {date} HEADER FROM "{mail}")'.format( - date=date, - mail=from_mail - ) - ) + 'search', + None, + '(SENTSINCE {date} HEADER FROM "{mail}")'.format( + date=date, + mail=from_mail + ) + ) # Return False if email is not present if data == []: return False @@ -112,11 +112,11 @@ def is_server_ssh_ready(ipaddress, port, username, password, retries=50, keyPair while True: try: ssh = remoteSSHClient( - host=ipaddress, - port=port, - user=username, - passwd=password, - keyPairFileLocation=keyPairFileLocation) + host=ipaddress, + 
port=port, + user=username, + passwd=password, + keyPairFileLocation=keyPairFileLocation) except Exception as e: if loop_cnt == 0: raise e @@ -129,9 +129,9 @@ def is_server_ssh_ready(ipaddress, port, username, password, retries=50, keyPair def format_volume_to_ext3(ssh_client, device="/dev/sda"): """Format attached storage to ext3 fs""" cmds = [ - "echo -e 'n\np\n1\n\n\nw' | fdisk %s" % device, - "mkfs.ext3 %s1" % device, - ] + "echo -e 'n\np\n1\n\n\nw' | fdisk %s" % device, + "mkfs.ext3 %s1" % device, + ] for c in cmds: ssh_client.execute(c) @@ -143,15 +143,15 @@ def fetch_api_client(config_file='datacenterCfg'): testClientLogger = logging.getLogger("testClient") asyncTimeout = 3600 return cloudstackAPIClient.CloudStackAPIClient( - marvin.cloudstackConnection.cloudConnection( - mgt.mgtSvrIp, - mgt.port, - mgt.apiKey, - mgt.securityKey, - asyncTimeout, - testClientLogger - ) - ) + marvin.cloudstackConnection.cloudConnection( + mgt.mgtSvrIp, + mgt.port, + mgt.apiKey, + mgt.securityKey, + asyncTimeout, + testClientLogger + ) + ) def get_process_status(hostip, port, username, password, linklocalip, process, hypervisor=None): @@ -164,10 +164,10 @@ def get_process_status(hostip, port, username, password, linklocalip, process, h else: ssh_command = "ssh -i ~/.ssh/id_rsa.cloud -ostricthostkeychecking=no " - ssh_command = ssh_command + \ - "-oUserKnownHostsFile=/dev/null -p 3922 %s %s" % ( - linklocalip, - process) + ssh_command = ssh_command +\ + "-oUserKnownHostsFile=/dev/null -p 3922 %s %s" % ( + linklocalip, + process) # Double hop into router timeout = 5 @@ -183,3 +183,14 @@ def get_process_status(hostip, port, username, password, linklocalip, process, h time.sleep(5) timeout = timeout - 1 return res + + +def isAlmostEqual(first_digit, second_digit, range=0): + digits_equal_within_range = False + + try: + if ((first_digit - range) < second_digit < (first_digit + range)): + digits_equal_within_range = True + except Exception as e: + raise e + return digits_equal_within_range diff --git a/ui/scripts/lbStickyPolicy.js b/ui/scripts/lbStickyPolicy.js index 2d132dab57b..02ebabea77a 100644 --- a/ui/scripts/lbStickyPolicy.js +++ b/ui/scripts/lbStickyPolicy.js @@ -259,6 +259,32 @@ } }); }, + delete: function(stickyRuleID, complete, error) { + $.ajax({ + url: createURL('deleteLBStickinessPolicy'), + data: { + id: stickyRuleID + }, + success: function(json) { + cloudStack.ui.notifications.add( + { + desc: 'Remove previous LB sticky rule', + section: 'Network', + poll: pollAsyncJobResult, + _custom: { + jobId: json.deleteLBstickinessrruleresponse.jobid + } + }, + complete, {}, + error, {} + ); + }, + error: function(json) { + complete(); + cloudStack.dialog.notice({ message: parseXMLHttpResponse(json) }); + } + }); + }, recreate: function(stickyRuleID, lbRuleID, data, complete, error) { var addStickyPolicy = function() { cloudStack.lbStickyPolicy.actions.add( @@ -270,43 +296,10 @@ }; // Delete existing rule - if (stickyRuleID) { - $.ajax({ - url: createURL('deleteLBStickinessPolicy'), - data: { - id: stickyRuleID - }, - success: function(json) { - cloudStack.ui.notifications.add( - { - desc: 'Remove previous LB sticky rule', - section: 'Network', - poll: pollAsyncJobResult, - _custom: { - jobId: json.deleteLBstickinessrruleresponse.jobid - } - }, - function() { - if (data.methodname != 'None') { - addStickyPolicy(); - } else { - complete(); - } - }, {}, - error, {} - ); - }, - error: function(json) { - cloudStack.dialog.notice({ - message: parseXMLHttpResponse(json) - }); - error(); - } - }); - } else if 
(data.methodname != 'None') { + if (data.methodname != 'None') { addStickyPolicy(); } else { - complete(); + cloudStack.lbStickyPolicy.actions.delete(stickyRuleID, complete, error); } } } diff --git a/ui/scripts/system.js b/ui/scripts/system.js index e00cb7696e8..dd971edbb5f 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -5946,116 +5946,113 @@ } }, - dedicateZone:{ - label: 'Dedicate Zone', - messages: { - confirm: function(args) { - return 'Do you really want to dedicate this zone to a domain/account? '; - }, - notification: function(args) { - return 'Zone Dedicated'; - } + dedicateZone:{ + label: 'Dedicate Zone', + messages: { + confirm: function(args) { + return 'Do you really want to dedicate this zone to a domain/account? '; }, - createForm:{ - title:'Dedicate Zone', - fields:{ - domainId:{ - label:'Domain', - validation:{required:true}, - select:function(args){ - $.ajax({ - url:createURL("listDomains&listAll=true"), - dataType:"json", - async:false, - success: function(json) { - var domainObjs= json.listdomainsresponse.domain; - var items=[]; - - $(domainObjs).each(function() { - items.push({id:this.id ,description:this.name }); - }); - - args.response.success({ - data: items - }); - } - - - }); + notification: function(args) { + return 'Zone Dedicated'; } }, - - accountId:{ - label:'Account', - docID:'helpAccountForDedication', - validation:{required:false} + createForm:{ + title:'Dedicate Zone', + fields:{ + domainId:{ + label:'Domain', + validation:{required:true}, + select:function(args){ + $.ajax({ + url:createURL("listDomains&listAll=true"), + dataType:"json", + async:false, + success: function(json) { + var domainObjs= json.listdomainsresponse.domain; + var items=[]; - } + $(domainObjs).each(function() { + items.push({id:this.id ,description:this.name }); + }); + + args.response.success({ + data: items + }); + } + }); + } + }, + accountId:{ + label:'Account', + docID:'helpAccountForDedication', + validation:{required:false} } - }, - - action: function(args) { - //EXPLICIT DEDICATION - var array2 = []; - if(args.data.accountId != "") - array2.push("&account=" +todb(args.data.accountId)); - - $.ajax({ - url: createURL("dedicateZone&zoneId=" + args.context.physicalResources[0].id + "&domainId=" +args.data.domainId + array2.join("") ), - dataType: "json", - success: function(json) { - var jid = json.dedicatezoneresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:zoneActionfilter - - }); } - }); - } - - }, - - releaseDedicatedZone:{ - label:'Release Dedicated Zone', - messages:{ - confirm: function(args) { - return 'Do you want to release this dedicated zone ?'; }, - notification: function(args) { - return 'Zone dedication released'; + action: function(args) { + //EXPLICIT DEDICATION + var array2 = []; + if(args.data.accountId != "") + array2.push("&account=" +todb(args.data.accountId)); + + $.ajax({ + url: createURL("dedicateZone&zoneId=" + + args.context.physicalResources[0].id + + "&domainId=" +args.data.domainId + array2.join("")), + dataType: "json", + success: function(json) { + var jid = json.dedicatezoneresponse.jobid; + args.response.success({ + _custom:{ + jobId: jid, + getActionFilter: function() { + return zoneActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult } }, - action:function(args){ - $.ajax({ - url:createURL("releaseDedicatedZone&zoneid=" + args.context.physicalResources[0].id), - dataType:"json", - async:true, - 
success:function(json){ - var jid = json.releasededicatedzoneresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:zoneActionfilter - - }); - }, - error:function(json){ - args.response.error(parseXMLHttpResponse(json)); + releaseDedicatedZone:{ + label:'Release Dedicated Zone', + messages:{ + confirm: function(args) { + return 'Do you want to release this dedicated zone ?'; + }, + notification: function(args) { + return 'Zone dedication released'; } - }); - - } - }, + }, + action:function(args){ + $.ajax({ + url:createURL("releaseDedicatedZone&zoneid="+ + args.context.physicalResources[0].id), + dataType:"json", + async:true, + success:function(json){ + var jid = json.releasededicatedzoneresponse.jobid; + args.response.success({ + _custom:{ + jobId: jid, + getActionFilter: function() { + return zoneActionfilter; + } + } + }); + }, + error:function(json){ + args.response.error(parseXMLHttpResponse(json)); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, 'remove': { label: 'label.action.delete.zone', @@ -9569,120 +9566,115 @@ }, dedicate:{ - label: 'Dedicate Pod', - messages: { - confirm: function(args) { - return 'Do you really want to dedicate this pod to a domain/account? '; + label: 'Dedicate Pod', + messages: { + confirm: function(args) { + return 'Do you really want to dedicate this pod to a domain/account? '; + }, + notification: function(args) { + return 'Pod Dedicated'; + } }, - notification: function(args) { - return 'Pod Dedicated'; - } - }, - createForm:{ - title:'Dedicate Pod', - fields:{ - domainId:{ - label:'Domain', - validation:{required:true}, - select:function(args){ - $.ajax({ - url:createURL("listDomains&listAll=true"), - dataType:"json", - async:false, - success: function(json) { - var domainObjs= json.listdomainsresponse.domain; - var items=[]; + createForm:{ + title:'Dedicate Pod', + fields:{ + domainId:{ + label:'Domain', + validation:{required:true}, + select:function(args){ + $.ajax({ + url:createURL("listDomains&listAll=true"), + dataType:"json", + async:false, + success: function(json) { + var domainObjs= json.listdomainsresponse.domain; + var items=[]; - $(domainObjs).each(function() { - items.push({id:this.id ,description:this.name }); - }); + $(domainObjs).each(function() { + items.push({id:this.id ,description:this.name }); + }); - args.response.success({ - data: items - }); - } - - - }); - } - }, - - accountId:{ - label:'Account', - docID:'helpAccountForDedication', - validation:{required:false} - - } - - - } - }, - action: function(args) { + args.response.success({ + data: items + }); + } + }); + } + }, + accountId:{ + label:'Account', + docID:'helpAccountForDedication', + validation:{required:false} + } + } + }, + action: function(args) { //EXPLICIT DEDICATION - var array2 = []; - if(args.data.accountId != "") - array2.push("&account=" +todb(args.data.accountId)); + var array2 = []; + if(args.data.accountId != "") + array2.push("&account=" +todb(args.data.accountId)); $.ajax({ - url: createURL("dedicatePod&podId=" + args.context.pods[0].id + "&domainId=" +args.data.domainId + array2.join("")), - dataType: "json", - success: function(json) { - var jid = json.dedicatepodresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:podActionfilter - }); + url: createURL("dedicatePod&podId=" + + args.context.pods[0].id + + "&domainId=" +args.data.domainId + array2.join("")), + 
dataType: "json", + success: function(json) { + var jid = json.dedicatepodresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return podActionfilter; + } + }, + }); }, - error:function(json){ - args.response.error(parseXMLHttpResponse(XMLHttpResponse)); - - } - }); - } - - }, - - release:{ - label:'Release Dedicated Pod', - messages:{ - confirm: function(args) { - return 'Do you want to release this dedicated pod ?'; + error:function(json){ + args.response.error(parseXMLHttpResponse(XMLHttpResponse)); + } + }); }, - notification: function(args) { - return 'Pod dedication released'; + notification: { + poll: pollAsyncJobResult } }, - action:function(args){ - $.ajax({ - url:createURL("releaseDedicatedPod&podid=" + args.context.pods[0].id), - dataType:"json", - async:true, - success:function(json){ - var jid = json.releasededicatedpodresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:podActionfilter - - }); - }, - error:function(json){ - args.response.error(parseXMLHttpResponse(json)); + release:{ + label:'Release Dedicated Pod', + messages:{ + confirm: function(args) { + return 'Do you want to release this dedicated pod ?'; + }, + notification: function(args) { + return 'Pod dedication released'; } - }); - - } - }, + }, + action:function(args){ + $.ajax({ + url:createURL("releaseDedicatedPod&podid=" + args.context.pods[0].id), + dataType:"json", + async:true, + success:function(json){ + var jid = json.releasededicatedpodresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return podActionfilter; + } + } + }); + }, + error:function(json){ + args.response.error(parseXMLHttpResponse(json)); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, disable: { @@ -10381,36 +10373,36 @@ //EXPLICIT DEDICATION var array2 = []; - if(args.$form.find('.form-item[rel=isDedicated]').find('input[type=checkbox]').is(':Checked')== true){ + if(args.$form.find('.form-item[rel=isDedicated]').find('input[type=checkbox]').is(':Checked')== true) { if(args.data.accountId != "") array2.push("&account=" +todb(args.data.accountId)); + if(clusterId != null){ + $.ajax({ + url:createURL("dedicateCluster&clusterId=" +clusterId +"&domainId=" +args.data.domainId + array2.join("")), + dataType:"json", + success:function(json){ + var jid = json.dedicateclusterresponse.jobid; + args.response.success({ + _custom: + { jobId: jid + }, + notification: { + poll: pollAsyncJobResult + }, - if(clusterId != null){ - $.ajax({ - url:createURL("dedicateCluster&clusterId=" +clusterId +"&domainId=" +args.data.domainId + array2.join("")), - dataType:"json", - success:function(json){ - var jid = json.dedicateclusterresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, + data:$.extend(item, {state:'Enabled'}) + }); - data:$.extend(item, {state:'Enabled'}) - }); - - }, - - error:function(json){ - args.response.error(parseXMLHttpResponse(XMLHttpResponse)); - } - }); + }, + error:function(json){ + args.response.error(parseXMLHttpResponse(XMLHttpResponse)); + } + }); + } + } else { + args.response.success({data: item}); } - } }, error: function(XMLHttpResponse) { var errorMsg = parseXMLHttpResponse(XMLHttpResponse); @@ -10550,116 +10542,111 @@ } }, - dedicate:{ - label: 'Dedicate Cluster', - messages: { - confirm: function(args) { - return 'Do you really want to dedicate this cluster 
to a domain/account? '; - }, - notification: function(args) { - return 'Cluster Dedicated'; - } + dedicate:{ + label: 'Dedicate Cluster', + messages: { + confirm: function(args) { + return 'Do you really want to dedicate this cluster to a domain/account? '; }, - createForm:{ - title:'Dedicate Cluster', - fields:{ - domainId:{ - label:'Domain', - validation:{required:true}, - select:function(args){ - $.ajax({ - url:createURL("listDomains&listAll=true"), - dataType:"json", - async:false, - success: function(json) { - var domainObjs= json.listdomainsresponse.domain; - var items=[]; - - $(domainObjs).each(function() { - items.push({id:this.id ,description:this.name }); - }); - - args.response.success({ - data: items - }); - } - - - }); - } - }, - - accountId:{ - label:'Account', - docID:'helpAccountForDedication', - validation:{required:false} - - } - - } - }, - action: function(args) { - //EXPLICIT DEDICATION - - var array2 = []; - if(args.data.accountId != "") - array2.push("&account=" +todb(args.data.accountId)); - - $.ajax({ - url: createURL("dedicateCluster&clusterId=" + args.context.clusters[0].id + "&domainId=" +args.data.domainId + array2.join("") ), - dataType: "json", - success: function(json) { - var jid = json.dedicateclusterresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:clusterActionfilter - }); - } - }); + notification: function(args) { + return 'Cluster Dedicated'; } - }, + createForm:{ + title:'Dedicate Cluster', + fields:{ + domainId:{ + label:'Domain', + validation:{required:true}, + select:function(args){ + $.ajax({ + url:createURL("listDomains&listAll=true"), + dataType:"json", + async:false, + success: function(json) { + var domainObjs= json.listdomainsresponse.domain; + var items=[]; - release:{ - label:'Release Dedicated Cluster', - messages:{ - confirm: function(args) { - return 'Do you want to release this dedicated cluster ?'; - }, - notification: function(args) { - return 'Cluster dedication released'; - } - }, - action:function(args){ - $.ajax({ - url:createURL("releaseDedicatedCluster&clusterid=" + args.context.clusters[0].id), - dataType:"json", - async:true, - success:function(json){ - var jid = json.releasededicatedclusterresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:clusterActionfilter + $(domainObjs).each(function() { + items.push({id:this.id ,description:this.name }); + }); - }); - }, - error:function(json){ - args.response.error(parseXMLHttpResponse(json)); + args.response.success({ + data: items + }); + } + }); } - }); - - } + }, + accountId:{ + label:'Account', + docID:'helpAccountForDedication', + validation:{required:false} + } + } }, + action: function(args) { + //EXPLICIT DEDICATION + var array2 = []; + if(args.data.accountId != "") + array2.push("&account=" +todb(args.data.accountId)); + $.ajax({ + url: createURL("dedicateCluster&clusterId=" + + args.context.clusters[0].id + + "&domainId=" +args.data.domainId + array2.join("") ), + dataType: "json", + success: function(json) { + var jid = json.dedicateclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return clusterActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + release:{ + label:'Release Dedicated Cluster', + messages:{ + confirm: function(args) { + return 'Do you want to release this dedicated cluster ?'; + }, + 
notification: function(args) { + return 'Cluster dedication released'; + } + }, + action:function(args){ + $.ajax({ + url:createURL("releaseDedicatedCluster&clusterid=" + args.context.clusters[0].id), + dataType:"json", + async:true, + success:function(json){ + var jid = json.releasededicatedclusterresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return clusterActionfilter; + } + } + }); + }, + error:function(json){ + args.response.error(parseXMLHttpResponse(json)); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, manage: { @@ -11568,9 +11555,8 @@ } }, - - dedicate:{ - label: 'Dedicate Host', + dedicate:{ + label: 'Dedicate Host', messages: { confirm: function(args) { return 'Do you really want to dedicate this host to a domain/account? '; @@ -11580,107 +11566,101 @@ } }, createForm:{ - title:'Dedicate Host', - fields:{ - domainId:{ + title:'Dedicate Host', + fields:{ + domainId:{ label:'Domain', validation:{required:true}, select:function(args){ - $.ajax({ - url:createURL("listDomains&listAll=true"), - dataType:"json", - async:false, - success: function(json) { - var domainObjs= json.listdomainsresponse.domain; - var items=[]; - - $(domainObjs).each(function() { - items.push({id:this.id ,description:this.name }); - }); - - args.response.success({ - data: items - }); - } + $.ajax({ + url:createURL("listDomains&listAll=true"), + dataType:"json", + async:false, + success: function(json) { + var domainObjs= json.listdomainsresponse.domain; + var items=[]; + $(domainObjs).each(function() { + items.push({id:this.id ,description:this.name }); + }); + args.response.success({ + data: items + }); + } }); - } - }, - accountId:{ - label:'Account', - docID:'helpAccountForDedication', - validation:{required:false} - + } + }, + accountId:{ + label:'Account', + docID:'helpAccountForDedication', + validation:{required:false} + } } - - - } }, - action: function(args) { - //EXPLICIT DEDICATION - var array2 = []; - if(args.data.accountId != "") - array2.push("&account=" +todb(args.data.accountId)); + //EXPLICIT DEDICATION + var array2 = []; + if(args.data.accountId != "") + array2.push("&account=" +todb(args.data.accountId)); - $.ajax({ - url: createURL("dedicateHost&hostId=" + args.context.hosts[0].id + "&domainId=" +args.data.domainId + array2.join("") ), + $.ajax({ + url: createURL("dedicateHost&hostId=" + + args.context.hosts[0].id + + "&domainId=" +args.data.domainId + array2.join("")), dataType: "json", success: function(json) { - var jid = json.dedicatehostresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: pollAsyncJobResult - }, - actionFilter:hostActionfilter + var jid = json.dedicatehostresponse.jobid; - - }); + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return hostActionfilter; + } + } + }); } }); + }, + notification: { + poll: pollAsyncJobResult } - }, - - - release:{ + release:{ label:'Release Dedicated Host', messages:{ - confirm: function(args) { + confirm: function(args) { return 'Do you want to release this dedicated host ?'; }, notification: function(args) { return 'Host dedication released'; } }, - action:function(args){ + action:function(args){ $.ajax({ - url:createURL("releaseDedicatedHost&hostid=" + args.context.hosts[0].id), - dataType:"json", - async:true, - success:function(json){ - var jid = json.releasededicatedhostresponse.jobid; - args.response.success({ - _custom: - { jobId: jid - }, - notification: { - poll: 
pollAsyncJobResult - }, - actionFilter:hostActionfilter - - }); + url:createURL("releaseDedicatedHost&hostid=" + args.context.hosts[0].id), + dataType:"json", + async:true, + success:function(json){ + var jid = json.releasededicatedhostresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return hostActionfilter; + } + } + }); }, error:function(json){ args.response.error(parseXMLHttpResponse(json)); } }); - - } + }, + notification: { + poll: pollAsyncJobResult + } },
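The zone, pod, cluster, and host dedication actions above all share one shape: call the dedicate/release API, hand the async job id to pollAsyncJobResult through _custom.jobId, and resolve the action filter lazily via getActionFilter. For completeness, here is a minimal Marvin-side sketch of the same host flow. It assumes the dedicateHost and releaseDedicatedHost command modules are generated under marvin.cloudstackAPI with the usual <apiName>Cmd naming, the same convention scaleVirtualMachine follows earlier in this patch; verify the generated package before relying on the exact names.

```python
# Hypothetical helper, not part of this patch: exercises the same
# dedicateHost/releaseDedicatedHost APIs that the UI actions above call.
# Module and attribute names are assumed from Marvin's generated API.
from marvin.cloudstackAPI import dedicateHost, releaseDedicatedHost


def dedicate_then_release_host(apiclient, hostid, domainid, account=None):
    """Dedicate a host to a domain (optionally an account), then release it."""
    cmd = dedicateHost.dedicateHostCmd()
    cmd.hostid = hostid
    cmd.domainid = domainid
    if account:
        # Optional, mirroring the UI's optional accountId field
        cmd.account = account
    apiclient.dedicateHost(cmd)

    # Both calls are async jobs; the Marvin client blocks on the result
    # (cf. the asyncTimeout passed in fetch_api_client above).
    release_cmd = releaseDedicatedHost.releaseDedicatedHostCmd()
    release_cmd.hostid = hostid
    apiclient.releaseDedicatedHost(release_cmd)
```

The pod, cluster, and zone variants differ only in the id parameter (podid, clusterid, zoneid) and in the response key that carries the job id.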