# -*- encoding: utf-8 -*-
# Copyright 2012 Citrix Systems, Inc. Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. Citrix Systems, Inc.
# reserves all rights not expressly granted by the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Automatically generated by addcopyright.py at 04/03/2012

""" P1 for VMware DRS testing
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
#Import System modules
import time


class Services:
    """Test data for the VMware DRS test suite.

    The "anti_affinity" and "affinity" sections must be edited by hand
    before a run: they name the two hosts of the DRS cluster and the IDs
    of two VMs that were deployed manually through the UI with the
    corresponding DRS rule already applied.  "full_host" names a host
    that is already at (or near) full capacity.
    """

    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended in create account to
                # ensure unique username generated each time
                "password": "password",
            },
            "virtual_machine": {
                "displayname": "testserver",
                "username": "root",         # VM creds for SSH
                "password": "password",
                "ssh_port": 22,
                "hypervisor": 'XenServer',
                "privateport": 22,
                "publicport": 22,
                "protocol": 'TCP',
            },
            "service_offering": {
                "name": "Tiny Instance",
                "displaytext": "Tiny Instance",
                "cpunumber": 1,
                "cpuspeed": 100,            # in MHz
                "memory": 128,              # In MBs
            },
            "anti_affinity": {
                "host_1": "10.147.29.55",
                "host_2": "10.147.29.61",
                "vm_2": '73973255-9354-4b1a-b98f-30a3531fd16d',
                "vm_1": '11d4e127-7e07-47bb-8ec9-6a9e9ac672ce',
                # VM IDs created manually on host 1 and anti-affinity rules added
            },
            "affinity": {
                "host_1": "10.147.29.55",
                "host_2": "10.147.29.61",
                "vm_2": '73973255-9354-4b1a-b98f-30a3531fd16d',
                "vm_1": '11d4e127-7e07-47bb-8ec9-6a9e9ac672ce',
                # VM IDs created manually on host 1 and affinity rules added
            },
            "sleep": 60,        # seconds to wait between host-state polls
            "timeout": 10,      # number of polls before giving up
            "full_host": "10.147.29.53",
            "ostypeid": 'd96fc3f0-a1d3-4498-88aa-a7a1ca96c1bb',
            # CentOS 5.3 (64-bit)
        }


def _find_host(testcase, hosts, name):
    """Return the host in ``hosts`` whose name equals ``name``.

    Fails the calling test with a clear message (instead of raising a bare
    IndexError) when the configured host name is not present in the list.
    """
    matches = [host for host in hosts if host.name == name]
    testcase.assertNotEqual(
        len(matches),
        0,
        "Host %s must be present in the list of hosts" % name
    )
    return matches[0]


def _get_running_vm(testcase, apiclient, vmid):
    """Fetch the VM with id ``vmid`` and assert it exists and is Running.

    Factors out the list-VM / validate-response / check-state sequence
    that every test in this module repeats.  Returns the VM record.
    """
    vms = VirtualMachine.list(
        apiclient,
        id=vmid,
        listall=True
    )
    testcase.assertEqual(
        isinstance(vms, list),
        True,
        "List VMs should return valid response for deployed VM"
    )
    testcase.assertNotEqual(
        len(vms),
        0,
        "List VMs should return valid response for deployed VM"
    )
    vm = vms[0]
    testcase.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in Running state"
    )
    return vm


class TestVMPlacement(cloudstackTestCase):
    """Verify DRS places a new VM away from an already-full host."""

    @classmethod
    def setUpClass(cls):
        cls.api_client = super(
            TestVMPlacement,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(
            cls.api_client,
            cls.services
        )
        cls.zone = get_zone(
            cls.api_client,
            cls.services
        )
        cls.pod = get_pod(
            cls.api_client,
            zoneid=cls.zone.id,
            services=cls.services
        )
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        cls.services["virtual_machine"]["zoneid"] = cls.zone.id
        cls.services["virtual_machine"]["template"] = cls.template.id

        # offerha=True so the VM is eligible for HA/DRS migration
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"],
            offerha=True
        )
        cls._cleanup = [
            cls.service_offering,
        ]
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.services["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = [self.account]
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created accounts, domains etc
            cleanup_resources(self.apiclient, self.cleanup)
            self.testClient.close()
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def test_vm_creation_in_fully_automated_mode(self):
        """ Test VM Creation in automation mode = Fully automated
        This test requires following preconditions:
            - There should be minimum 2 hosts added to cluster
            - One of the host should be full of capacity. This host's name
              should be specified in "full_host" configuration
            - Another host should have some capacity remaining
            - DRS Cluster is configured in "Fully automated" mode
        """

        # Validate the following
        # 1. Create a new VM in a host which is almost fully utilized
        # 2. Automatically places VM on the other host
        # 3. VM state is running after deployment

        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            resourcestate='Enabled',
            type='Routing'
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return valid host response"
        )
        self.assertGreaterEqual(
            len(hosts),
            2,
            "There must be at least two hosts present in a cluster"
        )
        self.debug(
            "Finding the host details of host: %s" %
            self.services["full_host"])

        full_host = _find_host(self, hosts, self.services["full_host"])

        self.debug("Deploying VM in account: %s" % self.account.account.name)
        # Spawn an instance; DRS should place it away from the full host
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.account.name,
            domainid=self.account.account.domainid,
            serviceofferingid=self.service_offering.id
        )
        vm = _get_running_vm(self, self.apiclient, virtual_machine.id)
        self.assertNotEqual(
            vm.hostid,
            full_host.id,
            "Host Ids of two should not match as one host is full"
        )
        return


class TestAntiAffinityRules(cloudstackTestCase):
    """Verify DRS honours an anti-affinity rule during host maintenance."""

    @classmethod
    def setUpClass(cls):
        cls.api_client = super(
            TestAntiAffinityRules,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(
            cls.api_client,
            cls.services
        )
        cls.zone = get_zone(
            cls.api_client,
            cls.services
        )
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created accounts, domains etc
            cleanup_resources(self.apiclient, self.cleanup)
            self.testClient.close()
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def _wait_for_maintenance(self, host):
        """Poll until ``host`` reaches Maintenance state or fail the test.

        Polls ``timeout`` times, sleeping ``sleep`` seconds between polls.
        """
        timeout = self.services["timeout"]
        while timeout > 0:
            hosts = Host.list(
                self.apiclient,
                zoneid=self.zone.id,
                type='Routing',
                id=host.id
            )
            if isinstance(hosts, list) and \
                    hosts[0].resourcestate == 'Maintenance':
                return
            time.sleep(self.services["sleep"])
            timeout = timeout - 1
        self.fail("Failed to put host: %s in maintenance mode" % host.name)

    def test_vmware_anti_affinity(self):
        """ Test Set up anti-affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
        - There should be atleast 2 VMs deployed on one of the clusters
          through cloudstack UI manually
        - Anti affinity rule should be set on the 2 VMs deployed.
        - Add host names to host_1,host_2 and IDs of VM 1,2 in the settings
          class "anti_affinity" above.
        """

        # Validate the following
        # 1. Deploy VMs on host 1 and 2
        # 2. Enable maintenance mode for host 1
        # 3. VM should be migrated to 3rd host

        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            resourcestate='Enabled',
            type='Routing'
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return valid host response"
        )
        self.assertGreaterEqual(
            len(hosts),
            3,
            "There must be at least three hosts present in a cluster"
        )
        self.debug(
            "Finding the host details of host_1: %s" %
            self.services["anti_affinity"]["host_1"])

        host_1 = _find_host(
            self, hosts, self.services["anti_affinity"]["host_1"])
        host_2 = _find_host(
            self, hosts, self.services["anti_affinity"]["host_2"])

        virtual_machine_1 = _get_running_vm(
            self, self.apiclient, self.services["anti_affinity"]["vm_1"])
        self.debug("VM State: %s" % virtual_machine_1.state)

        virtual_machine_2 = _get_running_vm(
            self, self.apiclient, self.services["anti_affinity"]["vm_2"])
        self.debug("VM %s State: %s" % (
            virtual_machine_2.name,
            virtual_machine_2.state
        ))

        self.debug("Enabling maintenance mode on host_1: %s" % host_1.name)
        cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
        cmd.id = host_1.id
        self.apiclient.prepareHostForMaintenance(cmd)

        self._wait_for_maintenance(host_1)

        # VM 1 must have been evacuated, but the anti-affinity rule must
        # keep it off host_2 (where VM 2 lives)
        vm = _get_running_vm(self, self.apiclient, virtual_machine_1.id)
        self.assertNotEqual(
            vm.hostid,
            host_2.id,
            "The host name should not match with second host name"
        )

        self.debug("Canceling host maintenance for ID: %s" % host_1.id)
        cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
        cmd.id = host_1.id
        self.apiclient.cancelHostMaintenance(cmd)
        self.debug("Maintenance mode canceled for host: %s" % host_1.id)
        return


class TestAffinityRules(cloudstackTestCase):
    """Verify DRS honours an affinity rule when one VM is migrated."""

    @classmethod
    def setUpClass(cls):
        cls.api_client = super(
            TestAffinityRules,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone, Domain and templates
        cls.domain = get_domain(
            cls.api_client,
            cls.services
        )
        cls.zone = get_zone(
            cls.api_client,
            cls.services
        )
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.services["ostypeid"]
        )
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created accounts, domains etc
            cleanup_resources(self.apiclient, self.cleanup)
            self.testClient.close()
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def test_vmware_affinity(self):
        """ Test Set up affinity rules

        The test requires following pre-requisites
        - VMWare cluster configured in fully automated mode
        - There should be atleast 2 VMs deployed on one of the clusters
          through cloudstack UI manually
        - Affinity rule should be set on the 2 VMs deployed.
        - Add host names to host_1,host_2 and IDs of VM 1,2 in the settings
          class "affinity" above.
        """

        # Validate the following
        # 1. Deploy VMs 2 VMs on same hosts
        # 2. Migrate one VM from one host to another
        # 3. The second VM should also get migrated

        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            resourcestate='Enabled',
            type='Routing'
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return valid host response"
        )
        self.assertGreaterEqual(
            len(hosts),
            2,
            "There must be at least two hosts present in a cluster"
        )
        self.debug(
            "Finding the host details of host_1: %s" %
            self.services["affinity"]["host_1"])

        host_1 = _find_host(
            self, hosts, self.services["affinity"]["host_1"])
        host_2 = _find_host(
            self, hosts, self.services["affinity"]["host_2"])

        virtual_machine_1 = _get_running_vm(
            self, self.apiclient, self.services["affinity"]["vm_1"])

        # NOTE: this class creates no account in setUp; the original code
        # dereferenced self.account here, which would raise AttributeError.
        self.debug(
            "Fetching details of VM 2: %s" %
            self.services["affinity"]["vm_2"])

        virtual_machine_2 = _get_running_vm(
            self, self.apiclient, self.services["affinity"]["vm_2"])

        self.debug("Migrate VM from host_1 to host_2")
        cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
        cmd.virtualmachineid = virtual_machine_2.id
        cmd.hostid = host_2.id
        self.apiclient.migrateVirtualMachine(cmd)
        self.debug("Migrated VM from host_1 to host_2")

        # The affinity rule should drag VM 1 onto host_2 as well
        vms = VirtualMachine.list(
            self.apiclient,
            hostid=host_2.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List VMs should return valid response for deployed VM"
        )
        self.assertNotEqual(
            len(vms),
            0,
            "List VMs should return valid response for deployed VM"
        )
        vmids = [vm.id for vm in vms]
        self.assertIn(
            virtual_machine_1.id,
            vmids,
            "VM 1 should be successfully migrated to host 2"
        )
        self.assertIn(
            virtual_machine_2.id,
            vmids,
            "VM 2 should be automatically migrated to host 2"
        )
        return