From 7791454516a00ddf02ec1703cca57d5f94f90e35 Mon Sep 17 00:00:00 2001 From: rayeesn Date: Sat, 29 Jun 2013 15:07:15 -0700 Subject: [PATCH 1/8] Reduced resource usage in vpc test cases Signed-off-by: Prasanna Santhanam (cherry picked from commit e14f355a0bfd7d76ff953e39e1c5b4fee597f917) --- test/integration/component/test_vpc_network_lbrules.py | 4 ++-- test/integration/component/test_vpc_network_pfrules.py | 4 ++-- test/integration/component/test_vpc_network_staticnatrule.py | 4 ++-- test/integration/component/test_vpc_routers.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/integration/component/test_vpc_network_lbrules.py b/test/integration/component/test_vpc_network_lbrules.py index b0357fa8de1..865cf0e2a78 100644 --- a/test/integration/component/test_vpc_network_lbrules.py +++ b/test/integration/component/test_vpc_network_lbrules.py @@ -65,8 +65,8 @@ class Services: "name": "Tiny Instance", "displaytext": "Tiny Instance", "cpunumber": 1, - "cpuspeed": 1000, - "memory": 512, + "cpuspeed": 100, + "memory": 128, }, "network_offering": { "name": 'VPC Network offering', diff --git a/test/integration/component/test_vpc_network_pfrules.py b/test/integration/component/test_vpc_network_pfrules.py index b478b6a1780..ad7bbae7fe7 100644 --- a/test/integration/component/test_vpc_network_pfrules.py +++ b/test/integration/component/test_vpc_network_pfrules.py @@ -62,8 +62,8 @@ class Services: "name": "Tiny Instance", "displaytext": "Tiny Instance", "cpunumber": 1, - "cpuspeed": 1000, - "memory": 512, + "cpuspeed": 100, + "memory": 128, }, "network_offering": { "name": 'VPC Network offering', diff --git a/test/integration/component/test_vpc_network_staticnatrule.py b/test/integration/component/test_vpc_network_staticnatrule.py index c5d9e57434d..1410f5e50c5 100644 --- a/test/integration/component/test_vpc_network_staticnatrule.py +++ b/test/integration/component/test_vpc_network_staticnatrule.py @@ -61,8 +61,8 @@ class Services: "name": "Tiny Instance", "displaytext": "Tiny Instance", "cpunumber": 1, - "cpuspeed": 1000, - "memory": 512, + "cpuspeed": 100, + "memory": 128, }, "network_offering": { "name": 'VPC Network offering', diff --git a/test/integration/component/test_vpc_routers.py b/test/integration/component/test_vpc_routers.py index a8559e5cc6c..043a01b63c5 100644 --- a/test/integration/component/test_vpc_routers.py +++ b/test/integration/component/test_vpc_routers.py @@ -49,7 +49,7 @@ class Services: "displaytext": "Tiny Instance", "cpunumber": 1, "cpuspeed": 100, - "memory": 64, + "memory": 128, }, "service_offering_new": { "name": "Small Instance", From 8244ae3ab6c3438a02f110f49ecf84e57fadfa8a Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 30 Jun 2013 12:50:32 +0530 Subject: [PATCH 2/8] Fix test_assign_vm to be discovered by nose testrunner the nosetests runner will not discover tests unless the tests start with the test_ prefix. When wrapped by the log_test_exceptions method, the name of the method changes and no longer starts with test_, causing the runner to skip the test. Included the __doc__ string as well so the runner can report results correctly.
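For reference, functools.wraps from the standard library gets the same effect in one step, copying both __name__ (so the wrapper keeps the test_ prefix that nose matches on) and __doc__ onto the wrapper. A minimal sketch of that alternative, not the change this patch actually applies:

    import functools

    def log_test_exceptions(func):
        # wraps() copies func.__name__ and func.__doc__ onto the wrapper,
        # so nose still discovers the test and reports it correctly.
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                self.debug('Test %s Failed due to Exception=%s' % (func, e))
                raise
        return wrapper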
Signed-off-by: Prasanna Santhanam (cherry picked from commit 2a51c3e2c9e7fd7d96afb9b9fee7199329bb7fdf) --- test/integration/component/test_assign_vm.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/integration/component/test_assign_vm.py b/test/integration/component/test_assign_vm.py index 1dc93a81417..8bc98fec55c 100644 --- a/test/integration/component/test_assign_vm.py +++ b/test/integration/component/test_assign_vm.py @@ -40,13 +40,14 @@ from marvin.integration.lib.common import (get_domain, list_virtual_machines) def log_test_exceptions(func): - def _log_test_exceptions(self, *args, **kwargs): + def test_wrap_exception_log(self, *args, **kwargs): try: func(self, *args, **kwargs) except Exception as e: self.debug('Test %s Failed due to Exception=%s' % (func, e)) raise e - return _log_test_exceptions + test_wrap_exception_log.__doc__ = func.__doc__ + return test_wrap_exception_log class Services: """Test service data for:Change the ownershop of @@ -229,6 +230,7 @@ class TestVMOwnership(cloudstackTestCase): except Exception as e: self.debug("Warning! Exception in tearDown: %s" % e) + @attr(tags = ["advanced"]) @log_test_exceptions def test_01_move_across_different_domains(self): From 154c24e5616d4387f5254eff970f00e2e271b041 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 30 Jun 2013 13:20:27 +0530 Subject: [PATCH 3/8] Fix tab/space issues Several test failures occurred due to tab issues Signed-off-by: Prasanna Santhanam (cherry picked from commit c7315975d22e95f48eee929583f8ec3c30f65094) --- test/integration/component/test_accounts.py | 2 +- .../component/test_affinity_groups.py | 131 ++--- .../component/test_netscaler_configs.py | 8 +- .../component/test_shared_networks.py | 2 +- .../integration/component/test_vpc_routers.py | 76 ++- ...ploy_vms_with_varied_deploymentplanners.py | 48 +- test/integration/smoke/test_network.py | 173 ++++--- test/integration/smoke/test_vm_snapshots.py | 451 +++++++++--------- 8 files changed, 409 insertions(+), 482 deletions(-) diff --git a/test/integration/component/test_accounts.py b/test/integration/component/test_accounts.py index ee38c6ddc66..65c0c6ff49e 100644 --- a/test/integration/component/test_accounts.py +++ b/test/integration/component/test_accounts.py @@ -753,7 +753,7 @@ class TestServiceOfferingHierarchy(cloudstackTestCase): domainid=cls.domain_2.id ) - cls._cleanup = [ + cls._cleanup = [ cls.account_2, cls.domain_2, cls.service_offering, diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py index 79e35e0d61d..44bf90c26dd 100644 --- a/test/integration/component/test_affinity_groups.py +++ b/test/integration/component/test_affinity_groups.py @@ -304,15 +304,14 @@ class TestListAffinityGroups(cloudstackTestCase): def tearDown(self): try: - cls.api_client = super(TestListAffinityGroups, cls).getClsTestClient().getApiClient() + self.api_client = super(TestListAffinityGroups, self).getClsTestClient().getApiClient() #Clean up, terminate the created templates - cleanup_resources(cls.api_client, cls.cleanup) + cleanup_resources(self.api_client, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @classmethod def tearDownClass(cls): - try: cls.api_client = super(TestListAffinityGroups, cls).getClsTestClient().getApiClient() #Clean up, terminate the created templates @@ -327,10 +326,6 @@ class TestListAffinityGroups(cloudstackTestCase): api_client = self.api_client if aff_grp == None: 
self.services["host_anti_affinity_0"] - #if acc == None: - # acc = self.account.name - #if domainid == None: - # domainid = self.domain.id try: self.aff_grp.append(AffinityGroup.create(api_client, @@ -339,34 +334,25 @@ class TestListAffinityGroups(cloudstackTestCase): raise Exception("Error: Creation of Affinity Group failed : %s" %e) def create_vm_in_aff_grps(self, ag_list): - #try: - self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) - vm = VirtualMachine.create( - self.api_client, - self.services["virtual_machine"], - templateid=self.template.id, - #accountid=self.account.name, - #domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - affinitygroupnames=ag_list - ) - self.debug('Created VM=%s in Affinity Group=%s' % - (vm.id, ag_list[0])) - #except Exception: - #self.debug('Unable to create VM in a Affinity Group=%s' - # % ag_list[0]) - - list_vm = list_virtual_machines(self.api_client, id=vm.id) + self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) + vm = VirtualMachine.create( + self.api_client, + self.services["virtual_machine"], + templateid=self.template.id, + serviceofferingid=self.service_offering.id, + affinitygroupnames=ag_list + ) + self.debug('Created VM=%s in Affinity Group=%s' % + (vm.id, ag_list[0])) + list_vm = list_virtual_machines(self.api_client, id=vm.id) self.assertEqual(isinstance(list_vm, list), True, "Check list response returns a valid list") self.assertNotEqual(len(list_vm),0, "Check VM available in List Virtual Machines") - vm_response = list_vm[0] self.assertEqual(vm_response.state, 'Running', msg="VM is not in Running state") - return vm, vm_response.hostid def test_01_list_aff_grps_for_vm(self): @@ -543,11 +529,6 @@ class TestDeleteAffinityGroups(cloudstackTestCase): api_client = self.api_client if aff_grp == None: self.services["host_anti_affinity_0"] - #if acc == None: - # acc = self.account.name - #if domainid == None: - # domainid = self.domain.id - try: self.aff_grp.append(AffinityGroup.create(api_client, aff_grp, acc, domainid)) @@ -555,24 +536,18 @@ class TestDeleteAffinityGroups(cloudstackTestCase): raise Exception("Error: Creation of Affinity Group failed : %s" %e) def create_vm_in_aff_grps(self, ag_list): - #try: - self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) - vm = VirtualMachine.create( - self.api_client, - self.services["virtual_machine"], - templateid=self.template.id, - #accountid=self.account.name, - #domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - affinitygroupnames=ag_list - ) - self.debug('Created VM=%s in Affinity Group=%s' % - (vm.id, ag_list[0])) - #except Exception: - #self.debug('Unable to create VM in a Affinity Group=%s' - # % ag_list[0]) + self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) + vm = VirtualMachine.create( + self.api_client, + self.services["virtual_machine"], + templateid=self.template.id, + serviceofferingid=self.service_offering.id, + affinitygroupnames=ag_list + ) + self.debug('Created VM=%s in Affinity Group=%s' % + (vm.id, ag_list[0])) - list_vm = list_virtual_machines(self.api_client, id=vm.id) + list_vm = list_virtual_machines(self.api_client, id=vm.id) self.assertEqual(isinstance(list_vm, list), True, "Check list response returns a valid list") @@ -817,11 +792,6 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase): api_client = self.api_client if aff_grp == None: self.services["host_anti_affinity_0"] - #if acc == None: - # acc = self.account.name - #if domainid == None: - # domainid = self.domain.id - try: 
self.aff_grp.append(AffinityGroup.create(api_client, aff_grp, acc, domainid)) @@ -829,24 +799,18 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase): raise Exception("Error: Creation of Affinity Group failed : %s" %e) def create_vm_in_aff_grps(self, ag_list): - #try: - self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) - vm = VirtualMachine.create( - self.api_client, + self.debug('Creating VM in AffinityGroup=%s' % ag_list[0]) + vm = VirtualMachine.create( + self.api_client, self.services["virtual_machine"], templateid=self.template.id, - #accountid=self.account.name, - #domainid=self.account.domainid, serviceofferingid=self.service_offering.id, affinitygroupnames=ag_list ) - self.debug('Created VM=%s in Affinity Group=%s' % - (vm.id, ag_list[0])) - #except Exception: - #self.debug('Unable to create VM in a Affinity Group=%s' - # % ag_list[0]) + self.debug('Created VM=%s in Affinity Group=%s' % + (vm.id, ag_list[0])) - list_vm = list_virtual_machines(self.api_client, id=vm.id) + list_vm = list_virtual_machines(self.api_client, id=vm.id) self.assertEqual(isinstance(list_vm, list), True, "Check list response returns a valid list") @@ -996,7 +960,7 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase): vm1.start(self.api_client) list_aff_grps = AffinityGroup.list(self.api_client, - virtualmachineid=vm.id) + virtualmachineid=vm1.id) self.assertEqual(list_aff_grps, [], "The affinity groups list is not empyty") vm1.delete(self.api_client) @@ -1096,10 +1060,6 @@ class TestDeployVMAffinityGroups(cloudstackTestCase): api_client = self.api_client if aff_grp == None: self.services["host_anti_affinity_0"] - #if acc == None: - # acc = self.account.name - #if domainid == None: - # domainid = self.domain.id try: self.aff_grp.append(AffinityGroup.create(api_client, @@ -1111,21 +1071,19 @@ class TestDeployVMAffinityGroups(cloudstackTestCase): if api_client == None: api_client = self.api_client - self.debug('Creating VM in AffinityGroup=%s' % ag_list) - vm = VirtualMachine.create( + self.debug('Creating VM in AffinityGroup=%s' % ag_list) + vm = VirtualMachine.create( api_client, self.services["virtual_machine"], templateid=self.template.id, - #accountid=self.account.name, - #domainid=self.account.domainid, serviceofferingid=self.service_offering.id, affinitygroupnames=ag_list, affinitygroupids=ag_ids ) - self.debug('Created VM=%s in Affinity Group=%s' % - (vm.id, ag_list)) + self.debug('Created VM=%s in Affinity Group=%s' % + (vm.id, ag_list)) - list_vm = list_virtual_machines(self.api_client, id=vm.id) + list_vm = list_virtual_machines(self.api_client, id=vm.id) self.assertEqual(isinstance(list_vm, list), True, "Check list response returns a valid list") @@ -1143,7 +1101,6 @@ class TestDeployVMAffinityGroups(cloudstackTestCase): """ Deploy VM without affinity group """ - vm1, hostid1 = self.create_vm_in_aff_grps() vm1.delete(self.api_client) @@ -1441,10 +1398,6 @@ class TestAffinityGroupsAdminUser(cloudstackTestCase): api_client = self.api_client if aff_grp == None: self.services["host_anti_affinity_0"] - #if acc == None: - # acc = self.account.name - #if domainid == None: - # domainid = self.domain.id try: self.aff_grp.append(AffinityGroup.create(api_client, @@ -1456,21 +1409,19 @@ class TestAffinityGroupsAdminUser(cloudstackTestCase): if api_client == None: api_client = self.api_client - self.debug('Creating VM in AffinityGroup=%s' % ag_list) - vm = VirtualMachine.create( + self.debug('Creating VM in AffinityGroup=%s' % ag_list) + vm = VirtualMachine.create( api_client, 
self.services["virtual_machine"], templateid=self.template.id, - #accountid=self.account.name, - #domainid=self.account.domainid, serviceofferingid=self.service_offering.id, affinitygroupnames=ag_list, affinitygroupids=ag_ids ) - self.debug('Created VM=%s in Affinity Group=%s' % - (vm.id, ag_list)) + self.debug('Created VM=%s in Affinity Group=%s' % + (vm.id, ag_list)) - list_vm = list_virtual_machines(self.api_client, id=vm.id) + list_vm = list_virtual_machines(self.api_client, id=vm.id) self.assertEqual(isinstance(list_vm, list), True, "Check list response returns a valid list") diff --git a/test/integration/component/test_netscaler_configs.py b/test/integration/component/test_netscaler_configs.py index 5de0843b49b..d26da47a5f5 100644 --- a/test/integration/component/test_netscaler_configs.py +++ b/test/integration/component/test_netscaler_configs.py @@ -730,9 +730,9 @@ class TestNetScalerDedicated(cloudstackTestCase): networkofferingid=self.network_offering.id, zoneid=self.zone.id ) - self.debug("Deploying an instance in account: %s" % self.account_2.account.name) + self.debug("Deploying an instance in account: %s" % self.account_2.account.name) with self.assertRaises(Exception): - VirtualMachine.create( + VirtualMachine.create( self.apiclient, self.services["virtual_machine"], accountid=self.account_2.account.name, @@ -740,7 +740,7 @@ class TestNetScalerDedicated(cloudstackTestCase): serviceofferingid=self.service_offering.id, networkids=[str(self.network.id)] ) - self.debug("Deply instacne in dedicated Network offering mode failed") + self.debug("Deply instance in dedicated Network offering mode failed") return @@ -1285,7 +1285,7 @@ class TestNetScalerNoCapacity(cloudstackTestCase): ) if isinstance(physical_networks, list): physical_network = physical_networks[0] - cls.services["netscaler"]["lbdevicecapacity"] = 2 + cls.services["netscaler"]["lbdevicecapacity"] = 2 cls.netscaler = NetScaler.add( cls.api_client, cls.services["netscaler"], diff --git a/test/integration/component/test_shared_networks.py b/test/integration/component/test_shared_networks.py index 6bcfbfdfb39..8f59dfe82cb 100644 --- a/test/integration/component/test_shared_networks.py +++ b/test/integration/component/test_shared_networks.py @@ -2098,7 +2098,7 @@ class TestSharedNetworks(cloudstackTestCase): networkofferingid=self.shared_network_offering.id, zoneid=self.zone.id, ) - self.cleanup_networks.append(self.network1) + self.cleanup_networks.append(self.network1) self.fail("Network got created with used vlan id, which is invalid") except Exception as e: self.debug("Network creation failed because the valn id being used by another network.") diff --git a/test/integration/component/test_vpc_routers.py b/test/integration/component/test_vpc_routers.py index 043a01b63c5..3501110bca1 100644 --- a/test/integration/component/test_vpc_routers.py +++ b/test/integration/component/test_vpc_routers.py @@ -424,9 +424,9 @@ class TestVPCRoutersBasic(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_02_reboot_router_after_creating_vpc(self): - """ Test to reboot the router after creating a VPC - """ - # Validate the following + """ Test to reboot the router after creating a VPC + """ + # Validate the following # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Reboot the VPC Virtual Router which is created as a result of VPC creation. 
# Stop the VPC Router @@ -473,9 +473,9 @@ class TestVPCRoutersBasic(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_03_destroy_router_after_creating_vpc(self): - """ Test to destroy the router after creating a VPC - """ - # Validate the following + """ Test to destroy the router after creating a VPC + """ + # Validate the following # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Destroy the VPC Virtual Router which is created as a result of VPC creation. self.validate_vpc_offering(self.vpc_off) @@ -528,15 +528,15 @@ class TestVPCRoutersBasic(cloudstackTestCase): "List Routers should return a valid list" ) self.migrate_router(routers[0]) - return + return @attr(tags=["advanced", "intervlan"]) def test_05_change_service_offerring_vpc(self): - """ Tests to change service offering of the Router after - creating a vpc - """ + """ Tests to change service offering of the Router after + creating a vpc + """ - # Validate the following + # Validate the following # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Change the service offerings of the VPC Virtual Router which is created as a result of VPC creation. @@ -568,7 +568,7 @@ class TestVPCRoutersBasic(cloudstackTestCase): ) self.debug("Changing service offering for the Router %s" % router.id) try: - router = Router.change_service_offering(self.apiclient, + router = Router.change_service_offering(self.apiclient, router.id, service_offering.id ) @@ -589,7 +589,7 @@ class TestVPCRoutersBasic(cloudstackTestCase): "Changing service offering failed as id is %s and expected" "is %s" % (router.serviceofferingid, service_offering.id) ) - return + return class TestVPCRouterOneNetwork(cloudstackTestCase): @@ -748,18 +748,6 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): account=cls.account.name, domainid=cls.account.domainid ) -# cls.assertEqual( -# isinstance(public_ips, list), -# True, -# "List public Ip for network should list the Ip addr" -# ) -# cls.assertEqual( -# public_ips[0].ipaddress, -# public_ip_2.ipaddress.ipaddress, -# "List public Ip for network should list the Ip addr" -# ) -# - public_ip_3 = PublicIPAddress.create( cls.apiclient, accountid=cls.account.name, @@ -917,8 +905,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): return def validate_network_rules(self): - """ Validate network rules - """ + """ Validate network rules + """ vms = VirtualMachine.list( self.apiclient, account=self.account.name, @@ -1014,8 +1002,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_01_start_stop_router_after_addition_of_one_guest_network(self): - """ Test start/stop of router after addition of one guest network - """ + """ Test start/stop of router after addition of one guest network + """ # Validations #1. Create a VPC with cidr - 10.1.1.1/16 #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1031,7 +1019,6 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): self.validate_vpc_offering(self.vpc_off) self.validate_vpc_network(self.vpc) - #self.validate_network_rules() self.assertEqual( isinstance(self.gateways, list), True, @@ -1063,7 +1050,7 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): cmd.id = router.id self.apiclient.stopRouter(cmd) - #List routers to check state of router + #List routers to check state of router router_response = list_routers( self.apiclient, id=router.id @@ -1082,13 +1069,13 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): self.debug("Stopped the router with ID: %s" % router.id) - # Start The Router + # Start The Router self.debug("Starting the router with ID: %s" % router.id) cmd = startRouter.startRouterCmd() cmd.id = router.id self.apiclient.startRouter(cmd) - #List routers to check state of router + #List routers to check state of router router_response = list_routers( self.apiclient, id=router.id @@ -1110,8 +1097,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_02_reboot_router_after_addition_of_one_guest_network(self): - """ Test reboot of router after addition of one guest network - """ + """ Test reboot of router after addition of one guest network + """ # Validations #1. Create a VPC with cidr - 10.1.1.1/16 #2. Add network1(10.1.1.1/24) to this VPC. @@ -1177,8 +1164,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_03_destroy_router_after_addition_of_one_guest_network(self): - """ Test destroy of router after addition of one guest network - """ + """ Test destroy of router after addition of one guest network + """ # Validations #1. Create a VPC with cidr - 10.1.1.1/16 #2. Add network1(10.1.1.1/24) to this VPC. @@ -1236,8 +1223,8 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): @attr(tags=["advanced", "intervlan"]) def test_04_migrate_router_after_addition_of_one_guest_network(self): - """ Test migrate of router after addition of one guest network - """ + """ Test migrate of router after addition of one guest network + """ # Validations #1. Create a VPC with cidr - 10.1.1.1/16 #2. Add network1(10.1.1.1/24) to this VPC. @@ -1275,12 +1262,12 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): "List Routers should return a valid list" ) self.migrate_router(routers[0]) - return + return @attr(tags=["advanced", "intervlan"]) def test_05_chg_srv_off_router_after_addition_of_one_guest_network(self): - """ Test to change service offering of router after addition of one guest network - """ + """ Test to change service offering of router after addition of one guest network + """ # Validations #1. Create a VPC with cidr - 10.1.1.1/16 #2. Add network1(10.1.1.1/24) to this VPC. 
@@ -1332,7 +1319,7 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): ) self.debug("Changing service offering for the Router %s" % router.id) try: - router = Router.change_service_offering(self.apiclient, + router = Router.change_service_offering(self.apiclient, router.id, service_offering.id ) @@ -1353,5 +1340,4 @@ class TestVPCRouterOneNetwork(cloudstackTestCase): "Changing service offering failed as id is %s and expected" "is %s" % (router.serviceofferingid, service_offering.id) ) - return - + return diff --git a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py index fc8e71648af..ab44a2be083 100644 --- a/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py +++ b/test/integration/smoke/test_deploy_vms_with_varied_deploymentplanners.py @@ -23,30 +23,30 @@ from nose.plugins.attrib import attr class Services: def __init__(self): - self.services = { - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended for unique - # username - "password": "password", - }, - "service_offering": { - "name": "Planner Service Offering", - "displaytext": "Planner Service Offering", - "cpunumber": 1, - "cpuspeed": 100, - # in MHz - "memory": 128, - # In MBs - }, - "ostype": 'CentOS 5.3 (64-bit)', - "virtual_machine": { - "hypervisor": "XenServer", - } - } + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Planner Service Offering", + "displaytext": "Planner Service Offering", + "cpunumber": 1, + "cpuspeed": 100, + # in MHz + "memory": 128, + # In MBs + }, + "ostype": 'CentOS 5.3 (64-bit)', + "virtual_machine": { + "hypervisor": "XenServer", + } + } class TestDeployVmWithVariedPlanners(cloudstackTestCase): diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py index 121bda03506..6788dca393a 100644 --- a/test/integration/smoke/test_network.py +++ b/test/integration/smoke/test_network.py @@ -669,7 +669,7 @@ class TestLoadBalancingRule(cloudstackTestCase): self.debug( "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" % (self.vm_1.ipaddress, src_nat_ip_addr.ipaddress) - ) + ) ssh_1 = remoteSSHClient( src_nat_ip_addr.ipaddress, @@ -804,20 +804,20 @@ class TestLoadBalancingRule(cloudstackTestCase): ) - hostnames = [] - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) - self.try_ssh(src_nat_ip_addr, hostnames) + hostnames = [] + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) + self.try_ssh(src_nat_ip_addr, hostnames) - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( self.vm_1.name, hostnames, "Check if ssh succeeded for server1" ) - self.assertIn( + self.assertIn( self.vm_2.name, hostnames, "Check if ssh succeeded for server2" @@ -826,8 +826,8 @@ class TestLoadBalancingRule(cloudstackTestCase): #SSH should pass till there is a last VM associated with LB rule lb_rule.remove(self.apiclient, [self.vm_2]) - # making hostnames list empty - hostnames[:] = [] 
+ # making hostnames list empty + hostnames[:] = [] try: self.debug("SSHing into IP address: %s after removing VM (ID: %s)" % @@ -837,13 +837,11 @@ class TestLoadBalancingRule(cloudstackTestCase): )) self.try_ssh(src_nat_ip_addr, hostnames) - - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) except Exception as e: self.fail("%s: SSH failed for VM with IP Address: %s" % (e, src_nat_ip_addr.ipaddress)) @@ -958,23 +956,23 @@ class TestLoadBalancingRule(cloudstackTestCase): ) try: hostnames = [] - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_2.name, + hostnames, + "Check if ssh succeeded for server2" + ) #SSH should pass till there is a last VM associated with LB rule lb_rule.remove(self.apiclient, [self.vm_2]) @@ -984,19 +982,16 @@ class TestLoadBalancingRule(cloudstackTestCase): self.non_src_nat_ip.ipaddress.ipaddress, self.vm_2.id )) - # Making host list empty + # Making host list empty hostnames[:] = [] - self.try_ssh(self.non_src_nat_ip, hostnames) - - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - + self.try_ssh(self.non_src_nat_ip, hostnames) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) self.debug("Hostnames after removing VM2: %s" % str(hostnames)) - except Exception as e: self.fail("%s: SSH failed for VM with IP Address: %s" % (e, self.non_src_nat_ip.ipaddress.ipaddress)) @@ -1017,7 +1012,6 @@ class TestLoadBalancingRule(cloudstackTestCase): ssh_1.execute("hostname")[0] return - class TestRebootRouter(cloudstackTestCase): def setUp(self): @@ -1336,31 +1330,29 @@ class TestAssignRemoveLB(cloudstackTestCase): ) lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2]) - hostnames = [] - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_2.name, - hostnames, - "Check if ssh succeeded for server2" - ) - + hostnames = [] + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_2.name, + 
hostnames, + "Check if ssh succeeded for server2" + ) #Removing VM and assigning another VM to LB rule lb_rule.remove(self.apiclient, [self.vm_2]) - # making hostnames list empty - hostnames[:] = [] + # making hostnames list empty + hostnames[:] = [] try: self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" % @@ -1370,38 +1362,35 @@ class TestAssignRemoveLB(cloudstackTestCase): )) self.try_ssh(self.non_src_nat_ip, hostnames) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) except Exception as e: self.fail("SSH failed for VM with IP: %s" % self.non_src_nat_ip.ipaddress) lb_rule.assign(self.apiclient, [self.vm_3]) - # Making hostnames list empty + # Making hostnames list empty hostnames[:] = [] - - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - self.try_ssh(self.non_src_nat_ip, hostnames) - - self.debug("Hostnames: %s" % str(hostnames)) - self.assertIn( - self.vm_1.name, - hostnames, - "Check if ssh succeeded for server1" - ) - self.assertIn( - self.vm_3.name, - hostnames, - "Check if ssh succeeded for server3" - ) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.try_ssh(self.non_src_nat_ip, hostnames) + self.debug("Hostnames: %s" % str(hostnames)) + self.assertIn( + self.vm_1.name, + hostnames, + "Check if ssh succeeded for server1" + ) + self.assertIn( + self.vm_3.name, + hostnames, + "Check if ssh succeeded for server3" + ) return class TestReleaseIP(cloudstackTestCase): diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py index cca4cfb767f..dd709828a60 100644 --- a/test/integration/smoke/test_vm_snapshots.py +++ b/test/integration/smoke/test_vm_snapshots.py @@ -29,282 +29,283 @@ class Services: """ def __init__(self): - self.services = { - "account": { - "email": "test@test.com", - "firstname": "Test", - "lastname": "User", - "username": "test", - # Random characters are appended for unique - # username - "password": "password", - }, - "service_offering": { - "name": "Tiny Instance", - "displaytext": "Tiny Instance", - "cpunumber": 1, - "cpuspeed": 200, # in MHz - "memory": 256, # In MBs - }, - "server": { - "displayname": "TestVM", - "username": "root", - "password": "password", - "ssh_port": 22, - "hypervisor": 'XenServer', - "privateport": 22, - "publicport": 22, - "protocol": 'TCP', - }, - "mgmt_server": { - "ipaddress": '1.2.2.152', - "username": "root", - "password": "password", - "port": 22, - }, - "templates": { - "displaytext": 'Template', - "name": 'Template', - "ostype": "CentOS 5.3 (64-bit)", - "templatefilter": 'self', - }, - "test_dir": "/tmp", - "random_data": "random.data", - "snapshot_name":"TestSnapshot", - "snapshot_displaytext":"Test", - "ostype": "CentOS 5.3 (64-bit)", - "sleep": 60, - "timeout": 10, - "mode": 'advanced', # Networking mode: Advanced, Basic - } + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, 
+ "cpuspeed": 200, # in MHz + "memory": 256, # In MBs + }, + "server": { + "displayname": "TestVM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "mgmt_server": { + "ipaddress": '1.2.2.152', + "username": "root", + "password": "password", + "port": 22, + }, + "templates": { + "displaytext": 'Template', + "name": 'Template', + "ostype": "CentOS 5.3 (64-bit)", + "templatefilter": 'self', + }, + "test_dir": "/tmp", + "random_data": "random.data", + "snapshot_name": "TestSnapshot", + "snapshot_displaytext": "Test", + "ostype": "CentOS 5.3 (64-bit)", + "sleep": 60, + "timeout": 10, + "mode": 'advanced', # Networking mode: Advanced, Basic + } class TestVmSnapshot(cloudstackTestCase): + @classmethod def setUpClass(cls): - cls.api_client = super(TestVmSnapshot, cls).getClsTestClient().getApiClient() - cls.services = Services().services - # Get Zone, Domain and templates - cls.domain = get_domain(cls.api_client, cls.services) - cls.zone = get_zone(cls.api_client, cls.services) + cls.api_client = super(TestVmSnapshot, cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) - template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) - cls.services["domainid"] = cls.domain.id - cls.services["server"]["zoneid"] = cls.zone.id - cls.services["templates"]["ostypeid"] = template.ostypeid - cls.services["zoneid"] = cls.zone.id + template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.services["domainid"] = cls.domain.id + cls.services["server"]["zoneid"] = cls.zone.id + cls.services["templates"]["ostypeid"] = template.ostypeid + cls.services["zoneid"] = cls.zone.id - # Create VMs, NAT Rules etc - cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=cls.domain.id - ) + # Create VMs, NAT Rules etc + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) - cls.services["account"] = cls.account.name + cls.services["account"] = cls.account.name - cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) - cls.virtual_machine = VirtualMachine.create( - cls.api_client, - cls.services["server"], - templateid=template.id, - accountid=cls.account.name, - domainid=cls.account.domainid, - serviceofferingid=cls.service_offering.id, - mode=cls.services["mode"] - ) - cls.random_data_0 = random_gen(100) - cls._cleanup = [ - cls.service_offering, - cls.account, - ] - return + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.virtual_machine = VirtualMachine.create( + cls.api_client, + cls.services["server"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + mode=cls.services["mode"] + ) + cls.random_data_0 = random_gen(100) + cls._cleanup = [ + cls.service_offering, + cls.account, + ] + return @classmethod def tearDownClass(cls): - try: - # Cleanup resources used - cleanup_resources(cls.api_client, cls._cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: 
+ raise Exception("Warning: Exception during cleanup : %s" % e) + return def setUp(self): - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - return + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return def tearDown(self): - try: - # Clean up, terminate the created instance, volumes and snapshots - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + try: + # Clean up, terminate the created instance, volumes and snapshots + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return @attr(tags=["advanced", "advancedns", "smoke"]) def test_01_create_vm_snapshots(self): - """Test to create VM snapshots - """ + """Test to create VM snapshots + """ - try: - # Login to VM and write data to file system - ssh_client = self.virtual_machine.get_ssh_client() + try: + # Login to VM and write data to file system + ssh_client = self.virtual_machine.get_ssh_client() - cmds = [ - "echo %s > %s/%s" % (self.random_data_0, self.services["test_dir"], self.services["random_data"]), - "cat %s/%s" % (self.services["test_dir"], self.services["random_data"]) - ] + cmds = [ + "echo %s > %s/%s" % (self.random_data_0, self.services["test_dir"], self.services["random_data"]), + "cat %s/%s" % (self.services["test_dir"], self.services["random_data"]) + ] - for c in cmds: - self.debug(c) - result = ssh_client.execute(c) - self.debug(result) + for c in cmds: + self.debug(c) + result = ssh_client.execute(c) + self.debug(result) - except Exception: - self.fail("SSH failed for Virtual machine: %s" % - self.virtual_machine.ipaddress) - self.assertEqual( - self.random_data_0, - result[0], - "Check the random data has be write into temp file!" - ) + except Exception: + self.fail("SSH failed for Virtual machine: %s" % + self.virtual_machine.ipaddress) + self.assertEqual( + self.random_data_0, + result[0], + "Check the random data has be write into temp file!" + ) - time.sleep(self.services["sleep"]) + time.sleep(self.services["sleep"]) - vm_snapshot = VmSnapshot.create( - self.apiclient, - self.virtual_machine.id, - "false", - self.services["snapshot_name"], - self.services["snapshot_displaytext"] - ) - self.assertEqual( - vm_snapshot.state, - "Ready", - "Check the snapshot of vm is ready!" - ) - return + vm_snapshot = VmSnapshot.create( + self.apiclient, + self.virtual_machine.id, + "false", + self.services["snapshot_name"], + self.services["snapshot_displaytext"] + ) + self.assertEqual( + vm_snapshot.state, + "Ready", + "Check the snapshot of vm is ready!" 
+ ) + return @attr(tags=["advanced", "advancedns", "smoke"]) def test_02_revert_vm_snapshots(self): - """Test to revert VM snapshots - """ + """Test to revert VM snapshots + """ - try: - ssh_client = self.virtual_machine.get_ssh_client() + try: + ssh_client = self.virtual_machine.get_ssh_client() - cmds = [ - "rm -rf %s/%s" % (self.services["test_dir"], self.services["random_data"]), - "ls %s/%s" % (self.services["test_dir"], self.services["random_data"]) - ] + cmds = [ + "rm -rf %s/%s" % (self.services["test_dir"], self.services["random_data"]), + "ls %s/%s" % (self.services["test_dir"], self.services["random_data"]) + ] - for c in cmds: - self.debug(c) - result = ssh_client.execute(c) - self.debug(result) + for c in cmds: + self.debug(c) + result = ssh_client.execute(c) + self.debug(result) - except Exception: - self.fail("SSH failed for Virtual machine: %s" % - self.virtual_machine.ipaddress) + except Exception: + self.fail("SSH failed for Virtual machine: %s" % + self.virtual_machine.ipaddress) - if str(result[0]).index("No such file or directory") == -1: - self.fail("Check the random data has be delete from temp file!") + if str(result[0]).index("No such file or directory") == -1: + self.fail("Check the random data has be delete from temp file!") - time.sleep(self.services["sleep"]) + time.sleep(self.services["sleep"]) - list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True) + list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True) - self.assertEqual( - isinstance(list_snapshot_response, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - list_snapshot_response, - None, - "Check if snapshot exists in ListSnapshot" - ) + self.assertEqual( + isinstance(list_snapshot_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_snapshot_response, + None, + "Check if snapshot exists in ListSnapshot" + ) - self.assertEqual( - list_snapshot_response[0].state, - "Ready", - "Check the snapshot of vm is ready!" - ) + self.assertEqual( + list_snapshot_response[0].state, + "Ready", + "Check the snapshot of vm is ready!" + ) - VmSnapshot.revertToSnapshot(self.apiclient,list_snapshot_response[0].id) + VmSnapshot.revertToSnapshot(self.apiclient, list_snapshot_response[0].id) - list_vm_response = list_virtual_machines( - self.apiclient, - id=self.virtual_machine.id - ) + list_vm_response = list_virtual_machines( + self.apiclient, + id=self.virtual_machine.id + ) - self.assertEqual( - list_vm_response[0].state, - "Stopped", - "Check the state of vm is Stopped!" - ) + self.assertEqual( + list_vm_response[0].state, + "Stopped", + "Check the state of vm is Stopped!" 
+ ) - cmd = startVirtualMachine.startVirtualMachineCmd() - cmd.id = list_vm_response[0].id - self.apiclient.startVirtualMachine(cmd) + cmd = startVirtualMachine.startVirtualMachineCmd() + cmd.id = list_vm_response[0].id + self.apiclient.startVirtualMachine(cmd) - time.sleep(self.services["sleep"]) + time.sleep(self.services["sleep"]) - try: - ssh_client = self.virtual_machine.get_ssh_client(reconnect=True) + try: + ssh_client = self.virtual_machine.get_ssh_client(reconnect=True) - cmds = [ - "cat %s/%s" % (self.services["test_dir"], self.services["random_data"]) - ] + cmds = [ + "cat %s/%s" % (self.services["test_dir"], self.services["random_data"]) + ] - for c in cmds: - self.debug(c) - result = ssh_client.execute(c) - self.debug(result) + for c in cmds: + self.debug(c) + result = ssh_client.execute(c) + self.debug(result) - except Exception: - self.fail("SSH failed for Virtual machine: %s" % - self.virtual_machine.ipaddress) + except Exception: + self.fail("SSH failed for Virtual machine: %s" % + self.virtual_machine.ipaddress) - self.assertEqual( - self.random_data_0, - result[0], - "Check the random data is equal with the ramdom file!" - ) + self.assertEqual( + self.random_data_0, + result[0], + "Check the random data is equal with the ramdom file!" + ) @attr(tags=["advanced", "advancedns", "smoke"]) def test_03_delete_vm_snapshots(self): - """Test to delete vm snapshots - """ + """Test to delete vm snapshots + """ - list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True) + list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True) - self.assertEqual( - isinstance(list_snapshot_response, list), - True, - "Check list response returns a valid list" - ) - self.assertNotEqual( - list_snapshot_response, - None, - "Check if snapshot exists in ListSnapshot" - ) - VmSnapshot.deleteVMSnapshot(self.apiclient,list_snapshot_response[0].id) + self.assertEqual( + isinstance(list_snapshot_response, list), + True, + "Check list response returns a valid list" + ) + self.assertNotEqual( + list_snapshot_response, + None, + "Check if snapshot exists in ListSnapshot" + ) + VmSnapshot.deleteVMSnapshot(self.apiclient, list_snapshot_response[0].id) - time.sleep(self.services["sleep"]*3) + time.sleep(self.services["sleep"] * 3) - list_snapshot_response = VmSnapshot.list(self.apiclient,vmid=self.virtual_machine.id,listall=True) + list_snapshot_response = VmSnapshot.list(self.apiclient, vmid=self.virtual_machine.id, listall=True) - self.assertEqual( - list_snapshot_response, - None, - "Check list vm snapshot has be deleted" - ) + self.assertEqual( + list_snapshot_response, + None, + "Check list vm snapshot has be deleted" + ) From 48a2c761b4c0eaa9a1873afcc82320b7199323bd Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 30 Jun 2013 13:58:16 +0530 Subject: [PATCH 4/8] pep8 fix Signed-off-by: Prasanna Santhanam (cherry picked from commit 3684bafd37db5884dbd3594b8f1408848fcdffb8) --- tools/marvin/marvin/deployDataCenter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py index 961718ad67a..d704375559a 100644 --- a/tools/marvin/marvin/deployDataCenter.py +++ b/tools/marvin/marvin/deployDataCenter.py @@ -120,7 +120,8 @@ specify a valid config file" % cfgFile) self.createVlanIpRanges("Basic", pod.guestIpRanges, zoneId, podId, networkId) - self.createClusters(pod.clusters, zoneId, podId, vmwareDc=pod.vmwaredc) + 
self.createClusters(pod.clusters, zoneId, podId, + vmwareDc=pod.vmwaredc) def createVlanIpRanges(self, mode, ipranges, zoneId, podId=None, networkId=None, forvirtualnetwork=None): From cb1d6eaa0e034a26800ced2e3e67636febdd82b2 Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Sun, 30 Jun 2013 21:43:08 +0530 Subject: [PATCH 5/8] Fix attr plugin import and class reference Signed-off-by: Prasanna Santhanam (cherry picked from commit e7fe43567bef589a128a03f49db8cf8e7ebd7b3c) --- test/integration/component/test_advancedsg_networks.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/integration/component/test_advancedsg_networks.py b/test/integration/component/test_advancedsg_networks.py index f8774be9e48..483435188ca 100644 --- a/test/integration/component/test_advancedsg_networks.py +++ b/test/integration/component/test_advancedsg_networks.py @@ -24,9 +24,8 @@ from marvin.cloudstackAPI import * from marvin.integration.lib.utils import * from marvin.integration.lib.base import * from marvin.integration.lib.common import * -from marvin.remoteSSHClient import remoteSSHClient -import datetime import netaddr +from nose.plugins.attrib import attr class Services: """ Test networks in advanced zone with security groups""" @@ -156,7 +155,7 @@ class TestNetworksInAdvancedSG(cloudstackTestCase): @classmethod def setUpClass(cls): cls.api_client = super( - TestSharedNetworks, + TestNetworksInAdvancedSG, cls ).getClsTestClient().getApiClient() @@ -687,7 +686,7 @@ class TestNetworksInAdvancedSG(cloudstackTestCase): "The network offering state should get updated to Enabled." ) - physical_network = list_physical_networks_response[0] + physical_network = PhysicalNetwork.list(self.api_client)[0] #create network using the shared network offering created self.services["shared_network_sg"]["acltype"] = "domain" From 9b722f4ccc0f6fb3ee4a34297593fe01fc23b4d7 Mon Sep 17 00:00:00 2001 From: Rayeesn Date: Fri, 28 Jun 2013 17:26:12 -0700 Subject: [PATCH 6/8] Adding axis2.xml to cloudstack-bridge/webapps/awsapi/WEB-INF/conf as part of defect CLOUDSTACK-2927 --- packaging/centos63/cloud.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 1f112ddd686..f7eb629e265 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -299,6 +299,7 @@ cp -r awsapi/target/cloud-awsapi-%{_maventag}/* ${RPM_BUILD_ROOT}%{_datadir}/%{n install -D awsapi-setup/setup/cloud-setup-bridge ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-setup-bridge install -D awsapi-setup/setup/cloudstack-aws-api-register ${RPM_BUILD_ROOT}%{_bindir}/cloudstack-aws-api-register cp -r awsapi-setup/db/mysql/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/setup +cp awsapi/resource/Axis2/axis2.xml ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/conf for name in applicationContext.xml cloud-bridge.properties commons-logging.properties crypto.properties xes.keystore ec2-service.properties ; do mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/$name \ From 01debd59d3a21c4164e4ef7e6f4d9e279933e34f Mon Sep 17 00:00:00 2001 From: Prasanna Santhanam Date: Mon, 1 Jul 2013 13:47:16 +0530 Subject: [PATCH 7/8] Fix injection of datastoreproviders in nonoss context the datastore provider references are not in the application context anymore and have been moved to the respective componentContexts. Plug them in by default for the nonoss server to start up successfully.
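The wiring in question has roughly the following shape. This is a minimal Spring XML sketch only; the provider bean id and class below are illustrative placeholders, not the actual nonoss provider names:

    <!-- Sketch: register a datastore provider bean in the component context
         (id/class here are placeholders) and inject it into the "providers"
         list on DataStoreProviderManagerImpl. -->
    <bean id="examplePrimaryDataStoreProvider"
          class="org.apache.cloudstack.storage.datastore.provider.ExampleDataStoreProviderImpl"/>

    <bean id="dataStoreProviderManager"
          class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl">
      <property name="providers">
        <list>
          <ref bean="examplePrimaryDataStoreProvider"/>
        </list>
      </property>
    </bean>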
Signed-off-by: Prasanna Santhanam --- client/tomcatconf/applicationContext.xml.in | 4 ++-- client/tomcatconf/componentContext.xml.in | 2 +- .../tomcatconf/nonossComponentContext.xml.in | 19 +++++++++++++------ .../simulatorComponentContext.xml.in | 2 +- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in index 3d5d4fab16c..14255c1e7a5 100644 --- a/client/tomcatconf/applicationContext.xml.in +++ b/client/tomcatconf/applicationContext.xml.in @@ -688,7 +688,7 @@ - - + diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in index 966faf57078..1fbec61ba66 100644 --- a/client/tomcatconf/componentContext.xml.in +++ b/client/tomcatconf/componentContext.xml.in @@ -112,7 +112,7 @@ class="org.apache.cloudstack.storage.datastore.provider.DataStoreProviderManagerImpl"> - + diff --git a/client/tomcatconf/nonossComponentContext.xml.in b/client/tomcatconf/nonossComponentContext.xml.in index e7828e1d2af..ffa62818970 100644 --- a/client/tomcatconf/nonossComponentContext.xml.in +++ b/client/tomcatconf/nonossComponentContext.xml.in @@ -195,17 +195,24 @@ - + + + + + - - - - - + + + + diff --git a/client/tomcatconf/simulatorComponentContext.xml.in b/client/tomcatconf/simulatorComponentContext.xml.in index 579ae1b6417..92278a4da8e 100644 --- a/client/tomcatconf/simulatorComponentContext.xml.in +++ b/client/tomcatconf/simulatorComponentContext.xml.in @@ -45,7 +45,7 @@ - + From e600d7cb6120ae8ea203e5b372826843f6c23b31 Mon Sep 17 00:00:00 2001 From: Radhika PC Date: Mon, 18 Feb 2013 16:55:35 +0530 Subject: [PATCH 8/8] CLOUDSTACK-3300: Adding patches from cloudstack-1313 --- docs/en-US/attaching-volume.xml | 57 +++++++---- docs/en-US/creating-new-volumes.xml | 99 +++++++++++-------- docs/en-US/detach-move-volumes.xml | 58 ++++++----- docs/en-US/storage.xml | 15 ++- docs/en-US/upload-existing-volume-to-vm.xml | 90 ++++++++++++++++- docs/en-US/vm-storage-migration.xml | 27 ++--- .../volume-deletion-garbage-collection.xml | 29 ++++-- docs/en-US/working-with-volumes.xml | 51 +++++----- 8 files changed, 291 insertions(+), 135 deletions(-) diff --git a/docs/en-US/attaching-volume.xml b/docs/en-US/attaching-volume.xml index 360555eac06..7511ec32a4d 100644 --- a/docs/en-US/attaching-volume.xml +++ b/docs/en-US/attaching-volume.xml @@ -21,24 +21,41 @@ specific language governing permissions and limitations under the License. --> -
- Attaching a Volume - You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when you first create a new volume, when you are moving an existing volume from one VM to another, or after you have migrated a volume from one storage pool to another. - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation, click Storage. - In Select View, choose Volumes. - 4. Click the volume name in the Volumes list, then click the Attach Disk button - - - - AttachDiskButton.png: button to attach a volume - - - In the Instance popup, choose the VM to which you want to attach the volume. You will only see instances to which you are allowed to attach volumes; for example, a user will see only instances created by that user, but the administrator will have more choices. - - - When the volume has been attached, you should be able to see it by clicking Instances, the instance name, and View Volumes. - -
+ Attaching a Volume + You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when + you first create a new volume, when you are moving an existing volume from one VM to another, or + after you have migrated a volume from one storage pool to another. + + + Log in to the &PRODUCT; UI as a user or admin. + + + In the left navigation, click Storage. + + + In Select View, choose Volumes. + + + 4. Click the volume name in the Volumes list, then click the Attach Disk button + + + + + AttachDiskButton.png: button to attach a volume + + + + + + In the Instance popup, choose the VM to which you want to attach the volume. You will + only see instances to which you are allowed to attach volumes; for example, a user will see + only instances created by that user, but the administrator will have more choices. + + + + When the volume has been attached, you should be able to see it by clicking Instances, + the instance name, and View Volumes. + + + diff --git a/docs/en-US/creating-new-volumes.xml b/docs/en-US/creating-new-volumes.xml index 5a12d7f5783..5440dc5a016 100644 --- a/docs/en-US/creating-new-volumes.xml +++ b/docs/en-US/creating-new-volumes.xml @@ -20,44 +20,65 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ---> +-->
- Creating a New Volume - You can add more data disk volumes to a guest VM at any time, up to the limits of your storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. When you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage resources are not allocated on the physical storage device until you attach the volume. This optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it when the first attachment is made. -
- Using Local Storage for Data Volumes - You can create data volumes on local storage (supported with XenServer, KVM, and VMware). - The data volume is placed on the same - host as the VM instance that is attached to the data volume. These - local data volumes can be attached to virtual machines, detached, re-attached, - and deleted just as with the other types of data volume. - Local storage is ideal for scenarios where persistence of data volumes and HA - is not required. Some of the benefits include reduced disk I/O latency and cost - reduction from using inexpensive local disks. - In order for local volumes to be used, the feature must be enabled for the - zone. - You can create a data disk offering for local storage. When a user creates a - new VM, they can select this disk offering in order to cause the data disk - volume to be placed in local storage. - You can not migrate a VM that has a volume in local storage to a different - host, nor migrate the volume itself away to a different host. If you want to put - a host into maintenance mode, you must first stop any VMs with local data - volumes on that host. -
-
- To Create a New Volume - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation bar, click Storage. - In Select View, choose Volumes. - To create a new volume, click Add Volume, provide the following details, and click OK. - - Name. Give the volume a unique name so you can find it later. - Availability Zone. Where do you want the storage to reside? This should be close to the VM that will use the volume. - Disk Offering. Choose the characteristics of the storage. - - The new volume appears in the list of volumes with the state “Allocated.” The volume data is stored in &PRODUCT;, but the volume is not yet ready for use - To start using the volume, continue to Attaching a Volume - -
+ Creating a New Volume + You can add more data disk volumes to a guest VM at any time, up to the limits of your + storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. When + you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage + resources are not allocated on the physical storage device until you attach the volume. This + optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it + when the first attachment is made. +
+ Using Local Storage for Data Volumes
+ You can create data volumes on local storage (supported with XenServer, KVM, and VMware).
+ The data volume is placed on the same host as the VM instance that is attached to the data
+ volume. These local data volumes can be attached to virtual machines, detached, re-attached,
+ and deleted just as with the other types of data volume.
+ Local storage is ideal for scenarios where persistence of data volumes and HA is not
+ required. Some of the benefits include reduced disk I/O latency and cost reduction from using
+ inexpensive local disks.
+ In order for local volumes to be used, the feature must be enabled for the zone.
+ You can create a data disk offering for local storage. When a user creates a new VM, they
+ can select this disk offering in order to cause the data disk volume to be placed in local
+ storage.
+ You cannot migrate a VM that has a volume in local storage to a different host, nor
+ migrate the volume itself away to a different host. If you want to put a host into maintenance
+ mode, you must first stop any VMs with local data volumes on that host.
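A disk offering that targets local storage can be created through the createDiskOffering API, whose storagetype parameter distinguishes local from shared storage. A minimal sketch, assuming an admin-level Marvin apiclient; the offering name and size are illustrative placeholders:

    from marvin.cloudstackAPI import createDiskOffering

    def create_local_disk_offering(apiclient, name, size_gb):
        # Define a fixed-size data disk offering whose volumes are carved
        # out of the local disk of the host running the attached VM.
        cmd = createDiskOffering.createDiskOfferingCmd()
        cmd.name = name
        cmd.displaytext = name
        cmd.disksize = size_gb        # size in GB
        cmd.storagetype = "local"     # "shared" is the default
        return apiclient.createDiskOffering(cmd)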
+
+ To Create a New Volume
+
+
+ Log in to the &PRODUCT; UI as a user or admin.
+
+
+ In the left navigation bar, click Storage.
+
+
+ In Select View, choose Volumes.
+
+
+ To create a new volume, click Add Volume, provide the following details, and click
+ OK.
+
+
+ Name. Give the volume a unique name so you can find it later.
+
+
+ Availability Zone. Where do you want the storage to reside? This should be close
+ to the VM that will use the volume.
+
+
+ Disk Offering. Choose the characteristics of the storage.
+
+
+ The new volume appears in the list of volumes with the state “Allocated.” The volume
+ data is stored in &PRODUCT;, but the volume is not yet ready for use.
+
+
+ To start using the volume, continue to Attaching a Volume.
+
+
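The same procedure is available programmatically as the createVolume API. A minimal sketch, assuming a Marvin apiclient and placeholder zone and disk offering UUIDs; consistent with the note above, the returned volume stays in the Allocated state, consuming no physical storage, until it is first attached:

    from marvin.cloudstackAPI import createVolume

    def create_data_volume(apiclient, name, zone_id, disk_offering_id):
        # Create a named data volume; storage is provisioned lazily on
        # the first attach, near the guest that will use it.
        cmd = createVolume.createVolumeCmd()
        cmd.name = name
        cmd.zoneid = zone_id
        cmd.diskofferingid = disk_offering_id
        return apiclient.createVolume(cmd)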
diff --git a/docs/en-US/detach-move-volumes.xml b/docs/en-US/detach-move-volumes.xml index fda6e66cede..7103c305c4f 100644 --- a/docs/en-US/detach-move-volumes.xml +++ b/docs/en-US/detach-move-volumes.xml @@ -22,25 +22,39 @@ under the License. -->
- Detaching and Moving Volumes - This procedure is different from moving disk volumes from one storage pool to another. See VM Storage Migration - A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT; administrators and users can detach volumes from VMs and move them to other VMs. - If the two VMs are in different clusters, and the volume is large, it may take several minutes for the volume to be moved to the new VM. - - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation bar, click Storage, and choose Volumes in Select View. Alternatively, if you know which VM the volume is attached to, you can click Instances, click the VM name, and click View Volumes. - Click the name of the volume you want to detach, then click the Detach Disk button. - - - - - DetachDiskButton.png: button to detach a volume - - - - To move the volume to another VM, follow the steps in . - -
- + Detaching and Moving Volumes + + This procedure is different from moving disk volumes from one storage pool to another. See + VM Storage Migration + + A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT; + administrators and users can detach volumes from VMs and move them to other VMs. + If the two VMs are in different clusters, and the volume is large, it may take several + minutes for the volume to be moved to the new VM. + + + + Log in to the &PRODUCT; UI as a user or admin. + + + In the left navigation bar, click Storage, and choose Volumes in Select View. + Alternatively, if you know which VM the volume is attached to, you can click Instances, + click the VM name, and click View Volumes. + + + Click the name of the volume you want to detach, then click the Detach Disk button. + + + + + DetachDiskButton.png: button to detach a volume + + + + + + To move the volume to another VM, follow the steps in . + + + diff --git a/docs/en-US/storage.xml b/docs/en-US/storage.xml index 580fe59e1e1..3ef73246d1d 100644 --- a/docs/en-US/storage.xml +++ b/docs/en-US/storage.xml @@ -1,5 +1,5 @@ - %BOOK_ENTITIES; ]> @@ -21,12 +21,11 @@ specific language governing permissions and limitations under the License. --> - - Working With Storage - - - - - + Working With Storage + + + + + diff --git a/docs/en-US/upload-existing-volume-to-vm.xml b/docs/en-US/upload-existing-volume-to-vm.xml index d2b657164c8..6be43f89f6b 100644 --- a/docs/en-US/upload-existing-volume-to-vm.xml +++ b/docs/en-US/upload-existing-volume-to-vm.xml @@ -21,8 +21,8 @@ specific language governing permissions and limitations under the License. --> -
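Scripted, detaching and moving a volume is a detachVolume call followed by attachVolume against the new instance. A hedged sketch with Marvin command wrappers and placeholder UUIDs; as the text above notes, the second step can take several minutes when the two VMs sit in different clusters and the volume is large:

    from marvin.cloudstackAPI import attachVolume, detachVolume

    def move_volume(apiclient, volume_id, target_vm_id):
        # Release the volume from whichever VM currently holds it...
        detach = detachVolume.detachVolumeCmd()
        detach.id = volume_id
        apiclient.detachVolume(detach)
        # ...then attach it to the destination VM.
        attach = attachVolume.attachVolumeCmd()
        attach.id = volume_id
        attach.virtualmachineid = target_vm_id
        return apiclient.attachVolume(attach)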
Uploading an Existing Volume to a Virtual Machine Existing data can be made accessible to a virtual machine. This is called uploading a volume to the VM. For example, this is useful to upload data from a local file system and attach it to a VM. Root administrators, domain administrators, and end users can all upload existing volumes to VMs. The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary storage
@@ -73,4 +73,92 @@ Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step , and make sure the status is Uploaded.
+ Uploading an Existing Volume to a Virtual Machine
+ Existing data can be made accessible to a virtual machine. This is called uploading a volume
+ to the VM. For example, this is useful to upload data from a local file system and attach it to
+ a VM. Root administrators, domain administrators, and end users can all upload existing volumes
+ to VMs.
+ The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary
+ storage.
+ You cannot upload a volume if the preconfigured volume limit has already been reached. The
+ default limit for the cloud is set in the global configuration parameter max.account.volumes,
+ but administrators can also set per-domain limits that are different from the global default.
+ See Setting Usage Limits.
+ To upload a volume:
+
+
+ (Optional) Create an MD5 hash (checksum) of the disk image file that you are going to
+ upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data
+ corruption has occurred.
+
+
+ Log in to the &PRODUCT; UI as an administrator or user.
+
+
+ In the left navigation bar, click Storage.
+
+
+ Click Upload Volume.
+
+
+ Provide the following:
+
+
+ Name and Description. Any desired name and a brief description that can be shown in
+ the UI.
+
+
+ Availability Zone. Choose the zone where you want to store the volume. VMs running
+ on hosts in this zone can attach the volume.
+
+
+ Format. Choose one of the following to indicate the disk image format of the
+ volume.
+
+
+
+
+ Hypervisor
+ Disk Image Format
+
+
+
+
+ XenServer
+ VHD
+
+
+ VMware
+ OVA
+
+
+ KVM
+ QCOW2
+
+
+
+
+
+
+
+ URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The
+ type of file at the URL must match the value chosen in Format. For example, if Format is
+ VHD, the URL might look like the following:
+ http://yourFileServerIP/userdata/myDataDisk.vhd
+
+
+ MD5 checksum. (Optional) Use the hash that you created in step 1.
+
+
+
+
+ Wait until the status of the volume shows that the upload is complete. Click Instances -
+ Volumes, find the name you specified in step 5, and make sure the status is Uploaded.
+
+
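The upload itself is exposed as the uploadVolume API. A minimal sketch, assuming a Marvin apiclient; the URL is the illustrative placeholder from the text above, and the format argument must match one of the disk image formats in the table (VHD, OVA, or QCOW2):

    from marvin.cloudstackAPI import uploadVolume

    def upload_existing_volume(apiclient, name, zone_id, disk_format, url, md5=None):
        # Register a disk image served over HTTP/HTTPS as a volume; it is
        # pulled onto the zone's secondary storage and ends in state Uploaded.
        cmd = uploadVolume.uploadVolumeCmd()
        cmd.name = name
        cmd.zoneid = zone_id
        cmd.format = disk_format    # e.g. "VHD"; must match the image at the URL
        cmd.url = url               # e.g. "http://yourFileServerIP/userdata/myDataDisk.vhd"
        if md5:
            cmd.checksum = md5      # optional MD5 hash from step 1
        return apiclient.uploadVolume(cmd)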
diff --git a/docs/en-US/vm-storage-migration.xml b/docs/en-US/vm-storage-migration.xml index 7c3824b4817..e0dad57faa0 100644 --- a/docs/en-US/vm-storage-migration.xml +++ b/docs/en-US/vm-storage-migration.xml @@ -22,14 +22,19 @@ under the License. -->
- VM Storage Migration - Supported in XenServer, KVM, and VMware. - This procedure is different from moving disk volumes from one VM to another. See Detaching and Moving Volumes . - - - You can migrate a virtual machine’s root disk volume or any additional data disk volume from one storage pool to another in the same zone. - You can use the storage migration feature to achieve some commonly desired administration goals, such as balancing the load on storage pools and increasing the reliability of virtual machines by moving them away from any storage pool that is experiencing issues. - - -
-
+ VM Storage Migration
+ Supported in XenServer, KVM, and VMware.
+
+ This procedure is different from moving disk volumes from one VM to another. See Detaching
+ and Moving Volumes.
+
+ You can migrate a virtual machine’s root disk volume or any additional data disk volume from
+ one storage pool to another in the same zone.
+ You can use the storage migration feature to achieve some commonly desired administration
+ goals, such as balancing the load on storage pools and increasing the reliability of virtual
+ machines by moving them away from any storage pool that is experiencing issues.
+
+
diff --git a/docs/en-US/volume-deletion-garbage-collection.xml b/docs/en-US/volume-deletion-garbage-collection.xml index d162d848cc3..418643890f3 100644 --- a/docs/en-US/volume-deletion-garbage-collection.xml +++ b/docs/en-US/volume-deletion-garbage-collection.xml @@ -21,15 +21,24 @@ specific language governing permissions and limitations under the License. -->
-
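Storage migration is driven by the migrateVolume API, with the destination pool discoverable through listStoragePools. A hedged sketch with Marvin's command wrapper; the volume and destination pool IDs are placeholder UUIDs:

    from marvin.cloudstackAPI import migrateVolume

    def migrate_volume(apiclient, volume_id, dest_pool_id):
        # Move a volume to another primary storage pool in the same zone,
        # for example to drain a pool that is experiencing issues.
        cmd = migrateVolume.migrateVolumeCmd()
        cmd.volumeid = volume_id
        cmd.storageid = dest_pool_id   # UUID of the destination storage pool
        return apiclient.migrateVolume(cmd)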
- Volume Deletion and Garbage Collection - The deletion of a volume does not delete the snapshots that have been created from the volume - When a VM is destroyed, data disk volumes that are attached to the VM are not deleted. - Volumes are permanently destroyed using a garbage collection process. The global configuration variables expunge.delay and expunge.interval determine when the physical deletion of volumes will occur. - - expunge.delay: determines how old the volume must be before it is destroyed, in seconds - expunge.interval: determines how often to run the garbage collection check - - Administrators should adjust these values depending on site policies around data retention.
+ Volume Deletion and Garbage Collection
+ The deletion of a volume does not delete the snapshots that have been created from the
+ volume.
+ When a VM is destroyed, data disk volumes that are attached to the VM are not
+ deleted.
+ Volumes are permanently destroyed using a garbage collection process. The global
+ configuration variables expunge.delay and expunge.interval determine when the physical deletion
+ of volumes will occur.
+
+
+ expunge.delay: determines how old the volume must be before it is destroyed, in
+ seconds
+
+
+ expunge.interval: determines how often to run the garbage collection check
+
+
+ Administrators should adjust these values depending on site policies around data
+ retention.
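Both variables are ordinary global configuration entries, so they can also be set through the updateConfiguration API. A minimal sketch, assuming a root-admin Marvin apiclient; note that changes to these values may not take effect until the management server is restarted:

    from marvin.cloudstackAPI import updateConfiguration

    def set_expunge_policy(apiclient, delay_seconds, interval_seconds):
        # Tune how long destroyed volumes linger (expunge.delay) and how
        # often the garbage collector runs (expunge.interval), in seconds.
        for name, value in (("expunge.delay", delay_seconds),
                            ("expunge.interval", interval_seconds)):
            cmd = updateConfiguration.updateConfigurationCmd()
            cmd.name = name
            cmd.value = str(value)
            apiclient.updateConfiguration(cmd)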
diff --git a/docs/en-US/working-with-volumes.xml b/docs/en-US/working-with-volumes.xml index ab567d2d0ca..6832cffe339 100644 --- a/docs/en-US/working-with-volumes.xml +++ b/docs/en-US/working-with-volumes.xml @@ -21,29 +21,32 @@ specific language governing permissions and limitations under the License. --> -
- Using Swift for Secondary Storage
- A volume provides storage to a guest VM. The volume can provide for
- a root disk or an additional data disk. &PRODUCT; supports additional
- volumes for guest VMs.
-
- Volumes are created for a specific hypervisor type. A volume that has
- been attached to guest using one hypervisor type (e.g, XenServer) may not
- be attached to a guest that is using another hypervisor type (e.g.
- vSphere, KVM). This is because the different hypervisors use
- different disk image formats.
-
- &PRODUCT; defines a volume as a unit of storage available to a guest
- VM. Volumes are either root disks or data disks. The root disk has "/"
- in the file system and is usually the boot device. Data disks provide
- for additional storage (e.g. As "/opt" or "D:"). Every guest VM has a root
- disk, and VMs can also optionally have a data disk. End users can mount
- multiple data disks to guest VMs. Users choose data disks from the disk
- offerings created by administrators. The user can create a template from
- a volume as well; this is the standard procedure for private template
- creation. Volumes are hypervisor-specific: a volume from one hypervisor
- type may not be used on a guest of another hypervisor type.
-
+ Working With Volumes
+ A volume provides storage to a guest VM. The volume can provide for a root disk or an
+ additional data disk. &PRODUCT; supports additional volumes for guest VMs.
+ Volumes are created for a specific hypervisor type. A volume that has been attached to a guest
+ using one hypervisor type (e.g., XenServer) may not be attached to a guest that is using another
+ hypervisor type, for example, vSphere or KVM. This is because the different hypervisors use different
+ disk image formats.
+ &PRODUCT; defines a volume as a unit of storage available to a guest VM. Volumes are either
+ root disks or data disks. The root disk has "/" in the file system and is usually the boot
+ device. Data disks provide for additional storage, for example: "/opt" or "D:". Every guest VM
+ has a root disk, and VMs can also optionally have a data disk. End users can mount multiple data
+ disks to guest VMs. Users choose data disks from the disk offerings created by administrators.
+ The user can create a template from a volume as well; this is the standard procedure for private
+ template creation. Volumes are hypervisor-specific: a volume from one hypervisor type may not be
+ used on a guest of another hypervisor type.
+
+ &PRODUCT; supports attaching up to 13 data disks to a VM on XenServer hypervisor versions
+ 6.0 and above. For VMs on other hypervisor types, the data disk limit is 6.
+
+
+
+
+
+
+
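The root/data distinction is visible through the listVolumes API, which reports a type of ROOT or DATADISK for every volume. A small sketch, assuming a Marvin apiclient and a placeholder VM UUID:

    from marvin.cloudstackAPI import listVolumes

    def data_disks_of(apiclient, vm_id):
        # Return only the data disks attached to the given VM; the root
        # disk is excluded by the type filter.
        cmd = listVolumes.listVolumesCmd()
        cmd.virtualmachineid = vm_id
        cmd.type = "DATADISK"
        return apiclient.listVolumes(cmd) or []   # Marvin returns None when empty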
-