From af28ded904045734d83cac4c4fe711e5e1315a5b Mon Sep 17 00:00:00 2001
From: Priti Sarap
Date: Fri, 7 Aug 2015 17:57:54 +0530
Subject: [PATCH 1/2] CLOUDSTACK-8717: Failed to start instance after
 restoring the running instance

---
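Note: the test below tags each cluster-wide primary storage pool with a
distinct tag derived from marvin's CLUSTERTAG1 constant, deploys a VM on the
first pool, restores the VM in a loop until its ROOT volume is recreated on
another pool, and then waits for the VM to reach the Running state. A minimal
sketch of the tag derivation, assuming CLUSTERTAG1 is a short string ending
in a digit (hypothetical value; the real constant lives in marvin/codes.py):

    # Assumed value, for illustration only; see marvin/codes.py for the
    # real constant.
    CLUSTERTAG1 = "cwps1"

    # The test replaces the trailing digit with a running cluster counter,
    # yielding one distinct tag per cluster-wide pool.
    tags = [CLUSTERTAG1[:-1] + repr(n) for n in (1, 2)]
    print(tags)   # -> ['cwps1', 'cwps2']
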
 .../testpaths/testpath_restore_vm.py          | 201 ++++++++++++++++++
 1 file changed, 201 insertions(+)
 create mode 100644 test/integration/testpaths/testpath_restore_vm.py

diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
new file mode 100644
index 00000000000..7fd1610dcc6
--- /dev/null
+++ b/test/integration/testpaths/testpath_restore_vm.py
@@ -0,0 +1,201 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Test restore running VM on VMware with one cluster having 2 Primary Storage
+"""
+
+
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import cleanup_resources
+from marvin.lib.base import (Account,
+                             ServiceOffering,
+                             VirtualMachine,
+                             StoragePool
+                             )
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_volumes,
+                               list_virtual_machines
+                               )
+
+from marvin.codes import CLUSTERTAG1, ROOT
+import time
+
+
+class TestRestoreVM(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestRestoreVM, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.testdata = testClient.getParsedTestDataConfig()
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+
+        cls.template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"])
+
+        cls._cleanup = []
+
+        try:
+            cls.skiptest = False
+            if cls.hypervisor.lower() not in ["vmware"]:
+                cls.skiptest = True
+                return
+
+            # Create an account
+            cls.account = Account.create(
+                cls.apiclient,
+                cls.testdata["account"],
+                domainid=cls.domain.id
+            )
+            cls._cleanup.append(cls.account)
+            # Create user api client of the account
+            cls.userapiclient = testClient.getUserApiClient(
+                UserName=cls.account.name,
+                DomainName=cls.account.domain
+            )
+            # Create Service offering
+            cls.service_offering_cwps = ServiceOffering.create(
+                cls.apiclient,
+                cls.testdata["service_offering"],
+                tags=CLUSTERTAG1
+            )
+            cls._cleanup.append(cls.service_offering_cwps)
+        except Exception as e:
+            cls.tearDownClass()
+            raise e
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+
+    def setUp(self):
+
+        self.cleanup = []
+        if self.skiptest:
+            self.skipTest("This test is to be checked on VMware only. "
+                          "Hence, skipping for %s" % self.hypervisor)
+
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    @attr(tags=["advanced", "basic"], required_hardware="true")
+    def test_01_recover_VM(self):
+        """ Test Restore VM on VMware
+            1. Deploy a VM without a data disk
+            2. Restore the VM
+            3. Verify that the VM comes up in the Running state
+        """
+        try:
+            self.pools = StoragePool.list(
+                self.apiclient,
+                zoneid=self.zone.id,
+                scope="CLUSTER")
+
+            if len(self.pools) < 2:
+                self.skipTest("There must be at least two cluster-wide "
+                              "storage pools available in the setup")
+
+        except Exception as e:
+            self.skipTest(e)
+
+        # Adding tags to Storage Pools
+        cluster_no = 1
+        self.debug("Storage Pools: %s" % self.pools)
+        for storagePool in self.pools:
+            if storagePool.scope == "CLUSTER":
+                StoragePool.update(
+                    self.apiclient,
+                    id=storagePool.id,
+                    tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
+                cluster_no += 1
+
+        self.vm = VirtualMachine.create(
+            self.apiclient,
+            self.testdata["small"],
+            accountid=self.account.name,
+            templateid=self.template.id,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering_cwps.id,
+            zoneid=self.zone.id,
+        )
+        # Step 2
+
+        volumes_root_list = list_volumes(
+            self.apiclient,
+            virtualmachineid=self.vm.id,
+            type=ROOT,
+            listall=True
+        )
+
+        root_volume = volumes_root_list[0]
+
+        # Restore the VM until its ROOT disk is recreated on another
+        # Primary Storage
+        while True:
+            self.vm.restore(self.apiclient)
+            volumes_root_list = list_volumes(
+                self.apiclient,
+                virtualmachineid=self.vm.id,
+                type=ROOT,
+                listall=True
+            )
+
+            root_volume = volumes_root_list[0]
+
+            if root_volume.storage != self.pools[0].name:
+                break
+
+        # Step 3
+        vm_list = list_virtual_machines(
+            self.apiclient,
+            id=self.vm.id)
+
+        state = vm_list[0].state
+        i = 0
+        while state != "Running":
+            time.sleep(10)
+            vm_list = list_virtual_machines(
+                self.apiclient,
+                id=self.vm.id)
+            state = vm_list[0].state
+            i = i + 1
+            if i >= 10:
+                self.fail("Restore VM Failed")
+
+        return

From 4bbf151eb7d39d0ab535a3a7c45d95af71cbd689 Mon Sep 17 00:00:00 2001
From: Priti Sarap
Date: Wed, 12 Aug 2015 12:50:56 +0530
Subject: [PATCH 2/2] CLOUDSTACK-8717: Failed to start instance after
 restoring the running instance

-Modified code to add a tag to only one cluster-wide storage pool
-Added a validateList check on the pool list
-Added code to clear the tag in tearDown
---
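Note: the new pre-check consumes marvin's validateList helper. A small
sketch of how that check reads in isolation; the exact return shape of
validateList is an assumption here (a list whose first element is PASS or
FAIL), inferred from the `status[0] == PASS` comparison in the diff below:

    from marvin.lib.utils import validateList
    from marvin.codes import PASS

    def assert_valid_pool_list(testcase, pools):
        # Assumed shape: validateList(...) returns a list whose first
        # element is PASS for a valid, non-empty input list, FAIL otherwise.
        status = validateList(pools)
        testcase.assertEqual(
            status[0], PASS,
            "Check: Failed to list cluster wide storage pools")
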
 .../testpaths/testpath_restore_vm.py          | 30 ++++++++++++-------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
index 7fd1610dcc6..d73499ad21a 100644
--- a/test/integration/testpaths/testpath_restore_vm.py
+++ b/test/integration/testpaths/testpath_restore_vm.py
@@ -22,7 +22,7 @@ Test restore running VM on VMware with one cluster having 2 Primary Storage
 
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.lib.utils import cleanup_resources
+from marvin.lib.utils import cleanup_resources, validateList
 from marvin.lib.base import (Account,
                              ServiceOffering,
                              VirtualMachine,
@@ -35,7 +35,7 @@ from marvin.lib.common import (get_domain,
                                list_virtual_machines
                                )
 
-from marvin.codes import CLUSTERTAG1, ROOT
+from marvin.codes import CLUSTERTAG1, ROOT, PASS
 import time
 
 
@@ -108,6 +108,12 @@ class TestRestoreVM(cloudstackTestCase):
 
     def tearDown(self):
         try:
+            if getattr(self, "pools", None):
+                StoragePool.update(
+                    self.apiclient,
+                    id=self.pools[0].id,
+                    tags="")
+
             cleanup_resources(self.apiclient, self.cleanup)
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
@@ -126,6 +132,14 @@ class TestRestoreVM(cloudstackTestCase):
                 zoneid=self.zone.id,
                 scope="CLUSTER")
 
+            status = validateList(self.pools)
+
+            # Validate the pool list before using it
+            self.assertEqual(
+                status[0],
+                PASS,
+                "Check: Failed to list cluster wide storage pools")
+
             if len(self.pools) < 2:
                 self.skipTest("There must be at least two cluster-wide "
                               "storage pools available in the setup")
@@ -135,14 +149,10 @@ class TestRestoreVM(cloudstackTestCase):
 
         # Adding tags to Storage Pools
        cluster_no = 1
-        self.debug("Storage Pools: %s" % self.pools)
-        for storagePool in self.pools:
-            if storagePool.scope == "CLUSTER":
-                StoragePool.update(
-                    self.apiclient,
-                    id=storagePool.id,
-                    tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
-                cluster_no += 1
+        StoragePool.update(
+            self.apiclient,
+            id=self.pools[0].id,
+            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
 
         self.vm = VirtualMachine.create(
             self.apiclient,
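Note on the Running-state wait in test_01_recover_VM: it is a bounded poll
(10 attempts, 10 seconds apart). The same pattern, restated as a generic
stand-alone helper (hypothetical, not part of marvin):

    import time

    def wait_for_state(fetch_state, wanted="Running", tries=10, interval=10):
        # Poll fetch_state() until it returns `wanted`; give up after
        # `tries` attempts spaced `interval` seconds apart.
        for _ in range(tries):
            if fetch_state() == wanted:
                return True
            time.sleep(interval)
        return False

    # In the test's terms, fetch_state would wrap
    # list_virtual_machines(apiclient, id=vm.id)[0].state.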