diff --git a/test/integration/component/test_snapshot_gc.py b/test/integration/component/test_snapshot_gc.py
index 369543d0bc7..ac18bd52cb5 100644
--- a/test/integration/component/test_snapshot_gc.py
+++ b/test/integration/component/test_snapshot_gc.py
@@ -144,50 +144,59 @@ class TestAccountSnapshotClean(cloudstackTestCase):
         cls.services["server"]["zoneid"] = cls.zone.id
         cls.services["template"] = template.id
+        cls._cleanup = []
 
-        # Create VMs, NAT Rules etc
-        cls.account = Account.create(
-            cls.api_client,
-            cls.services["account"],
-            domainid=cls.domain.id
-        )
-
-        cls.services["account"] = cls.account.name
-
-        cls.service_offering = ServiceOffering.create(
-            cls.api_client,
-            cls.services["service_offering"]
-        )
-        cls.virtual_machine = VirtualMachine.create(
+        try:
+            # Create VMs, NAT Rules etc
+            cls.account = Account.create(
                 cls.api_client,
-            cls.services["server"],
-            templateid=template.id,
-            accountid=cls.account.name,
-            domainid=cls.account.domainid,
-            serviceofferingid=cls.service_offering.id
+                cls.services["account"],
+                domainid=cls.domain.id
             )
-        # Get the Root disk of VM
-        volumes = list_volumes(
-            cls.api_client,
-            virtualmachineid=cls.virtual_machine.id,
-            type='ROOT',
-            listall=True
-        )
-        volume = volumes[0]
-
-        # Create a snapshot from the ROOTDISK
-        cls.snapshot = Snapshot.create(cls.api_client, volumes[0].id)
+            cls.services["account"] = cls.account.name
+            cls._cleanup.append(cls.account)
 
-        cls._cleanup = [
-            cls.service_offering,
-        ]
+            cls.services["service_offering"]["storagetype"] = "local"
+            cls.service_offering = ServiceOffering.create(
+                cls.api_client,
+                cls.services["service_offering"]
+            )
+            cls._cleanup.append(cls.service_offering)
+            cls.virtual_machine = VirtualMachine.create(
+                cls.api_client,
+                cls.services["server"],
+                templateid=template.id,
+                accountid=cls.account.name,
+                domainid=cls.account.domainid,
+                serviceofferingid=cls.service_offering.id
+            )
+            cls._cleanup.append(cls.virtual_machine)
+            # Get the Root disk of VM
+            volumes = list_volumes(
+                cls.api_client,
+                virtualmachineid=cls.virtual_machine.id,
+                type='ROOT',
+                listall=True
+            )
+            volume = volumes[0]
+
+            # Create a snapshot from the ROOTDISK
+            cls.snapshot = Snapshot.create(cls.api_client, volumes[0].id)
+            cls._cleanup.append(cls.snapshot)
+        except Exception as e:
+            # clean up whatever was already created, then skip the whole class
+            cls.tearDownClass()
+            raise unittest.SkipTest("setUpClass failed for %s: %s" % (cls.__name__, e))
+        else:
+            cls._cleanup.remove(cls.account)
         return
 
     @classmethod
     def tearDownClass(cls):
         try:
             #Cleanup resources used
-            cleanup_resources(cls.api_client, cls._cleanup)
+            cleanup_resources(cls.api_client, reversed(cls._cleanup))
         except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
@@ -206,6 +215,82 @@ class TestAccountSnapshotClean(cloudstackTestCase):
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
 
+    def is_snapshot_on_nfs(self, snapshot_id):
+        """
+        Checks whether the snapshot with id (not UUID) `snapshot_id` is present on the NFS secondary storage
+
+        @param snapshot_id: id of the snapshot (not uuid)
+        @return: True if snapshot is found, False otherwise
+        """
+        secondaryStores = ImageStore.list(self.apiclient, zoneid=self.zone.id)
+        self.assertTrue(isinstance(secondaryStores, list), "Not a valid response for listImageStores")
+        self.assertNotEqual(len(secondaryStores), 0, "No image stores found in zone %s" % self.zone.id)
+        secondaryStore = secondaryStores[0]
+        if str(secondaryStore.providername).lower() != "nfs":
+            self.skipTest("TODO: %s test works only against nfs secondary storage" % self._testMethodName)
+
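+        # the check below mounts the store's NFS export on the management server and
+        # looks for the snapshot's install_path recorded in snapshot_store_ref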
+        qresultset = self.dbclient.execute(
+            "select install_path from snapshot_store_ref where snapshot_id='%s' and store_role='Image';" % snapshot_id
+        )
+        self.assertEqual(
+            isinstance(qresultset, list),
+            True,
+            "Invalid db query response for snapshot %s" % self.snapshot.id
+        )
+        self.assertNotEqual(
+            len(qresultset),
+            0,
+            "No such snapshot %s found in the cloudstack db" % self.snapshot.id
+        )
+        snapshotPath = qresultset[0][0]
+        nfsurl = secondaryStore.url
+        # e.g. nfsurl = "nfs://192.168.100.21/export/test"
+        import urlparse
+        parse_url = urlparse.urlsplit(nfsurl, scheme='nfs')
+        host, path = parse_url.netloc, parse_url.path
+        # Sleep to ensure that snapshot is reflected in sec storage
+        time.sleep(self.services["sleep"])
+        snapshots = []
+        try:
+            # Login to the management server to check that the snapshot is present on the sec storage disk
+            ssh_client = remoteSSHClient(
+                self.config.mgtSvr[0].mgtSvrIp,
+                22,
+                self.config.mgtSvr[0].user,
+                self.config.mgtSvr[0].passwd,
+            )
+
+            cmds = [
+                "mkdir -p %s" % self.services["paths"]["mount_dir"],
+                "mount -t %s %s:%s %s" % (
+                    'nfs',
+                    host,
+                    path,
+                    self.services["paths"]["mount_dir"]
+                ),
+                "ls %s/%s" % (
+                    self.services["paths"]["mount_dir"], snapshotPath
+                ),
+            ]
+
+            for c in cmds:
+                self.debug("command: %s" % c)
+                result = ssh_client.execute(c)
+                self.debug("Result: %s" % result)
+
+            snapshots.extend(result)
+            # Unmount the Sec Storage
+            cmds = [
+                "umount %s" % (self.services["paths"]["mount_dir"]),
+            ]
+            for c in cmds:
+                ssh_client.execute(c)
+        except Exception as e:
+            self.fail("SSH failed for management server: %s - %s" %
+                      (self.config.mgtSvr[0].mgtSvrIp, e))
+        res = str(snapshots)
+        return res.count(snapshot_id) == 1
+
     @attr(speed = "slow")
     @attr(tags = ["advanced", "advancedns", "basic", "sg"])
     def test_02_accountSnapshotClean(self):
@@ -276,7 +361,7 @@ class TestAccountSnapshotClean(cloudstackTestCase):
         self.assertNotEqual(
             snapshots,
             None,
-            "Check if result exists in list snapshots call"
+            "No such snapshot %s found" % self.snapshot.id
        )
         self.assertEqual(
             snapshots[0].id,
@@ -284,191 +369,41 @@ class TestAccountSnapshotClean(cloudstackTestCase):
             "Check snapshot id in list resources call"
         )
 
-        # Fetch values from database
         qresultset = self.dbclient.execute(
-            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
+            "select id from snapshots where uuid = '%s';" \
             % self.snapshot.id
         )
         self.assertEqual(
             isinstance(qresultset, list),
             True,
-            "Check DB response returns a valid list"
+            "Invalid db query response for snapshot %s" % self.snapshot.id
         )
         self.assertNotEqual(
             len(qresultset),
             0,
-            "Check DB Query result set"
+            "No such snapshot %s found in the cloudstack db" % self.snapshot.id
         )
 
         qresult = qresultset[0]
-        snapshot_uuid = qresult[0]      # backup_snap_id = snapshot UUID
-        account_id = qresult[1]
-        volume_id = qresult[2]
+        snapshot_db_id = qresult[0]
 
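+        # is_snapshot_on_nfs() looks the snapshot up in snapshot_store_ref by this
+        # numeric id, so the db id (not the uuid) is what gets passed below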
-        # Get the Secondary Storage details from list Hosts
-        hosts = list_hosts(
-            self.apiclient,
-            type='SecondaryStorage',
-            zoneid=self.zone.id
-        )
-        self.assertEqual(
-            isinstance(hosts, list),
-            True,
-            "Check list response returns a valid list"
-        )
-        uuids = []
-        for host in hosts:
-            # hosts[0].name = "nfs://192.168.100.21/export/test"
-            parse_url = (host.name).split('/')
-            # parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
-
-            # Stripping end ':' from storage type
-            storage_type = parse_url[0][:-1]
-            # Split IP address and export path from name
-            sec_storage_ip = parse_url[2]
-            # Sec Storage IP: 192.168.100.21
-
-            if sec_storage_ip[-1] != ":":
-                sec_storage_ip = sec_storage_ip + ":"
-
-            export_path = '/'.join(parse_url[3:])
-            # Export path: export/test
-
-            # Sleep to ensure that snapshot is reflected in sec storage
-            time.sleep(self.services["sleep"])
-            try:
-                # Login to Secondary storage VM to check snapshot present on sec disk
-                ssh_client = remoteSSHClient(
-                    self.services["mgmt_server"]["ipaddress"],
-                    self.services["mgmt_server"]["port"],
-                    self.services["mgmt_server"]["username"],
-                    self.services["mgmt_server"]["password"],
-                )
-
-                cmds = [
-                    "mkdir -p %s" % self.services["paths"]["mount_dir"],
-                    "mount -t %s %s/%s %s" % (
-                        storage_type,
-                        sec_storage_ip,
-                        export_path,
-                        self.services["paths"]["mount_dir"]
-                    ),
-                    "ls %s/snapshots/%s/%s" % (
-                        self.services["paths"]["mount_dir"],
-                        account_id,
-                        volume_id
-                    ),
-                ]
-
-                for c in cmds:
-                    self.debug("command: %s" % c)
-                    result = ssh_client.execute(c)
-                    self.debug("Result: %s" % result)
-
-                uuids.append(result)
-
-                # Unmount the Sec Storage
-                cmds = [
-                    "umount %s" % (self.services["mount_dir"]),
-                ]
-                for c in cmds:
-                    result = ssh_client.execute(c)
-            except Exception as e:
-                self.fail("SSH failed for management server: %s - %s" %
-                          (self.services["mgmt_server"]["ipaddress"], e))
-
-        res = str(uuids)
-        self.assertEqual(
-            res.count(snapshot_uuid),
-            1,
-            "Check snapshot UUID in secondary storage and database"
-        )
+        self.assertTrue(self.is_snapshot_on_nfs(snapshot_db_id), "Snapshot was not found on NFS")
 
         self.debug("Deleting account: %s" % self.account.name)
         # Delete account
         self.account.delete(self.apiclient)
 
-        interval = list_configurations(
-            self.apiclient,
-            name='account.cleanup.interval'
-        )
-        self.assertEqual(
-            isinstance(interval, list),
-            True,
-            "Check list response returns a valid list"
-        )
-        self.debug("account.cleanup.interval: %s" % interval[0].value)
-        # Wait for account cleanup interval
-        time.sleep(int(interval[0].value) * 2)
-
+        wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"])
         accounts = list_accounts(
             self.apiclient,
             id=self.account.id
         )
-
         self.assertEqual(
             accounts,
             None,
             "List accounts should return empty list after account deletion"
         )
 
-        uuids = []
-        for host in hosts:
-            # hosts[0].name = "nfs://192.168.100.21/export/test"
-            parse_url = (host.name).split('/')
-            # parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
-
-            # Stripping end ':' from storage type
-            storage_type = parse_url[0][:-1]
-            # Split IP address and export path from name
-            sec_storage_ip = parse_url[2]
-            # Sec Storage IP: 192.168.100.21
-
-            if sec_storage_ip[-1] != ":":
-                sec_storage_ip = sec_storage_ip + ":"
-
-            export_path = '/'.join(parse_url[3:])
-            # Export path: export/test
-
-            try:
-                cmds = [
-                    "mount -t %s %s/%s %s" % (
-                        storage_type,
-                        sec_storage_ip,
-                        export_path,
-                        self.services["paths"]["mount_dir"]
-                    ),
-                    "ls %s/snapshots/%s/%s" % (
-                        self.services["paths"]["mount_dir"],
-                        account_id,
-                        volume_id
-                    ),
-                ]
-
-                for c in cmds:
-                    self.debug("command: %s" % c)
-                    result = ssh_client.execute(c)
-                    self.debug("Result: %s" % result)
-
-                uuids.append(result)
-
-                # Unmount the Sec Storage
-                cmds = [
-                    "umount %s" % (self.services["paths"]["mount_dir"]),
-                ]
-                for c in cmds:
-                    self.debug("command: %s" % c)
-                    result = ssh_client.execute(c)
-                    self.debug("Result: %s" % result)
-
-            except Exception as e:
-                self.fail("SSH failed for management server: %s - %s" %
-                          (self.services["mgmt_server"]["ipaddress"], e))
-
-        res = str(uuids)
-        self.assertNotEqual(
-            res.count(snapshot_uuid),
-            1,
-            "Check snapshot UUID in secondary storage and database"
-        )
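+        # once the account has been garbage collected, the snapshot copy should be
+        # gone from the NFS image store as well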
+        self.assertFalse(self.is_snapshot_on_nfs(snapshot_db_id), "Snapshot was still found on NFS after account gc")
         return
\ No newline at end of file
diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py
index 8112f4813a8..b5d086b16c0 100755
--- a/tools/marvin/marvin/integration/lib/base.py
+++ b/tools/marvin/marvin/integration/lib/base.py
@@ -2344,28 +2344,74 @@ class PortablePublicIpRange:
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.listPortableIpRanges(cmd))
 
-class SecondaryStorage:
-    """Manage Secondary storage"""
+class SecondaryStagingStore:
+    """Manage Staging Store"""
 
     def __init__(self, items):
         self.__dict__.update(items)
 
     @classmethod
-    def create(cls, apiclient, services):
-        """Create Secondary Storage"""
-        cmd = addSecondaryStorage.addSecondaryStorageCmd()
+    def create(cls, apiclient, url, provider, services=None):
+        """Create Staging Storage"""
+        cmd = createSecondaryStagingStore.createSecondaryStagingStoreCmd()
+        cmd.url = url
+        cmd.provider = provider
+        if services:
+            if "zoneid" in services:
+                cmd.zoneid = services["zoneid"]
+            if "details" in services:
+                cmd.details = services["details"]
+            if "scope" in services:
+                cmd.scope = services["scope"]
 
-        cmd.url = services["url"]
-        if "zoneid" in services:
-            cmd.zoneid = services["zoneid"]
-        return SecondaryStorage(apiclient.addSecondaryStorage(cmd).__dict__)
+        return SecondaryStagingStore(apiclient.createSecondaryStagingStore(cmd).__dict__)
 
     def delete(self, apiclient):
-        """Delete Secondary Storage"""
-
-        cmd = deleteHost.deleteHostCmd()
+        """Delete Staging Storage"""
+        cmd = deleteSecondaryStagingStore.deleteSecondaryStagingStoreCmd()
         cmd.id = self.id
-        apiclient.deleteHost(cmd)
+        apiclient.deleteSecondaryStagingStore(cmd)
+
+    @classmethod
+    def list(cls, apiclient, **kwargs):
+        cmd = listSecondaryStagingStores.listSecondaryStagingStoresCmd()
+        [setattr(cmd, k, v) for k, v in kwargs.items()]
+        return(apiclient.listSecondaryStagingStores(cmd))
+
+
+class ImageStore:
+    """Manage image stores"""
+
+    def __init__(self, items):
+        self.__dict__.update(items)
+
+    @classmethod
+    def create(cls, apiclient, url, provider, services=None):
+        """Add Image Store"""
+        cmd = addImageStore.addImageStoreCmd()
+        cmd.url = url
+        cmd.provider = provider
+        if services:
+            if "zoneid" in services:
+                cmd.zoneid = services["zoneid"]
+            if "details" in services:
+                cmd.details = services["details"]
+            if "scope" in services:
+                cmd.scope = services["scope"]
+
+        return ImageStore(apiclient.addImageStore(cmd).__dict__)
+
+    def delete(self, apiclient):
+        """Delete Image Store"""
+        cmd = deleteImageStore.deleteImageStoreCmd()
+        cmd.id = self.id
+        apiclient.deleteImageStore(cmd)
+
+    @classmethod
+    def list(cls, apiclient, **kwargs):
+        cmd = listImageStores.listImageStoresCmd()
+        [setattr(cmd, k, v) for k, v in kwargs.items()]
+        return(apiclient.listImageStores(cmd))
 
 class PhysicalNetwork: