diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py
index fa7bea0b9c1..e1d419ff150 100644
--- a/test/integration/smoke/test_volumes.py
+++ b/test/integration/smoke/test_volumes.py
@@ -16,41 +16,42 @@
 # under the License.
 """ BVT tests for Volumes
 """
 
-#Import Local Modules
-from marvin.cloudstackTestCase import cloudstackTestCase
+import os
+import tempfile
+import time
 import unittest
-#from marvin.cloudstackException import *
+import urllib.error
+import urllib.parse
+import urllib.request
+
 from marvin.cloudstackAPI import (deleteVolume,
                                   extractVolume,
                                   resizeVolume)
-#from marvin.sshClient import SshClient
-from marvin.lib.utils import (cleanup_resources,
-                              format_volume_to_ext3,
-                              wait_until)
+
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.codes import SUCCESS, FAILED, XEN_SERVER
 from marvin.lib.base import (ServiceOffering,
                              VirtualMachine,
                              Account,
                              Volume,
                              Host,
                              DiskOffering,
-                             StoragePool,)
+                             StoragePool)
 from marvin.lib.common import (get_domain,
-                                get_suitable_test_template,
-                                get_zone,
-                                find_storage_pool_type,
-                                get_pod,
-                                list_disk_offering)
+                               get_suitable_test_template,
+                               get_zone,
+                               find_storage_pool_type,
+                               get_pod,
+                               list_disk_offering)
 from marvin.lib.utils import checkVolumeSize
-from marvin.codes import SUCCESS, FAILED, XEN_SERVER
+from marvin.lib.utils import (format_volume_to_ext3,
+                              wait_until)
+
 from nose.plugins.attrib import attr
 
-#Import System modules
-import os
-import urllib.request, urllib.parse, urllib.error
-import time
-import tempfile
 
 _multiprocess_shared_ = True
+
 
 class TestCreateVolume(cloudstackTestCase):
@@ -65,25 +66,28 @@ class TestCreateVolume(cloudstackTestCase):
         cls.hypervisor = testClient.getHypervisorInfo()
         cls.services['mode'] = cls.zone.networktype
         cls.invalidStoragePoolType = False
-        #for LXC if the storage pool of type 'rbd' ex: ceph is not available, skip the test
+        # for LXC, skip the test if a storage pool of type 'rbd' (e.g. ceph) is not available
         if cls.hypervisor.lower() == 'lxc':
             if not find_storage_pool_type(cls.apiclient, storagetype='rbd'):
                 # RBD storage type is required for data volumes for LXC
                 cls.invalidStoragePoolType = True
                 return
         cls.disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["disk_offering"]
-                                    )
+            cls.apiclient,
+            cls.services["disk_offering"]
+        )
+        cls._cleanup.append(cls.disk_offering)
         cls.sparse_disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["sparse_disk_offering"]
-                                    )
+            cls.apiclient,
+            cls.services["sparse_disk_offering"]
+        )
+        cls._cleanup.append(cls.sparse_disk_offering)
         cls.custom_disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["disk_offering"],
-                                    custom=True
-                                    )
+            cls.apiclient,
+            cls.services["disk_offering"],
+            custom=True
+        )
+        cls._cleanup.append(cls.custom_disk_offering)
 
         template = get_suitable_test_template(
             cls.apiclient,
@@ -101,28 +105,25 @@ class TestCreateVolume(cloudstackTestCase):
         cls.services["diskname"] = cls.services["volume"]["diskname"]
         # Create VMs, NAT Rules etc
         cls.account = Account.create(
-                            cls.apiclient,
-                            cls.services["account"],
-                            domainid=cls.domain.id
-                            )
+            cls.apiclient,
+            cls.services["account"],
+            domainid=cls.domain.id
+        )
+        cls._cleanup.append(cls.account)
         cls.service_offering = ServiceOffering.create(
-                                            cls.apiclient,
-                                            cls.services["service_offerings"]["tiny"]
-                                            )
+            cls.apiclient,
+            cls.services["service_offerings"]["tiny"]
+        )
+        cls._cleanup.append(cls.service_offering)
         cls.virtual_machine = VirtualMachine.create(
-                                    cls.apiclient,
-                                    cls.services,
-                                    accountid=cls.account.name,
-                                    domainid=cls.account.domainid,
-                                    serviceofferingid=cls.service_offering.id,
-                                    mode=cls.services["mode"]
-                                )
-        cls._cleanup = [
-            cls.service_offering,
-            cls.disk_offering,
-            cls.custom_disk_offering,
-            cls.account
-        ]
+            cls.apiclient,
+            cls.services,
+            accountid=cls.account.name,
+            domainid=cls.account.domainid,
+            serviceofferingid=cls.service_offering.id,
+            mode=cls.services["mode"]
+        )
+        cls._cleanup.append(cls.virtual_machine)
 
 
     def setUp(self):
@@ -134,81 +135,81 @@ class TestCreateVolume(cloudstackTestCase):
             self.skipTest("Skipping test because of valid storage\
                           pool not available")
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_01_create_volume(self):
         """Test Volume creation for all Disk Offerings (incl. custom)
-        """
         # Validate the following
         # 1. Create volumes from the different sizes
         # 2. Verify the size of volume with actual size allocated
+        """
 
         self.volumes = []
         for k, v in list(self.services["volume_offerings"].items()):
             volume = Volume.create(
-                                   self.apiClient,
-                                   v,
-                                   zoneid=self.zone.id,
-                                   account=self.account.name,
-                                   domainid=self.account.domainid,
-                                   diskofferingid=self.disk_offering.id
-                                   )
-            self.debug("Created a volume with ID: %s" % volume.id)
+                self.apiClient,
+                v,
+                zoneid=self.zone.id,
+                account=self.account.name,
+                domainid=self.account.domainid,
+                diskofferingid=self.disk_offering.id
+            )
             self.volumes.append(volume)
+            self.debug("Created a volume with ID: %s" % volume.id)
 
         if self.virtual_machine.hypervisor == "KVM":
             sparse_volume = Volume.create(
-                                          self.apiClient,
-                                          self.services,
-                                          zoneid=self.zone.id,
-                                          account=self.account.name,
-                                          domainid=self.account.domainid,
-                                          diskofferingid=self.sparse_disk_offering.id
-                                          )
-            self.debug("Created a sparse volume: %s" % sparse_volume.id)
+                self.apiClient,
+                self.services,
+                zoneid=self.zone.id,
+                account=self.account.name,
+                domainid=self.account.domainid,
+                diskofferingid=self.sparse_disk_offering.id
+            )
             self.volumes.append(sparse_volume)
+            self.debug("Created a sparse volume: %s" % sparse_volume.id)
 
         volume = Volume.create_custom_disk(
-                                           self.apiClient,
-                                           self.services,
-                                           account=self.account.name,
-                                           domainid=self.account.domainid,
-                                           )
-        self.debug("Created a volume with custom offering: %s" % volume.id)
+            self.apiClient,
+            self.services,
+            account=self.account.name,
+            domainid=self.account.domainid,
+        )
         self.volumes.append(volume)
+        self.debug("Created a volume with custom offering: %s" % volume.id)
 
-        #Attach a volume with different disk offerings
-        #and check the memory allocated to each of them
+        # Attach volumes created with different disk offerings
+        # and check the disk space allocated to each of them
         for volume in self.volumes:
             list_volume_response = Volume.list(
-                                               self.apiClient,
-                                               id=volume.id)
+                self.apiClient,
+                id=volume.id)
             self.assertEqual(
-                             isinstance(list_volume_response, list),
-                             True,
-                             "Check list response returns a valid list"
-                             )
+                isinstance(list_volume_response, list),
+                True,
+                "Check list response returns a valid list"
+            )
             self.assertNotEqual(
-                                list_volume_response,
-                                None,
-                                "Check if volume exists in ListVolumes"
-                                )
+                list_volume_response,
+                None,
+                "Check if volume exists in ListVolumes"
+            )
             self.debug(
                 "Attaching volume (ID: %s) to VM (ID: %s)" % (
-                                                    volume.id,
-                                                    self.virtual_machine.id
-                                                    ))
+                    volume.id,
+                    self.virtual_machine.id
+                ))
             self.virtual_machine.attach_volume(
-                                                self.apiClient,
-                                                volume
-                                                )
+                self.apiClient,
+                volume
+            )
             try:
                 ssh = self.virtual_machine.get_ssh_client()
self.debug("Rebooting VM %s" % self.virtual_machine.id) ssh.execute("reboot") except Exception as e: self.fail("SSH access failed for VM %s - %s" % - (self.virtual_machine.ipaddress, e)) + (self.virtual_machine.ipaddress, e)) # Poll listVM to ensure VM is started properly timeout = self.services["timeout"] @@ -217,9 +217,9 @@ class TestCreateVolume(cloudstackTestCase): # Ensure that VM is in running state list_vm_response = VirtualMachine.list( - self.apiClient, - id=self.virtual_machine.id - ) + self.apiClient, + id=self.virtual_machine.id + ) if isinstance(list_vm_response, list): vm = list_vm_response[0] @@ -234,41 +234,35 @@ class TestCreateVolume(cloudstackTestCase): vol_sz = str(list_volume_response[0].size) ssh = self.virtual_machine.get_ssh_client( - reconnect=True - ) + reconnect=True + ) # Get the updated volume information list_volume_response = Volume.list( - self.apiClient, - id=volume.id) + self.apiClient, + id=volume.id) if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower(): volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using XenServer volume_name: %s" % (volume_name)) - ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz) + ret = checkVolumeSize(ssh_handle=ssh, volume_name=volume_name, size_to_verify=vol_sz) elif list_volume_response[0].hypervisor.lower() == "kvm": volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid)) self.debug(" Using KVM volume_name: %s" % (volume_name)) - ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz) + ret = checkVolumeSize(ssh_handle=ssh, volume_name=volume_name, size_to_verify=vol_sz) elif list_volume_response[0].hypervisor.lower() == "hyperv": - ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz) + ret = checkVolumeSize(ssh_handle=ssh, volume_name="/dev/sdb", size_to_verify=vol_sz) else: - ret = checkVolumeSize(ssh_handle=ssh,size_to_verify=vol_sz) - self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1])) + ret = checkVolumeSize(ssh_handle=ssh, size_to_verify=vol_sz) + self.debug(" Volume Size Expected %s Actual :%s" % (vol_sz, ret[1])) self.virtual_machine.detach_volume(self.apiClient, volume) - self.assertEqual(ret[0],SUCCESS,"Check if promised disk size actually available") + self.assertEqual(ret[0], SUCCESS, "Check if promised disk size actually available") time.sleep(self.services["sleep"]) def tearDown(self): - #Clean up, terminate the created volumes - cleanup_resources(self.apiClient, self.cleanup) - return + super(TestCreateVolume, self).tearDown() @classmethod def tearDownClass(cls): - try: - cls.apiclient = super(TestCreateVolume, cls).getClsTestClient().getApiClient() - cleanup_resources(cls.apiclient, cls._cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) + super(TestCreateVolume, cls).tearDownClass() class TestVolumes(cloudstackTestCase): @@ -285,25 +279,28 @@ class TestVolumes(cloudstackTestCase): cls.services['mode'] = cls.zone.networktype cls.hypervisor = testClient.getHypervisorInfo() cls.invalidStoragePoolType = False - #for LXC if the storage pool of type 'rbd' ex: ceph is not available, skip the test + # for LXC if the storage pool of type 'rbd' ex: ceph is not available, skip the test if cls.hypervisor.lower() == 'lxc': if not find_storage_pool_type(cls.apiclient, storagetype='rbd'): # RBD storage type is required for data volumes for LXC cls.invalidStoragePoolType = True return 
        cls.disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["disk_offering"]
-                                    )
+            cls.apiclient,
+            cls.services["disk_offering"]
+        )
+        cls._cleanup.append(cls.disk_offering)
         cls.resized_disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["resized_disk_offering"]
-                                    )
+            cls.apiclient,
+            cls.services["resized_disk_offering"]
+        )
+        cls._cleanup.append(cls.resized_disk_offering)
         cls.custom_resized_disk_offering = DiskOffering.create(
-                                    cls.apiclient,
-                                    cls.services["resized_disk_offering"],
-                                    custom=True
-                                    )
+            cls.apiclient,
+            cls.services["resized_disk_offering"],
+            custom=True
+        )
+        cls._cleanup.append(cls.custom_resized_disk_offering)
 
         cls.template = get_suitable_test_template(
             cls.apiclient,
@@ -324,53 +322,40 @@ class TestVolumes(cloudstackTestCase):
 
         # Create VMs, VMs etc
         cls.account = Account.create(
-                            cls.apiclient,
-                            cls.services["account"],
-                            domainid=cls.domain.id
-                            )
+            cls.apiclient,
+            cls.services["account"],
+            domainid=cls.domain.id
+        )
+        cls._cleanup.append(cls.account)
 
         cls.service_offering = ServiceOffering.create(
-                                            cls.apiclient,
-                                            cls.services["service_offerings"]["tiny"]
-                                            )
+            cls.apiclient,
+            cls.services["service_offerings"]["tiny"]
+        )
+        cls._cleanup.append(cls.service_offering)
         cls.virtual_machine = VirtualMachine.create(
-                                    cls.apiclient,
-                                    cls.services,
-                                    accountid=cls.account.name,
-                                    domainid=cls.account.domainid,
-                                    serviceofferingid=cls.service_offering.id,
-                                    mode=cls.services["mode"]
-                                )
+            cls.apiclient,
+            cls.services,
+            accountid=cls.account.name,
+            domainid=cls.account.domainid,
+            serviceofferingid=cls.service_offering.id,
+            mode=cls.services["mode"]
+        )
+        cls._cleanup.append(cls.virtual_machine)
         pools = StoragePool.list(cls.apiclient)
-        # cls.assertEqual(
-        #     validateList(pools)[0],
-        #     PASS,
-        #     "storage pool list validation failed")
-
         if cls.hypervisor.lower() == 'lxc' and cls.storage_pools.type.lower() != 'rbd':
             raise unittest.SkipTest("Snapshots not supported on Hyper-V or LXC")
         cls.volume = Volume.create(
-                                   cls.apiclient,
-                                   cls.services,
-                                   account=cls.account.name,
-                                   domainid=cls.account.domainid
-                                   )
-        cls._cleanup = [
-            cls.resized_disk_offering,
-            cls.custom_resized_disk_offering,
-            cls.service_offering,
-            cls.disk_offering,
-            cls.volume,
-            cls.account
-        ]
+            cls.apiclient,
+            cls.services,
+            account=cls.account.name,
+            domainid=cls.account.domainid
+        )
+        cls._cleanup.append(cls.volume)
 
     @classmethod
     def tearDownClass(cls):
-        try:
-            cleanup_resources(cls.apiclient, cls._cleanup)
-        except Exception as e:
-            raise Exception("Warning: Exception during cleanup : %s" % e)
+        super(TestVolumes, cls).tearDownClass()
 
     def setUp(self):
         self.apiClient = self.testClient.getApiClient()
@@ -383,29 +368,29 @@ class TestVolumes(cloudstackTestCase):
                           available")
 
     def tearDown(self):
-        #Clean up, terminate the created volumes
-        if self.attached:
-            self.virtual_machine.get_ssh_client(reconnect = True)
-
-            self.virtual_machine.detach_volume(self.apiClient, self.volume)
-
         if self.virtual_machine.hypervisor == "KVM":
             self.virtual_machine.stop(self.apiClient)
+            if self.attached:
+                self.virtual_machine.detach_volume(self.apiClient, self.volume)
             self.virtual_machine.start(self.apiClient)
-            self.virtual_machine.get_ssh_client(reconnect = True)
+            try:
+                self.virtual_machine.get_ssh_client(reconnect=True)
+            except Exception as err:
+                self.fail("SSH failed for Virtual machine: %s due to %s" %
+                          (self.virtual_machine.ipaddress, err))
+        elif self.attached:
+            self.virtual_machine.detach_volume(self.apiClient, self.volume)
+        super(TestVolumes, self).tearDown()
 
-        cleanup_resources(self.apiClient, self.cleanup)
-        return
-
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_02_attach_volume(self):
         """Attach a created Volume to a Running VM
-        """
         # Validate the following
         # 1. shows list of volumes
         # 2. "Attach Disk" pop-up box will display with list of instances
         # 3. disk should be attached to instance successfully
+        """
 
         self.debug(
             "Attaching volume (ID: %s) to VM (ID: %s)" % (
@@ -435,7 +420,7 @@ class TestVolumes(cloudstackTestCase):
             "Check if volume state (attached) is reflected"
         )
         try:
-            #Format the attached volume to a known fs
+            # Format the attached volume to a known fs
             format_volume_to_ext3(self.virtual_machine.get_ssh_client())
 
         except Exception as e:
@@ -444,14 +429,14 @@ class TestVolumes(cloudstackTestCase):
                       (self.virtual_machine.ipaddress, e))
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
     def test_03_download_attached_volume(self):
         """Download a Volume attached to a VM
-        """
         # Validate the following
         # 1. download volume will fail with proper error message
         #    "Failed - Invalid state of the volume with ID:
         #    It should be either detached or the VM should be in stopped state
+        """
 
         self.debug("Extract attached Volume ID: %s" % self.volume.id)
 
@@ -466,84 +451,83 @@ class TestVolumes(cloudstackTestCase):
         with self.assertRaises(Exception):
             self.apiClient.extractVolume(cmd)
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
     def test_04_delete_attached_volume(self):
         """Delete a Volume attached to a VM
-        """
         # Validate the following
        # 1. delete volume will fail with proper error message
         #    "Failed - Invalid state of the volume with ID:
         #    It should be either detached or the VM should be in stopped state
+        """
 
         self.debug("Trying to delete attached Volume ID: %s" %
-                                                        self.volume.id)
+                   self.volume.id)
         self.virtual_machine.attach_volume(self.apiClient, self.volume)
         self.attached = True
         cmd = deleteVolume.deleteVolumeCmd()
         cmd.id = self.volume.id
-        #Proper exception should be raised; deleting attach VM is not allowed
-        #with self.assertRaises(Exception):
+        # Deleting an attached volume is not allowed,
+        # so a proper exception should be raised here
         with self.assertRaises(Exception):
             self.apiClient.deleteVolume(cmd)
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
     def test_05_detach_volume(self):
         """Detach a Volume attached to a VM
-        """
         # Validate the following
         # Data disk should be detached from instance and detached data disk
         # details should be updated properly
+        """
 
         self.debug(
-                "Detaching volume (ID: %s) from VM (ID: %s)" % (
-                                                    self.volume.id,
-                                                    self.virtual_machine.id
-                                                    ))
+            "Detaching volume (ID: %s) from VM (ID: %s)" % (
+                self.volume.id,
+                self.virtual_machine.id))
         self.virtual_machine.attach_volume(self.apiClient, self.volume)
         self.virtual_machine.detach_volume(self.apiClient, self.volume)
         self.attached = False
-        #Sleep to ensure the current state will reflected in other calls
+        # Sleep to ensure the current state will be reflected in other calls
         time.sleep(self.services["sleep"])
         list_volume_response = Volume.list(
-                                            self.apiClient,
-                                            id=self.volume.id
-                                            )
+            self.apiClient,
+            id=self.volume.id
+        )
         self.assertEqual(
-                            isinstance(list_volume_response, list),
-                            True,
-                            "Check list response returns a valid list"
-                        )
+            isinstance(list_volume_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
         self.assertNotEqual(
-                            list_volume_response,
-                            None,
-                            "Check if volume exists in ListVolumes"
-                            )
+            list_volume_response,
+            None,
+            "Check if volume exists in ListVolumes"
+        )
         volume = list_volume_response[0]
         self.assertEqual(
-                        volume.virtualmachineid,
-                        None,
-                        "Check if volume state (detached) is reflected"
-                        )
+            volume.virtualmachineid,
+            None,
+            "Check if volume state (detached) is reflected"
+        )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_06_download_detached_volume(self):
         """Download a Volume unattached to an VM
+        Validate the following
+        1. able to download the volume when it is not attached to an instance
         """
-        # Validate the following
-        # 1. able to download the volume when its not attached to instance
 
         self.debug("Extract detached Volume ID: %s" % self.volume.id)
 
         self.virtual_machine.attach_volume(self.apiClient, self.volume)
-        #Sleep to ensure the current state will reflected in other calls
+        # Sleep to ensure the current state will be reflected in other calls
         time.sleep(self.services["sleep"])
 
         self.virtual_machine.detach_volume(self.apiClient, self.volume)
         self.attached = False
-        #Sleep to ensure the current state will reflected in other calls
+        # Sleep to ensure the current state will be reflected in other calls
         time.sleep(self.services["sleep"])
 
         cmd = extractVolume.extractVolumeCmd()
@@ -552,90 +536,90 @@ class TestVolumes(cloudstackTestCase):
         cmd.zoneid = self.services["zoneid"]
         extract_vol = self.apiClient.extractVolume(cmd)
 
-        #Attempt to download the volume and save contents locally
+        # Attempt to download the volume and save contents locally
         try:
             formatted_url = urllib.parse.unquote_plus(extract_vol.url)
             self.debug("Attempting to download volume at url %s" % formatted_url)
             response = urllib.request.urlopen(formatted_url)
             self.debug("response from volume url %s" % response.getcode())
             fd, path = tempfile.mkstemp()
-            self.debug("Saving volume %s to path %s" %(self.volume.id, path))
+            self.debug("Saving volume %s to path %s" % (self.volume.id, path))
             os.close(fd)
             with open(path, 'wb') as fd:
                 fd.write(response.read())
             self.debug("Saved volume successfully")
-        except Exception:
+        except Exception as e:
             self.fail(
-                "Extract Volume Failed with invalid URL %s (vol id: %s)" \
-                % (extract_vol.url, self.volume.id)
+                "Extract Volume Failed (URL: %s, vol id: %s) due to %s"
+                % (extract_vol.url, self.volume.id, e)
             )
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_07_resize_fail(self):
         """Test resize (negative) non-existent volume"""
         # Verify the size is the new size is what we wanted it to be.
self.debug("Fail Resize Volume ID: %s" % self.volume.id) # first, an invalid id - cmd = resizeVolume.resizeVolumeCmd() - cmd.id = "invalid id" + cmd = resizeVolume.resizeVolumeCmd() + cmd.id = "invalid id" cmd.diskofferingid = self.services['customresizeddiskofferingid'] - success = False + success = False try: self.apiClient.resizeVolume(cmd) except Exception as ex: - #print str(ex) + # print str(ex) if "invalid" in str(ex): success = True self.assertEqual( - success, - True, - "ResizeVolume - verify invalid id is handled appropriately") + success, + True, + "ResizeVolume - verify invalid id is handled appropriately") # Next, we'll try an invalid disk offering id - cmd.id = self.volume.id + cmd.id = self.volume.id cmd.diskofferingid = "invalid id" - success = False + success = False try: self.apiClient.resizeVolume(cmd) except Exception as ex: if "invalid" in str(ex): success = True self.assertEqual( - success, - True, - "ResizeVolume - verify disk offering is handled appropriately") + success, + True, + "ResizeVolume - verify disk offering is handled appropriately") # try to resize a root disk with a disk offering, root can only be resized by size= # get root vol from created vm list_volume_response = Volume.list( - self.apiClient, - virtualmachineid=self.virtual_machine.id, - type='ROOT', - listall=True - ) + self.apiClient, + virtualmachineid=self.virtual_machine.id, + type='ROOT', + listall=True + ) rootvolume = list_volume_response[0] - cmd.id = rootvolume.id + cmd.id = rootvolume.id cmd.diskofferingid = self.services['diskofferingid'] with self.assertRaises(Exception): self.apiClient.resizeVolume(cmd) # Ok, now let's try and resize a volume that is not custom. - cmd.id = self.volume.id + cmd.id = self.volume.id cmd.diskofferingid = self.services['diskofferingid'] - cmd.size = 4 + cmd.size = 4 self.debug( - "Attaching volume (ID: %s) to VM (ID: %s)" % ( - self.volume.id, - self.virtual_machine.id) - ) - #attach the volume + "Attaching volume (ID: %s) to VM (ID: %s)" % ( + self.volume.id, + self.virtual_machine.id) + ) + # attach the volume self.virtual_machine.attach_volume(self.apiClient, self.volume) self.attached = True - #stop the vm if it is on xenserver + # stop the vm if it is on xenserver hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid) self.assertTrue(isinstance(hosts, list)) self.assertTrue(len(hosts) > 0) @@ -656,16 +640,15 @@ class TestVolumes(cloudstackTestCase): time.sleep(30) return - - @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true") + @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true") def test_08_resize_volume(self): """Test resize a volume""" # Verify the size is the new size is what we wanted it to be. 
        self.debug(
-                "Attaching volume (ID: %s) to VM (ID: %s)" % (
-                                                    self.volume.id,
-                                                    self.virtual_machine.id
-                                                    ))
+            "Attaching volume (ID: %s) to VM (ID: %s)" % (
+                self.volume.id,
+                self.virtual_machine.id
+            ))
         self.virtual_machine.attach_volume(self.apiClient, self.volume)
         self.attached = True
@@ -684,13 +667,13 @@ class TestVolumes(cloudstackTestCase):
 
         self.services["disk_offering"]["disksize"] = 20
         disk_offering_20_GB = DiskOffering.create(
-                                    self.apiclient,
-                                    self.services["disk_offering"]
-                                    )
+            self.apiclient,
+            self.services["disk_offering"]
+        )
         self.cleanup.append(disk_offering_20_GB)
 
-        cmd                = resizeVolume.resizeVolumeCmd()
-        cmd.id             = self.volume.id
+        cmd = resizeVolume.resizeVolumeCmd()
+        cmd.id = self.volume.id
         cmd.diskofferingid = disk_offering_20_GB.id
 
         self.apiClient.resizeVolume(cmd)
@@ -699,12 +682,12 @@ class TestVolumes(cloudstackTestCase):
         success = False
         while count < 3:
             list_volume_response = Volume.list(
-                                                self.apiClient,
-                                                id=self.volume.id,
-                                                type='DATADISK'
-                                                )
+                self.apiClient,
+                id=self.volume.id,
+                type='DATADISK'
+            )
             for vol in list_volume_response:
-                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024** 3)) and vol.state == 'Ready':
+                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                     success = True
             if success:
                 break
@@ -713,18 +696,18 @@ class TestVolumes(cloudstackTestCase):
             count += 1
 
         self.assertEqual(
-                         success,
-                         True,
-                         "Check if the data volume resized appropriately"
-                         )
+            success,
+            True,
+            "Check if the data volume resized appropriately"
+        )
 
         can_shrink = False
 
         list_volume_response = Volume.list(
-                                            self.apiClient,
-                                            id=self.volume.id,
-                                            type='DATADISK'
-                                            )
+            self.apiClient,
+            id=self.volume.id,
+            type='DATADISK'
+        )
         storage_pool_id = [x.storageid for x in list_volume_response if x.id == self.volume.id][0]
         storage = StoragePool.list(self.apiclient, id=storage_pool_id)[0]
         # At present only CLVM supports shrinking volumes
@@ -734,15 +717,15 @@ class TestVolumes(cloudstackTestCase):
         if can_shrink:
             self.services["disk_offering"]["disksize"] = 10
             disk_offering_10_GB = DiskOffering.create(
-                                        self.apiclient,
-                                        self.services["disk_offering"]
-                                        )
+                self.apiclient,
+                self.services["disk_offering"]
+            )
             self.cleanup.append(disk_offering_10_GB)
 
-            cmd                = resizeVolume.resizeVolumeCmd()
-            cmd.id             = self.volume.id
+            cmd = resizeVolume.resizeVolumeCmd()
+            cmd.id = self.volume.id
             cmd.diskofferingid = disk_offering_10_GB.id
-            cmd.shrinkok       = "true"
+            cmd.shrinkok = "true"
 
             self.apiClient.resizeVolume(cmd)
 
@@ -750,9 +733,9 @@ class TestVolumes(cloudstackTestCase):
             success = False
             while count < 3:
                 list_volume_response = Volume.list(
-                                                    self.apiClient,
-                                                    id=self.volume.id
-                                                    )
+                    self.apiClient,
+                    id=self.volume.id
+                )
                 for vol in list_volume_response:
                     if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_10_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                         success = True
@@ -763,37 +746,38 @@ class TestVolumes(cloudstackTestCase):
                 count += 1
 
             self.assertEqual(
-                             success,
-                             True,
-                             "Check if the root volume resized appropriately"
-                             )
+                success,
+                True,
+                "Check if the data volume shrunk appropriately"
+            )
 
-        #start the vm if it is on xenserver
+        # start the vm if it is on xenserver
         if hosts[0].hypervisor == "XenServer":
             self.virtual_machine.start(self.apiClient)
             time.sleep(30)
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke","basic"], required_hardware="false")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
     def
 test_09_delete_detached_volume(self):
         """Delete a Volume unattached to an VM
-        """
         # Validate the following
         # 1. volume should be deleted successfully and listVolume should not
         #    contain the deleted volume details.
         # 2. "Delete Volume" menu item not shown under "Actions" menu.
         #    (UI should not allow to delete the volume when it is attached
         #    to instance by hiding the menu Item)
+        """
 
         self.debug("Delete Volume ID: %s" % self.volume.id)
 
         self.volume_1 = Volume.create(
-                                   self.apiclient,
-                                   self.services,
-                                   account=self.account.name,
-                                   domainid=self.account.domainid
+            self.apiclient,
+            self.services,
+            account=self.account.name,
+            domainid=self.account.domainid
         )
+        self.cleanup.append(self.volume_1)
 
         self.virtual_machine.attach_volume(self.apiClient, self.volume_1)
         self.virtual_machine.detach_volume(self.apiClient, self.volume_1)
@@ -801,28 +785,30 @@ class TestVolumes(cloudstackTestCase):
         cmd = deleteVolume.deleteVolumeCmd()
         cmd.id = self.volume_1.id
         self.apiClient.deleteVolume(cmd)
+        self.cleanup.remove(self.volume_1)
 
         list_volume_response = Volume.list(
-                                            self.apiClient,
-                                            id=self.volume_1.id,
-                                            type='DATADISK'
-                                            )
+            self.apiClient,
+            id=self.volume_1.id,
+            type='DATADISK'
+        )
         self.assertEqual(
-                        list_volume_response,
-                        None,
-                        "Check if volume exists in ListVolumes"
-                        )
+            list_volume_response,
+            None,
+            "Check if volume exists in ListVolumes"
+        )
         return
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_10_list_volumes(self):
-
+        """
         # Validate the following
         #
         # 1. List Root Volume and waits until it has the newly introduced attributes
         #
         # 2. Verifies return attributes has values different from none, when instance is running
         #
+        """
 
         list_vm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)[0]
@@ -871,15 +857,15 @@ class TestVolumes(cloudstackTestCase):
         self.assertTrue(hasattr(root_volume, "podname"))
         self.assertEqual(root_volume.podname, list_pods.name)
 
-    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_11_attach_volume_with_unstarted_vm(self):
         """Attach a created Volume to a unstarted VM
+        Validate the following
+        1. Attaching to a VM in the startvm=false state works and the VM can be started afterwards
+        2. shows list of volumes
+        3. "Attach Disk" pop-up box will display with list of instances
+        4. disk should be attached to instance successfully
         """
-        # Validate the following
-        # 1. Attach to a vm in startvm=false state works and vm can be started afterwards.
-        # 2. shows list of volumes
-        # 3. "Attach Disk" pop-up box will display with list of instances
-        # 4. disk should be attached to instance successfully
 
         test_vm = VirtualMachine.create(
             self.apiclient,
@@ -891,6 +877,7 @@ class TestVolumes(cloudstackTestCase):
             mode=self.services["mode"],
             startvm=False
         )
+        self.cleanup.append(test_vm)
 
         self.debug(
             "Attaching volume (ID: %s) to VM (ID: %s)" % (
@@ -920,11 +907,10 @@ class TestVolumes(cloudstackTestCase):
             None,
             "Check if volume state (attached) is reflected"
         )
-        #Sleep to ensure the current state will reflected in other calls
+        # Sleep to ensure the current state will be reflected in other calls
         time.sleep(self.services["sleep"])
 
         test_vm.detach_volume(self.apiClient, self.volume)
-        self.cleanup.append(test_vm)
         return
 
@@ -947,34 +933,32 @@ class TestVolumes(cloudstackTestCase):
             self.fail("Failed to return root volume response")
         return response
 
-
     @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
     def test_11_migrate_volume_and_change_offering(self):
-
-        # Validates the following
-        #
-        # 1. Creates a new Volume with a small disk offering
-        #
-        # 2. Migrates the Volume to another primary storage and changes the offering
-        #
-        # 3. Verifies the Volume has new offering when migrated to the new storage.
+        """
+        Validates the following
+        1. Creates a new Volume with a small disk offering
+        2. Migrates the Volume to another primary storage and changes the offering
+        3. Verifies the Volume has new offering when migrated to the new storage.
+        """
 
         small_offering = list_disk_offering(
             self.apiclient,
-            name = "Small"
+            name="Small"
         )[0]
 
         large_offering = list_disk_offering(
             self.apiclient,
-            name = "Large"
+            name="Large"
        )[0]
         volume = Volume.create(
             self.apiClient,
             self.services,
-            zoneid = self.zone.id,
-            account = self.account.name,
-            domainid = self.account.domainid,
-            diskofferingid = small_offering.id
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid,
+            diskofferingid=small_offering.id
         )
+        self.cleanup.append(volume)
 
         self.debug("Created a small volume: %s" % volume.id)
@@ -986,7 +970,7 @@ class TestVolumes(cloudstackTestCase):
         pools = StoragePool.listForMigration(
             self.apiclient,
             id=volume.id
-            )
+        )
 
         pool = None
 
@@ -1005,17 +989,16 @@ class TestVolumes(cloudstackTestCase):
 
         Volume.migrate(
             self.apiclient,
-            volumeid = volume.id,
-            storageid = pool.id,
-            newdiskofferingid = large_offering.id,
-            livemigrate = livemigrate
+            volumeid=volume.id,
+            storageid=pool.id,
+            newdiskofferingid=large_offering.id,
+            livemigrate=livemigrate
         )
         if self.virtual_machine.hypervisor == "KVM":
-            self.virtual_machine.start(self.apiclient
-            )
+            self.virtual_machine.start(self.apiclient)
         migrated_vol = Volume.list(
             self.apiclient,
-            id = volume.id
+            id=volume.id
         )[0]
         self.assertEqual(
             migrated_vol.diskofferingname,