mirror of https://github.com/apache/cloudstack.git
Remove CloudStack setup tests. They are covered elsewhere.
Minor fixes to routers, accounts and snapshot limits.
This commit is contained in:
parent
87b5bc21e3
commit
e33dcfb71d
@@ -79,10 +79,10 @@ class Services:
             {
                 "displaytext": 'Template from snapshot',
                 "name": 'Template from snapshot',
-                "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
+                "ostypeid": 12,
                 "templatefilter": 'self',
             },
-            "ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
+            "ostypeid": 12,
             # Cent OS 5.3 (64 bit)
             "diskdevice": "/dev/xvdb",  # Data Disk
             "rootdisk": "/dev/xvda",    # Root Disk
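Both ostypeid entries move from a UUID string back to the numeric guest OS type id this API version expects; per the inline comment, 12 is the CentOS 5.3 (64-bit) entry. As a hedged sketch (not part of the commit), a test could resolve the id at runtime via the listOsTypes API instead of hardcoding it, in the same command-class style these tests use elsewhere (cf. destroySystemVmCmd further down); the import path and description string are assumptions:

# Sketch only: resolve the guest OS type id instead of hardcoding 12.
from cloudstackAPI import listOsTypes  # assumed Marvin-generated module path

cmd = listOsTypes.listOsTypesCmd()
cmd.description = 'CentOS 5.3 (64-bit)'     # assumed catalog description
os_types = apiclient.listOsTypes(cmd)       # apiclient: a Marvin API client
ostypeid = os_types[0].id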
@@ -192,8 +192,7 @@ class TestSnapshotRootDisk(cloudstackTestCase):
         volumes = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine_with_disk.id,
-            type='ROOT',
-            listall=True
+            type='ROOT'
         )

         snapshot = Snapshot.create(
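The same edit recurs throughout the snapshot tests below: the listall=True flag is dropped from list_volumes (and from the recurring-snapshot listings), leaving only the filters the assertion needs. A minimal sketch of the resulting pattern, reusing the fixture names from the hunk above; Snapshot.create taking (apiclient, volume_id) follows the Marvin base library these tests import:

# Sketch: fetch the root volume of a fixture VM, then snapshot it.
volumes = list_volumes(
    self.apiclient,
    virtualmachineid=self.virtual_machine_with_disk.id,
    type='ROOT'
)
self.assertEqual(isinstance(volumes, list), True, "list_volumes should return a list")
snapshot = Snapshot.create(self.apiclient, volumes[0].id)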
@@ -225,12 +224,12 @@ class TestSnapshotRootDisk(cloudstackTestCase):
             "Check resource id in list resources call"
         )
         self.debug(
-            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
-            % str(snapshot.id)
+            "select backup_snap_id, account_id, volume_id from snapshots where id = %s;" \
+            % snapshot.id
         )
         qresultset = self.dbclient.execute(
-            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
-            % str(snapshot.id)
+            "select backup_snap_id, account_id, volume_id from snapshots where id = %s;" \
+            % snapshot.id
         )
         self.assertNotEqual(
             len(qresultset),
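This hunk (and its twin at -461 below) switches the verification query from the uuid column back to the numeric id primary key, dropping the quoting and str() the UUID form needed. A short sketch of the cross-check pattern as it reads after the change; self.dbclient is the cloudstackTestCase database client, and the unpacking follows the SELECT column order:

# Sketch: confirm the API-created snapshot has a row in the snapshots table.
qresultset = self.dbclient.execute(
    "select backup_snap_id, account_id, volume_id from snapshots where id = %s;"
    % snapshot.id
)
self.assertNotEqual(len(qresultset), 0, "snapshot row should exist in the DB")
backup_snap_id, account_id, volume_id = qresultset[0]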
@@ -425,8 +424,7 @@ class TestSnapshots(cloudstackTestCase):
         volume = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine_with_disk.id,
-            type='DATADISK',
-            listall=True
+            type='DATADISK'
         )
         self.assertEqual(
             isinstance(volume, list),
@@ -461,12 +459,12 @@ class TestSnapshots(cloudstackTestCase):
             "Check resource id in list resources call"
         )
         self.debug(
-            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
-            % str(snapshot.id)
+            "select backup_snap_id, account_id, volume_id from snapshots where id = %s;" \
+            % snapshot.id
        )
         qresultset = self.dbclient.execute(
-            "select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
-            % str(snapshot.id)
+            "select backup_snap_id, account_id, volume_id from snapshots where id = %s;" \
+            % snapshot.id
         )
         self.assertNotEqual(
             len(qresultset),
@@ -628,8 +626,7 @@ class TestSnapshots(cloudstackTestCase):
         list_volume_response = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
-            type='DATADISK',
-            listall=True
+            type='DATADISK'
         )

         volume_response = list_volume_response[0]
@@ -750,8 +747,7 @@ class TestSnapshots(cloudstackTestCase):
         volumes = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
-            type='DATADISK',
-            listall=True
+            type='DATADISK'
         )
         self.assertEqual(
             isinstance(volumes, list),
@@ -788,8 +784,7 @@ class TestSnapshots(cloudstackTestCase):
         volume = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine_with_disk.id,
-            type='ROOT',
-            listall=True
+            type='ROOT'
         )
         self.assertEqual(
             isinstance(volume, list),
@@ -843,8 +838,7 @@ class TestSnapshots(cloudstackTestCase):
             volumeid=volume[0].id,
             intervaltype=\
                 self.services["recurring_snapshot"]["intervaltype"],
-            snapshottype='RECURRING',
-            listall=True
+            snapshottype='RECURRING'
         )

         if isinstance(snapshots, list):
@@ -879,8 +873,7 @@ class TestSnapshots(cloudstackTestCase):
         volume = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine_with_disk.id,
-            type='DATADISK',
-            listall=True
+            type='DATADISK'
         )

         self.assertEqual(
@@ -938,8 +931,7 @@ class TestSnapshots(cloudstackTestCase):
             volumeid=volume[0].id,
             intervaltype=\
                 self.services["recurring_snapshot"]["intervaltype"],
-            snapshottype='RECURRING',
-            listall=True
+            snapshottype='RECURRING'
         )

         if isinstance(snapshots, list):
@@ -1030,8 +1022,7 @@ class TestSnapshots(cloudstackTestCase):
         volumes = list_volumes(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
-            type='ROOT',
-            listall=True
+            type='ROOT'
         )
         self.assertEqual(
             isinstance(volumes, list),
@@ -23,72 +23,6 @@ class Services:
         "domain": {
             "name": "Domain",
         },
-        "zone": {
-            "dns1": '121.242.190.180',
-            "internaldns1": '192.168.100.1',
-            "name" : "Test Zone",
-            "networktype" : "Basic",
-            "dns2": '121.242.190.211',
-        },
-        "pod": {
-            "name": "Test Pod",
-            "gateway": '192.168.100.1',
-            "netmask": '255.255.255.0',
-            "startip": '192.168.100.132',
-            "endip": '192.168.100.140',
-        },
-        "public_ip": {
-            "gateway": '192.168.100.1',
-            "netmask": '255.255.255.0',
-            "forvirtualnetwork": False,
-            "startip": '192.168.100.142',
-            "endip": '192.168.100.149',
-            "vlan": "untagged",
-        },
-        "cluster": {
-            "clustername": "Xen Cluster",
-            "clustertype": "CloudManaged",
-            # CloudManaged or ExternalManaged
-            "hypervisor": "XenServer",
-            # Hypervisor type
-        },
-        "host": {
-            "hypervisor": 'XenServer',
-            # Hypervisor type
-            "clustertype": 'CloudManaged',
-            # CloudManaged or ExternalManaged
-            "url": 'http://192.168.100.211',
-            "username": "root",
-            "password": "fr3sca",
-            "port": 22,
-            "ipaddress": '192.168.100.211'
-        },
-
-        "primary_storage": {
-            "name": "Test Primary",
-            "url": "nfs://192.168.100.150/mnt/DroboFS/Shares/nfsclo3",
-            # Format: File_System_Type/Location/Path
-        },
-        "sec_storage": {
-            "url": "nfs://192.168.100.150/mnt/DroboFS/Shares/nfsclo4"
-            # Format: File_System_Type/Location/Path
-
-
-        },
-        "mgmt_server": {
-            "ipaddress": '192.168.100.154',
-            "port": 22,
-            "username": 'root',
-            "password": 'fr3sca',
-        },
-        "sysVM": {
-            "mnt_dir": '/mnt/test',
-            "sec_storage": '192.168.100.150',
-            "path": 'TestSec',
-            "command": '/usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt',
-            "download_url": 'http://download.cloud.com/releases/2.2.0/systemvm.vhd.bz2',
-            "hypervisor": "xenserver",
-        },
         "account": {
             "email": "test@test.com",
             "firstname": "Test",
@@ -999,7 +933,7 @@ class TesttemplateHierarchy(cloudstackTestCase):
         )
         return

-@unittest.skip("Open Questions")
+
 class TestAddVmToSubDomain(cloudstackTestCase):

     @classmethod
@@ -1008,93 +942,14 @@ class TestAddVmToSubDomain(cloudstackTestCase):
         cls.services = Services().services

         # Setup working Environment- Create domain, zone, pod cluster etc.
-        cls.domain = Domain.create(
+        cls.domain = get_domain(
             cls.api_client,
-            cls.services["domain"]
+            cls.services
         )
-        cls.zone = Zone.create(
+        cls.zone = get_zone(
             cls.api_client,
-            cls.services["zone"],
-            domainid=cls.domain.id
+            cls.services
         )
-        cls.services["pod"]["zoneid"] = cls.zone.id
-
-        cls.pod = Pod.create(
-            cls.api_client,
-            cls.services["pod"]
-        )
-        cls.services["public_ip"]["zoneid"] = cls.zone.id
-        cls.services["public_ip"]["podid"] = cls.pod.id
-
-        cls.public_ip_range = PublicIpRange.create(
-            cls.api_client,
-            cls.services["public_ip"]
-        )
-        cls.services["cluster"]["zoneid"] = cls.zone.id
-        cls.services["cluster"]["podid"] = cls.pod.id
-
-        cls.cluster = Cluster.create(
-            cls.api_client,
-            cls.services["cluster"]
-        )
-
-        cls.services["host"]["zoneid"] = cls.zone.id
-        cls.services["host"]["podid"] = cls.pod.id
-
-        cls.host = Host.create(
-            cls.api_client,
-            cls.cluster,
-            cls.services["host"]
-        )
-
-        cls.services["primary_storage"]["zoneid"] = cls.zone.id
-        cls.services["primary_storage"]["podid"] = cls.pod.id
-
-        cls.primary_storage = StoragePool.create(
-            cls.api_client,
-            cls.services["primary_storage"],
-            cls.cluster.id
-        )
-
-        # before adding Sec Storage, First download System Templates on it
-        download_systemplates_sec_storage(
-            cls.services["mgmt_server"],
-            cls.services["sysVM"]
-        )
-
-        cls.services["sec_storage"]["zoneid"] = cls.zone.id
-        cls.services["sec_storage"]["podid"] = cls.pod.id
-
-        cls.secondary_storage = SecondaryStorage.create(
-            cls.api_client,
-            cls.services["sec_storage"]
-        )
-        # After adding Host, Clusters wait for SSVMs to come up
-        wait_for_ssvms(
-            cls.api_client,
-            cls.zone.id,
-            cls.pod.id
-        )
-
-        ssvm_response = list_ssvms(
-            cls.api_client,
-            systemvmtype='secondarystoragevm',
-            hostid=cls.host.id,
-            sleep=cls.services["sleep"]
-        )
-        if isinstance(ssvm_response, list):
-            ssvm = ssvm_response[0]
-        else:
-            raise Exception("List SSVM failed")
-
-        # Download BUILTIN templates
-        download_builtin_templates(
-            cls.api_client,
-            cls.zone.id,
-            cls.services["cluster"]["hypervisor"],
-            cls.services["host"],
-            ssvm.linklocalip
-        )
         cls.sub_domain = Domain.create(
             cls.api_client,
             cls.services["domain"],
@@ -1145,60 +1000,21 @@ class TestAddVmToSubDomain(cloudstackTestCase):
             domainid=cls.account_2.account.domainid,
             serviceofferingid=cls.service_offering.id
         )
-        cls._cleanup = []
+        cls._cleanup = [
+            cls.account_2,
+            cls.account_1,
+            cls.sub_domain,
+            cls.service_offering
+        ]
         return

     @classmethod
     def tearDownClass(cls):
         try:
-            # Cleanup the accounts
-            cls.account_1.delete(cls.api_client)
-            cls.account_2.delete(cls.api_client)
-
-            cleanup_wait = list_configurations(
-                cls.api_client,
-                name='account.cleanup.interval'
-            )
-            # Sleep for account.cleanup.interval * 2 to wait for expunge of
-            # resources associated with that account
-            if isinstance(cleanup_wait, list):
-                sleep_time = int(cleanup_wait[0].value) * 2
-
-            time.sleep(sleep_time)
-
-            # Delete Service offerings and sub-domains
-            cls.service_offering.delete(cls.api_client)
-            cls.sub_domain.delete(cls.api_client)
-
-            # Enable maintenance mode of
-            cls.host.enableMaintenance(cls.api_client)
-            cls.primary_storage.enableMaintenance(cls.api_client)
-
-            # Destroy SSVMs and wait for volumes to cleanup
-            ssvms = list_ssvms(
-                cls.api_client,
-                zoneid=cls.zone.id
-            )
-
-            if isinstance(ssvms, list):
-                for ssvm in ssvms:
-                    cmd = destroySystemVm.destroySystemVmCmd()
-                    cmd.id = ssvm.id
-                    cls.api_client.destroySystemVm(cmd)
-
-            # Sleep for account.cleanup.interval*2 to wait for SSVM volume
-            # to cleanup
-            time.sleep(sleep_time)
-
-            # Cleanup Primary, secondary storage, hosts, zones etc.
-            cls.secondary_storage.delete(cls.api_client)
-            cls.host.delete(cls.api_client)
-
-            cls.primary_storage.delete(cls.api_client)
-            cls.cluster.delete(cls.api_client)
-            cls.pod.delete(cls.api_client)
-            cls.zone.delete(cls.api_client)
-        except Exception as e:
+            #Clean up, terminate the created resources
+            cleanup_resources(cls.api_client, cls._cleanup)
+
+        except Exception as e:
             raise Exception("Warning: Exception during cleanup : %s" % e)
         return
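Taken together, the two hunks above are the heart of the commit: instead of building a zone, pod, cluster, host and storage from scratch (the setup code removed above), the test looks up the already-deployed environment and registers only what it creates itself in a declarative cleanup list. A minimal sketch of that fixture pattern under the same assumptions (Marvin-style get_domain/get_zone and cleanup_resources helpers, as imported by these tests; the class name and parentdomainid argument are hypothetical):

# Sketch of the fixture pattern the commit converges on.
class TestExample(cloudstackTestCase):          # hypothetical test class

    @classmethod
    def setUpClass(cls):
        cls.api_client = super(TestExample, cls).getClsTestClient().getApiClient()
        cls.services = Services().services
        cls.domain = get_domain(cls.api_client, cls.services)   # reuse, don't create
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.sub_domain = Domain.create(cls.api_client, cls.services["domain"],
                                       parentdomainid=cls.domain.id)
        # register in reverse dependency order: children before parents
        cls._cleanup = [cls.sub_domain]

    @classmethod
    def tearDownClass(cls):
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)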
@@ -238,6 +238,7 @@ class TestSnapshots(cloudstackTestCase):
                 self.services["paths"]["sub_lvl_dir2"],
                 self.services["paths"]["random_data"]
             ),
+            "sync"
         ]
         for c in cmds:
             self.debug("Command: %s" % c)
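The appended "sync" makes the guest flush its write cache before the snapshot is taken, so the random data written by the earlier commands is verifiably on disk and therefore inside the snapshot. A minimal sketch of the loop under assumed names (get_ssh_client() is the Marvin VirtualMachine helper; the path and dd command are hypothetical stand-ins for the services["paths"] values above):

# Sketch: write test data, then flush it to disk before snapshotting.
ssh_client = self.virtual_machine.get_ssh_client()
random_data_path = "/tmp/random.data"            # hypothetical path
cmds = [
    "dd if=/dev/urandom of=%s bs=1M count=10" % random_data_path,
    "sync",                                      # the line this hunk adds
]
for c in cmds:
    self.debug("Command: %s" % c)
    ssh_client.execute(c)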
@@ -472,7 +473,7 @@ class TestTemplate(cloudstackTestCase):
         list_template_response = Template.list(
             self.apiclient,
             templatefilter=\
-                self.services["templatefilter"],
+                self.services["templates"]["templatefilter"],
             id=template.id,
             zoneid=self.zone.id
         )
@@ -1128,6 +1129,7 @@ class TestTemplates(cloudstackTestCase):
         )
         return

+    @unittest.skip("Known bug in 2.0")
     def test_03_resuse_template_name(self):
         """TS_BUG_011-Test Reusing deleted template name
         """
@@ -1127,7 +1127,7 @@ class TestResourceLimitsDomain(cloudstackTestCase):
             self.apiclient,
             2, # Volume
             domainid=self.account.account.domainid,
-            max=1
+            max=2
         )

         self.debug("Deploying VM for account: %s" % self.account.account.name)
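A hedged reading of the bump from max=1 to max=2 (inferred, not stated in the commit): resource type 2 is the domain's volume limit, and the VM the test deploys immediately afterwards already consumes one volume for its root disk, so a limit of 1 would fail the deployment itself rather than exercising the limit the test is actually about.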
@@ -1171,6 +1171,13 @@ class TestResourceLimitsDomain(cloudstackTestCase):
         # 4. Try create 3rd template in the domain. It should give the user an
         #    appropriate error and an alert should be generated.

+        # Reset volume limit set
+        update_resource_limit(
+            self.apiclient,
+            2, # Volume
+            domainid=self.account.account.domainid,
+            max=5
+        )
         self.debug(
             "Updating template resource limits for domain: %s" %
             self.account.account.domainid)
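Per its own "Reset volume limit set" comment, the added block restores the domain volume limit (resource type 2) to a roomy max=5 before the template-limit scenario runs, so a tighter volume cap left over from the previous test cannot mask the template-limit error this test is asserting.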
@@ -405,19 +405,32 @@ class TestRouterServices(cloudstackTestCase):
         cmd.id = virtual_machine.id
         self.apiclient.stopVirtualMachine(cmd)

-        interval = list_configurations(
+        gcinterval = list_configurations(
             self.apiclient,
             name='network.gc.interval'
         )
         self.assertEqual(
-            isinstance(interval, list),
+            isinstance(gcinterval, list),
             True,
-            "Check for list intervals response return valid data"
+            "Check for list configs response return valid data"
         )
-        self.debug("network.gc.interval: %s" % interval[0].value)
-        # Router is stopped after (network.gc.interval *2) time. Wait for
-        # (network.gc.interval *4) for moving router to 'Stopped'
-        time.sleep(int(interval[0].value) * 4)
+        self.debug("network.gc.interval: %s" % gcinterval[0].value)
+
+        gcwait = list_configurations(
+            self.apiclient,
+            name='network.gc.wait'
+        )
+        self.assertEqual(
+            isinstance(gcwait, list),
+            True,
+            "Check for list config response return valid data"
+        )
+        self.debug("network.gc.wait: %s" % gcwait[0].value)
+
+        total_wait = int(gcinterval[0].value) + int(gcwait[0].value)
+        # Router is stopped after (network.gc.interval + network.gc.wait) time
+        # wait for total_wait * 2 for moving router to 'Stopped'
+        time.sleep(total_wait * 2)

         routers = list_routers(
             self.apiclient,
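Design note on the new wait: the old code slept network.gc.interval * 4, which silently assumed network.gc.wait equals the interval. The rewrite reads both settings and sleeps (network.gc.interval + network.gc.wait) * 2. With both at a typical 600-second setting (an assumption about the environment, not something in this diff) the two formulas agree, 600 * 4 = (600 + 600) * 2 = 2400 seconds, but the new form stays correct when the two knobs are tuned independently.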
@@ -562,7 +575,7 @@ class TestRouterStopAssociateIp(cloudstackTestCase):
     @classmethod
     def setUpClass(cls):

-        cls.api_client = fetch_api_client()
+        cls.api_client = super(TestRouterStopAssociateIp, cls).getClsTestClient().getApiClient()
         cls.services = Services().services
         # Get Zone, Domain and templates
         cls.zone = get_zone(cls.api_client, cls.services)
@@ -599,7 +612,6 @@ class TestRouterStopAssociateIp(cloudstackTestCase):
     @classmethod
     def tearDownClass(cls):
         try:
-            cls.api_client = fetch_api_client()
             # Clean up resources
             cleanup_resources(cls.api_client, cls.cleanup)

@@ -778,7 +790,7 @@ class TestRouterStopAssociateIp(cloudstackTestCase):

         res = str(result)
         self.assertEqual(
-            result.count(str(public_ip.ipaddress.ipaddress)),
+            res.count(str(public_ip.ipaddress.ipaddress)),
             1,
             "Check public IP address"
         )
@@ -644,12 +644,16 @@ class TestAccountSnapshotClean(cloudstackTestCase):
         # Wait for account cleanup interval
         time.sleep(int(interval[0].value) * 2)

-        with self.assertRaises(Exception):
-            accounts = list_accounts(
+        accounts = list_accounts(
             self.apiclient,
             id=self.account.account.id
         )
-        uuids = []
+        self.assertEqual(
+            accounts,
+            None,
+            "List accounts should return an empty list"
+        )
+        uuids = []
         for host in hosts:
             # hosts[0].name = "nfs://192.168.100.21/export/test"
             parse_url = (host.name).split('/')
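The rewrite matches how the list helpers behave here once the account is expunged: list_accounts returns None for an empty result instead of raising, so the old with self.assertRaises(Exception) block could never trigger. Asserting that accounts is None, as the new code does, is the check that actually proves the cleanup thread removed the account.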
@@ -1128,7 +1132,7 @@ class TestSnapshotLimit(cloudstackTestCase):
             True,
             "Check list response returns a valid list"
         )
-        self.assertNotEqual(
+        self.assertEqual(
             len(snapshots),
             self.services["recurring_snapshot"]["maxsnaps"],
             "Check maximum number of recurring snapshots retained"
@@ -1136,10 +1140,11 @@ class TestSnapshotLimit(cloudstackTestCase):
         # Sleep to ensure that snapshot is reflected in sec storage
         time.sleep(self.services["sleep"])

+        snapshot = snapshots[0]
         # Fetch values from database
         qresultset = self.dbclient.execute(
             "select backup_snap_id, account_id, volume_id from snapshots where id = %s;" \
-            % self.snapshot.id
+            % snapshot.id
         )
         self.assertEqual(
             isinstance(qresultset, list),
@@ -1219,7 +1224,7 @@ class TestSnapshotLimit(cloudstackTestCase):
             "SSH access failed for management server: %s" %
             self.services["mgmt_server"]["ipaddress"])

-        res = str(result)
+        res = str(uuids)
         self.assertEqual(
             res.count(snapshot_uuid),
             1,
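A likely reading of this fix: result holds only the output of the last SSH command, while uuids appears to be the list of snapshot folders collected from the secondary storage hosts earlier in the test (see the parse_url loop above), so res = str(uuids) points the count at the right haystack when asserting the snapshot appears exactly once on secondary storage.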