mirror of https://github.com/apache/cloudstack.git
Updated the BVT tests with the following changes:
1. Poll with a sleep interval instead of relying on a single fixed sleep.
2. Added validation of SSH commands and of the responses from all list* API calls.
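The changes apply two recurring patterns across the test suites; a minimal sketch of both is shown below. It is illustrative only, assuming the marvin-style helpers the tests already import (list_virtual_machines, get_ssh_client, the services dict), and placeholder names such as vm_id, public_ip and list_response stand in for the test-specific values used in the hunks that follow.

    # Poll + sleep until the resource reaches the expected state, bounded by a timeout
    timeout = self.services["timeout"]
    while True:
        time.sleep(self.services["sleep"])
        list_vm_response = list_virtual_machines(self.apiclient, id=vm_id)
        if isinstance(list_vm_response, list) and list_vm_response[0].state == 'Running':
            break
        if timeout == 0:
            raise Exception("Timed out waiting for VM %s to reach Running state" % vm_id)
        timeout = timeout - 1

    # Guard SSH access so a connection failure reports a test failure instead of an error
    try:
        ssh_client = self.virtual_machine.get_ssh_client(public_ip)
    except Exception as e:
        self.fail("SSH access failed for %s: %s" % (public_ip, e))

    # Validate every list* response before indexing into it
    self.assertEqual(isinstance(list_response, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(list_response), 0, "Check list response is not empty")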
This commit is contained in:
parent 50d77044a8
commit 13ecccff66
@@ -57,10 +57,17 @@ class TestCreateDiskOffering(cloudstackTestCase):
)
self.cleanup.append(disk_offering)

self.debug("Created Disk offering with ID: %s" % disk_offering.id)

list_disk_response = list_disk_offering(
self.apiclient,
id=disk_offering.id
)
self.assertEqual(
isinstance(list_disk_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_disk_response),
0,
@@ -134,6 +141,9 @@ class TestDiskOfferings(cloudstackTestCase):
random_displaytext = random_gen()
random_name = random_gen()

self.debug("Updating Disk offering with ID: %s" %
self.disk_offering_1.id)

cmd = updateDiskOffering.updateDiskOfferingCmd()
cmd.id = self.disk_offering_1.id
cmd.displaytext = random_displaytext
@@ -145,7 +155,11 @@ class TestDiskOfferings(cloudstackTestCase):
self.apiclient,
id=self.disk_offering_1.id
)

self.assertEqual(
isinstance(list_disk_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_disk_response),
0,
@@ -175,6 +189,8 @@ class TestDiskOfferings(cloudstackTestCase):

self.disk_offering_2.delete(self.apiclient)

self.debug("Deleted Disk offering with ID: %s" %
self.disk_offering_2.id)
list_disk_response = list_disk_offering(
self.apiclient,
id=self.disk_offering_2.id
@@ -185,4 +201,4 @@ class TestDiskOfferings(cloudstackTestCase):
None,
"Check if disk offering exists in listDiskOfferings"
)
return
return
@@ -92,26 +92,6 @@ class TestHosts(cloudstackTestCase):
self.services = Services().services
self.cleanup = []

# Get Zone and pod
self.zone = get_zone(self.apiclient, self.services)
self.pod = get_pod(self.apiclient, self.zone.id)

self.services["clusters"][0]["zoneid"] = self.zone.id
self.services["clusters"][1]["zoneid"] = self.zone.id
self.services["clusters"][2]["zoneid"] = self.zone.id

self.services["clusters"][0]["podid"] = self.pod.id
self.services["clusters"][1]["podid"] = self.pod.id
self.services["clusters"][2]["podid"] = self.pod.id

self.services["hosts"]["xenserver"]["zoneid"] = self.zone.id
self.services["hosts"]["kvm"]["zoneid"] = self.zone.id
self.services["hosts"]["vmware"]["zoneid"] = self.zone.id

self.services["hosts"]["xenserver"]["podid"] = self.pod.id
self.services["hosts"]["kvm"]["podid"] = self.pod.id
self.services["hosts"]["vmware"]["podid"] = self.pod.id

return

def tearDown(self):
@@ -136,8 +116,17 @@ class TestHosts(cloudstackTestCase):

#Create clusters with Hypervisor type XEN/KVM/VWare
for k, v in self.services["clusters"].items():
cluster = Cluster.create(self.apiclient, v)

cluster = Cluster.create(
self.apiclient,
v,
zoneid=self.zone.id,
podid=self.pod.id
)
self.debug(
"Created Cluster for hypervisor type %s & ID: %s" %(
v["hypervisor"],
cluster.id
))
self.assertEqual(
cluster.hypervisortype,
v["hypervisor"],
@@ -160,8 +149,15 @@ class TestHosts(cloudstackTestCase):
host = Host.create(
self.apiclient,
cluster,
self.services["hosts"][hypervisor_type]
self.services["hosts"][hypervisor_type],
zoneid=self.zone.id,
podid=self.pod.id
)
self.debug(
"Created host (ID: %s) in cluster ID %s" %(
host.id,
cluster.id
))

#Cleanup Host & Cluster
self.cleanup.append(host)
@@ -171,7 +167,11 @@ class TestHosts(cloudstackTestCase):
self.apiclient,
clusterid=cluster.id
)

self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_hosts_response),
0,
@@ -190,7 +190,11 @@ class TestHosts(cloudstackTestCase):
self.apiclient,
id=cluster.id
)

self.assertEqual(
isinstance(list_cluster_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_cluster_response),
0,
@@ -208,4 +212,4 @@ class TestHosts(cloudstackTestCase):
cluster.hypervisortype,
"Check hypervisor type with is " + v["hypervisor"] + " or not"
)
return
return
@ -27,8 +27,10 @@ class Services:
|
|||
"firstname": "Test",
|
||||
"lastname": "User",
|
||||
"username": "test",
|
||||
"password": "password",
|
||||
},
|
||||
# Random characters are appended in create account to
|
||||
# ensure unique username generated each time
|
||||
"password": "fr3sca",
|
||||
},
|
||||
"iso_1":
|
||||
{
|
||||
"displaytext": "Test ISO 1",
|
||||
|
|
@ -80,7 +82,13 @@ class TestCreateIso(cloudstackTestCase):
|
|||
# Get Zone, Domain and templates
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.services["iso_2"]["zoneid"] = self.zone.id
|
||||
self.cleanup = []
|
||||
|
||||
self.account = Account.create(
|
||||
self.apiclient,
|
||||
self.services["account"]
|
||||
)
|
||||
|
||||
self.cleanup = [self.account]
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
|
|
@ -105,23 +113,37 @@ class TestCreateIso(cloudstackTestCase):
|
|||
# 2. UI should show the newly added ISO
|
||||
# 3. listIsos API should show the newly added ISO
|
||||
|
||||
iso = Iso.create(self.apiclient, self.services["iso_2"])
|
||||
iso.download(self.apiclient)
|
||||
self.cleanup.append(iso)
|
||||
iso = Iso.create(
|
||||
self.apiclient,
|
||||
self.services["iso_2"],
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.debug("ISO created with ID: %s" % iso.id)
|
||||
|
||||
try:
|
||||
iso.download(self.apiclient)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (iso.id, e))
|
||||
|
||||
list_iso_response = list_isos(
|
||||
self.apiclient,
|
||||
id=iso.id
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
"Check template available in List ISOs"
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.displaytext,
|
||||
self.services["iso_2"]["displaytext"],
|
||||
|
|
@ -159,11 +181,31 @@ class TestISO(cloudstackTestCase):
|
|||
cls.services["account"],
|
||||
)
|
||||
cls.services["account"] = cls.account.account.name
|
||||
cls.iso_1 = Iso.create(cls.api_client, cls.services["iso_1"])
|
||||
cls.iso_1.download(cls.api_client)
|
||||
cls.iso_2 = Iso.create(cls.api_client, cls.services["iso_2"])
|
||||
cls.iso_2.download(cls.api_client)
|
||||
cls._cleanup = [cls.iso_2, cls.account]
|
||||
cls.iso_1 = Iso.create(
|
||||
cls.api_client,
|
||||
cls.services["iso_1"],
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
try:
|
||||
cls.iso_1.download(cls.api_client)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (cls.iso_1.id, e))
|
||||
|
||||
cls.iso_2 = Iso.create(
|
||||
cls.api_client,
|
||||
cls.services["iso_2"],
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
try:
|
||||
cls.iso_2.download(cls.api_client)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (cls.iso_2.id, e))
|
||||
|
||||
cls._cleanup = [cls.account]
|
||||
return
|
||||
|
||||
@classmethod
|
||||
|
|
@ -206,6 +248,8 @@ class TestISO(cloudstackTestCase):
|
|||
new_displayText = random_gen()
|
||||
new_name = random_gen()
|
||||
|
||||
self.debug("Updating ISO permissions for ISO: %s", self.iso_1.id)
|
||||
|
||||
cmd = updateIso.updateIsoCmd()
|
||||
#Assign new values to attributes
|
||||
cmd.id = self.iso_1.id
|
||||
|
|
@ -222,7 +266,11 @@ class TestISO(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.iso_1.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
|
|
@ -261,6 +309,7 @@ class TestISO(cloudstackTestCase):
|
|||
# 1. UI should not show the deleted ISP
|
||||
# 2. database (vm_template table) should not contain deleted ISO
|
||||
|
||||
self.debug("Deleting ISO with ID: %s" % self.iso_1.id)
|
||||
self.iso_1.delete(self.apiclient)
|
||||
|
||||
#ListIsos to verify deleted ISO is properly deleted
|
||||
|
|
@ -285,6 +334,8 @@ class TestISO(cloudstackTestCase):
|
|||
# for all kind of users
|
||||
# 3 .ListIsos should not display the system templates
|
||||
|
||||
self.debug("Extracting ISO with ID: %s" % self.iso_2.id)
|
||||
|
||||
cmd = extractIso.extractIsoCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
cmd.mode = self.services["iso_2"]["mode"]
|
||||
|
|
@ -326,6 +377,8 @@ class TestISO(cloudstackTestCase):
|
|||
# 2. permission changes should be reflected in vm_template
|
||||
# table in database
|
||||
|
||||
self.debug("Updating permissions for ISO: %s" % self.iso_2.id)
|
||||
|
||||
cmd = updateIsoPermissions.updateIsoPermissionsCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
#Update ISO permissions
|
||||
|
|
@ -341,7 +394,12 @@ class TestISO(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -369,6 +427,11 @@ class TestISO(cloudstackTestCase):
|
|||
#1. copy ISO should be successful and secondary storage
|
||||
# should contain new copied ISO.
|
||||
|
||||
self.debug("Copy ISO from %s to %s" % (
|
||||
self.zone.id,
|
||||
self.services["destzoneid"]
|
||||
))
|
||||
|
||||
cmd = copyIso.copyIsoCmd()
|
||||
cmd.id = self.iso_2.id
|
||||
cmd.destzoneid = self.services["destzoneid"]
|
||||
|
|
@ -381,14 +444,19 @@ class TestISO(cloudstackTestCase):
|
|||
id=self.iso_2.id,
|
||||
zoneid=self.services["destzoneid"]
|
||||
)
|
||||
|
||||
iso_response = list_iso_response[0]
|
||||
self.assertEqual(
|
||||
isinstance(list_iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_iso_response),
|
||||
0,
|
||||
"Check template extracted in List ISO"
|
||||
)
|
||||
iso_response = list_iso_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
iso_response.id,
|
||||
self.iso_2.id,
|
||||
|
|
@ -400,9 +468,10 @@ class TestISO(cloudstackTestCase):
|
|||
"Check zone ID of the copied ISO"
|
||||
)
|
||||
|
||||
self.debug("Cleanup copied ISO: %s" % iso_response.id)
|
||||
# Cleanup- Delete the copied ISO
|
||||
cmd = deleteIso.deleteIsoCmd()
|
||||
cmd.id = iso_response.id
|
||||
cmd.zoneid = self.services["destzoneid"]
|
||||
self.apiclient.deleteIso(cmd)
|
||||
return
|
||||
return
|
||||
|
|
@ -31,6 +31,8 @@ class Services:
|
|||
# Networking mode: Basic or advanced
|
||||
"lb_switch_wait": 10,
|
||||
# Time interval after which LB switches the requests
|
||||
"sleep": 10,
|
||||
"timeout":20,
|
||||
"network": {
|
||||
"name": "Test Network",
|
||||
"displaytext": "Test Network",
|
||||
|
|
@ -167,7 +169,11 @@ class TestPublicIP(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=ip_address.ipaddress.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_pub_ip_addr_resp, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#listPublicIpAddresses should return newly created public IP
|
||||
self.assertNotEqual(
|
||||
len(list_pub_ip_addr_resp),
|
||||
|
|
@ -215,7 +221,11 @@ class TestPublicIP(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=ip_address.ipaddress.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_pub_ip_addr_resp, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_pub_ip_addr_resp),
|
||||
0,
|
||||
|
|
@ -310,7 +320,36 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(src_nat_ip_addrs, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
# Check if VM is in Running state before creating NAT rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
"Check Port Forwarding Rule is created"
|
||||
)
|
||||
self.assertEqual(
|
||||
vm_response[0].state,
|
||||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
#Create NAT rule
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -323,6 +362,12 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=nat_rule.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_nat_rule_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_nat_rule_response),
|
||||
0,
|
||||
|
|
@ -335,7 +380,14 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
)
|
||||
#SSH virtual machine to test port forwarding
|
||||
try:
|
||||
self.debug("SSHing into VM with IP address %s with NAT IP %s" %
|
||||
(
|
||||
self.virtual_machine.ipaddress,
|
||||
src_nat_ip_addr.ipaddress
|
||||
))
|
||||
|
||||
self.virtual_machine.get_ssh_client(src_nat_ip_addr.ipaddress)
|
||||
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"SSH Access failed for %s: %s" % \
|
||||
|
|
@ -356,6 +408,10 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
)
|
||||
# Check if the Public SSH port is inaccessible
|
||||
with self.assertRaises(Exception):
|
||||
self.debug(
|
||||
"SSHing into VM with IP address %s after NAT rule deletion" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.virtual_machine.ssh_port,
|
||||
|
|
@ -379,6 +435,30 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.services["server"]
|
||||
)
|
||||
self.cleanup.append(ip_address)
|
||||
|
||||
# Check if VM is in Running state before creating NAT rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
"Check Port Forwarding Rule is created"
|
||||
)
|
||||
self.assertEqual(
|
||||
vm_response[0].state,
|
||||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
#Create NAT rule
|
||||
nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -394,6 +474,11 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=nat_rule.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_nat_rule_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_nat_rule_response),
|
||||
0,
|
||||
|
|
@ -406,6 +491,11 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
)
|
||||
|
||||
try:
|
||||
self.debug("SSHing into VM with IP address %s with NAT IP %s" %
|
||||
(
|
||||
self.virtual_machine.ipaddress,
|
||||
ip_address.ipaddress.ipaddress
|
||||
))
|
||||
self.virtual_machine.get_ssh_client(ip_address.ipaddress.ipaddress)
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
|
|
@ -426,6 +516,10 @@ class TestPortForwarding(cloudstackTestCase):
|
|||
)
|
||||
# Check if the Public SSH port is inaccessible
|
||||
with self.assertRaises(Exception):
|
||||
self.debug(
|
||||
"SSHing into VM with IP address %s after NAT rule deletion" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
ip_address.ipaddress.ipaddress,
|
||||
self.virtual_machine.ssh_port,
|
||||
|
|
@ -515,8 +609,38 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(src_nat_ip_addrs, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
"Check Port Forwarding Rule is created"
|
||||
)
|
||||
for vm in vm_response:
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
#Create Load Balancer rule and assign VMs to rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -532,6 +656,11 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=lb_rule.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(lb_rules, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#verify listLoadBalancerRules lists the added load balancing rule
|
||||
self.assertNotEqual(
|
||||
len(lb_rules),
|
||||
|
|
@ -550,43 +679,71 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=lb_rule.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(lb_instance_rules, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(lb_instance_rules),
|
||||
0,
|
||||
"Check Load Balancer instances Rule in its List"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertIn(
|
||||
lb_instance_rules[0].id,
|
||||
self.vm_2.id,
|
||||
[self.vm_1.id, self.vm_2.id],
|
||||
"Check List Load Balancer instances Rules returns valid VM ID"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertIn(
|
||||
lb_instance_rules[1].id,
|
||||
self.vm_1.id,
|
||||
[self.vm_1.id, self.vm_2.id],
|
||||
"Check List Load Balancer instances Rules returns valid VM ID"
|
||||
)
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
try:
|
||||
self.debug(
|
||||
"SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)"%
|
||||
(self.vm_1.ipaddress, src_nat_ip_addr.ipaddress)
|
||||
)
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
hostnames,
|
||||
|
|
@ -600,14 +757,26 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
|
||||
#SSH should pass till there is a last VM associated with LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
|
||||
(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.vm_2.id
|
||||
))
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, src_nat_ip_addr.ipaddress))
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
hostnames,
|
||||
|
|
@ -615,7 +784,9 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
)
|
||||
|
||||
lb_rule.remove(self.apiclient, [self.vm_1])
|
||||
|
||||
with self.assertRaises(Exception):
|
||||
self.debug("Removed all VMs, trying to SSH")
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
src_nat_ip_addr.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
|
|
@ -633,7 +804,32 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
#2. attempt to ssh twice on the load balanced IP
|
||||
#3. verify using the hostname of the VM that
|
||||
# round robin is indeed happening as expected
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
"Check Port Forwarding Rule is created"
|
||||
)
|
||||
for vm in vm_response:
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
#Create Load Balancer rule and assign VMs to rule
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -649,7 +845,12 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=lb_rule.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(lb_rules, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#verify listLoadBalancerRules lists the added load balancing rule
|
||||
self.assertNotEqual(
|
||||
len(lb_rules),
|
||||
|
|
@ -667,65 +868,95 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=lb_rule.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(lb_instance_rules, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(lb_instance_rules),
|
||||
0,
|
||||
"Check Load Balancer instances Rule in its List"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertIn(
|
||||
lb_instance_rules[0].id,
|
||||
self.vm_2.id,
|
||||
[self.vm_1.id, self.vm_2.id],
|
||||
"Check List Load Balancer instances Rules returns valid VM ID"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertIn(
|
||||
lb_instance_rules[1].id,
|
||||
self.vm_1.id,
|
||||
[self.vm_1.id, self.vm_2.id],
|
||||
"Check List Load Balancer instances Rules returns valid VM ID"
|
||||
)
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s after adding VMs (ID: %s , %s)" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
hostnames = [ssh_1.execute("hostname")[0]]
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
self.assertIn(
|
||||
|
||||
hostnames.append(ssh_2.execute("hostname")[0])
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
hostnames,
|
||||
"Check if ssh succeeded for server1"
|
||||
)
|
||||
self.assertIn(
|
||||
self.assertIn(
|
||||
self.vm_2.name,
|
||||
hostnames,
|
||||
"Check if ssh succeeded for server2"
|
||||
)
|
||||
|
||||
#SSH should pass till there is a last VM associated with LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
#SSH should pass till there is a last VM associated with LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
|
||||
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_2.id
|
||||
))
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
|
||||
except Exception as e:
|
||||
self.fail("%s: SSH failed for VM with IP Address: %s" %
|
||||
(e, self.non_src_nat_ip.ipaddress.ipaddress))
|
||||
|
||||
hostnames.append(ssh_1.execute("hostname")[0])
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
hostnames,
|
||||
|
|
@ -734,6 +965,11 @@ class TestLoadBalancingRule(cloudstackTestCase):
|
|||
|
||||
lb_rule.remove(self.apiclient, [self.vm_1])
|
||||
with self.assertRaises(Exception):
|
||||
self.fail("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.vm_1.id
|
||||
))
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress.ipaddress,
|
||||
self.services['lbrule']["publicport"],
|
||||
|
|
@ -777,14 +1013,17 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
accountid=self.account.account.name,
|
||||
serviceofferingid=self.service_offering.id
|
||||
)
|
||||
|
||||
|
||||
src_nat_ip_addrs = list_publicIP(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
try:
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during fetching source NAT: %s" % e)
|
||||
|
||||
self.public_ip = PublicIPAddress.create(
|
||||
self.apiclient,
|
||||
self.vm_1.account,
|
||||
|
|
@ -824,22 +1063,54 @@ class TestRebootRouter(cloudstackTestCase):
|
|||
# still works through the sourceNAT Ip
|
||||
|
||||
#Retrieve router for the user account
|
||||
|
||||
routers = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(routers, list),
|
||||
True,
|
||||
"Check list routers returns a valid list"
|
||||
)
|
||||
|
||||
router = routers[0]
|
||||
|
||||
|
||||
self.debug("Rebooting the router (ID: %s)" % router.id)
|
||||
|
||||
cmd = rebootRouter.rebootRouterCmd()
|
||||
cmd.id = router.id
|
||||
self.apiclient.rebootRouter(cmd)
|
||||
#Sleep to ensure router is rebooted properly
|
||||
time.sleep(60)
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.vm_1.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
#we should be able to SSH after successful reboot
|
||||
try:
|
||||
self.debug("SSH into VM (ID : %s ) after reboot" % self.vm_1.id)
|
||||
|
||||
remoteSSHClient.remoteSSHClient(
|
||||
self.nat_rule.ipaddress,
|
||||
self.services["natrule"]["publicport"],
|
||||
|
|
@ -929,8 +1200,38 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(src_nat_ip_addrs, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.non_src_nat_ip = src_nat_ip_addrs[0]
|
||||
|
||||
|
||||
# Check if VM is in Running state before creating LB rule
|
||||
vm_response = VirtualMachine.list(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(vm_response, list),
|
||||
True,
|
||||
"Check list VM returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(vm_response),
|
||||
0,
|
||||
"Check Port Forwarding Rule is created"
|
||||
)
|
||||
for vm in vm_response:
|
||||
self.assertEqual(
|
||||
vm.state,
|
||||
'Running',
|
||||
"VM state should be Running before creating a NAT rule."
|
||||
)
|
||||
|
||||
lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
self.services["lbrule"],
|
||||
|
|
@ -938,26 +1239,52 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
self.account.account.name
|
||||
)
|
||||
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
|
||||
#Create SSH client for each VM
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
|
||||
try:
|
||||
self.debug("SSHing into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
#Create SSH client for each VM
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.services["lbrule"]["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
try:
|
||||
self.debug("SSHing again into IP address: %s with VMs (ID: %s , %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
self.vm_2.id
|
||||
))
|
||||
ssh_2 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.services["lbrule"]["publicport"],
|
||||
self.vm_2.username,
|
||||
self.vm_2.password
|
||||
)
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
res_2 = ssh_2.execute("hostname")[0]
|
||||
self.debug(res_2)
|
||||
|
||||
# If Round Robin Algorithm is chosen,
|
||||
# each ssh command should alternate between VMs
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
res_2 = ssh_2.execute("hostname")[0]
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
|
|
@ -972,15 +1299,27 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
|
||||
#Removing VM and assigning another VM to LB rule
|
||||
lb_rule.remove(self.apiclient, [self.vm_2])
|
||||
# Again make a SSH connection, as previous is not used after LB remove
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
|
||||
try:
|
||||
self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
|
||||
(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.vm_1.id,
|
||||
))
|
||||
# Again make a SSH connection, as previous is not used after LB remove
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.services["lbrule"]["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
)
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
res_1,
|
||||
|
|
@ -988,23 +1327,33 @@ class TestAssignRemoveLB(cloudstackTestCase):
|
|||
)
|
||||
|
||||
lb_rule.assign(self.apiclient, [self.vm_3])
|
||||
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
|
||||
try:
|
||||
ssh_1 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.services["lbrule"]["publicport"],
|
||||
self.vm_1.username,
|
||||
self.vm_1.password
|
||||
)
|
||||
ssh_3 = remoteSSHClient.remoteSSHClient(
|
||||
ssh_3 = remoteSSHClient.remoteSSHClient(
|
||||
self.non_src_nat_ip.ipaddress,
|
||||
self.services["lbrule"]["publicport"],
|
||||
self.vm_3.username,
|
||||
self.vm_3.password
|
||||
)
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
res_3 = ssh_3.execute("hostname")[0]
|
||||
|
||||
res_1 = ssh_1.execute("hostname")[0]
|
||||
self.debug(res_1)
|
||||
|
||||
time.sleep(self.services["lb_switch_wait"])
|
||||
|
||||
res_3 = ssh_3.execute("hostname")[0]
|
||||
self.debug(res_3)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.non_src_nat_ip.ipaddress)
|
||||
|
||||
self.assertIn(
|
||||
self.vm_1.name,
|
||||
res_1,
|
||||
|
|
@ -1069,8 +1418,12 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.ip_addr = ip_addrs[0]
|
||||
|
||||
try:
|
||||
self.ip_addr = ip_addrs[0]
|
||||
except Exception as e:
|
||||
raise Exception("Failed: During acquiring source NAT for account: %s" %
|
||||
self.account.account.name)
|
||||
|
||||
self.nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
self.virtual_machine,
|
||||
|
|
@ -1094,15 +1447,21 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
|
||||
def test_releaseIP(self):
|
||||
"""Test for Associate/Disassociate public IP address"""
|
||||
|
||||
|
||||
self.debug("Deleting Public IP : %s" % self.ip_addr.id)
|
||||
|
||||
self.ip_address.delete(self.apiclient)
|
||||
|
||||
|
||||
# Sleep to ensure that deleted state is reflected in other calls
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# ListPublicIpAddresses should not list deleted Public IP address
|
||||
list_pub_ip_addr_resp = list_publicIP(
|
||||
self.apiclient,
|
||||
id=self.ip_addr.id
|
||||
)
|
||||
|
||||
self.debug("List Public IP response" + str(list_pub_ip_addr_resp))
|
||||
|
||||
self.assertEqual(
|
||||
list_pub_ip_addr_resp,
|
||||
None,
|
||||
|
|
@ -1115,6 +1474,7 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.nat_rule.id
|
||||
)
|
||||
self.debug("List NAT Rule response" + str(list_nat_rule))
|
||||
self.assertEqual(
|
||||
list_nat_rule,
|
||||
None,
|
||||
|
|
@ -1127,10 +1487,12 @@ class TestReleaseIP(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.lb_rule.id
|
||||
)
|
||||
self.debug("List LB Rule response" + str(list_lb_rule))
|
||||
|
||||
self.assertEqual(
|
||||
list_lb_rule,
|
||||
None,
|
||||
"Check if LB rules for IP Address are no longer available"
|
||||
list_lb_rule,
|
||||
None,
|
||||
"Check if LB rules for IP Address are no longer available"
|
||||
)
|
||||
|
||||
# SSH Attempt though public IP should fail
|
||||
|
|
@ -1183,7 +1545,13 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
try:
|
||||
src_nat_ip_addr = src_nat_ip_addrs[0]
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
src_nat_ip_addr.ipaddress)
|
||||
|
||||
self.lb_rule = LoadBalancerRule.create(
|
||||
self.apiclient,
|
||||
|
|
@ -1192,6 +1560,7 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
self.account.account.name
|
||||
)
|
||||
self.lb_rule.assign(self.apiclient, [self.vm_1])
|
||||
|
||||
self.nat_rule = NATRule.create(
|
||||
self.apiclient,
|
||||
self.vm_1,
|
||||
|
|
@ -1216,19 +1585,33 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
name='account.cleanup.interval'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(interval, list),
|
||||
True,
|
||||
"Check if account.cleanup.interval config present"
|
||||
)
|
||||
# Sleep to ensure that all resources are deleted
|
||||
time.sleep(int(interval[0].value))
|
||||
|
||||
# ListLoadBalancerRules should not list
|
||||
# associated rules with deleted account
|
||||
# Unable to find account testuser1 in domain 1 : Exception
|
||||
with self.assertRaises(Exception):
|
||||
list_lb_rules(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
try:
|
||||
list_lb_reponse = list_lb_rules(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
list_lb_reponse,
|
||||
None,
|
||||
"Check load balancing rule is properly deleted."
|
||||
)
|
||||
except Exception as e:
|
||||
|
||||
raise Exception(
|
||||
"Exception raised while fetching LB rules for account: %s" %
|
||||
self.account.account.name)
|
||||
# ListPortForwardingRules should not
|
||||
# list associated rules with deleted account
|
||||
with self.assertRaises(Exception):
|
||||
|
|
@ -1249,4 +1632,4 @@ class TestDeleteAccount(cloudstackTestCase):
|
|||
|
||||
def tearDown(self):
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
return
|
||||
return
|
||||
|
|
@ -62,16 +62,6 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.pod = get_pod(self.apiclient, self.zone.id)
|
||||
|
||||
self.services["nfs"][0]["zoneid"] = self.zone.id
|
||||
self.services["nfs"][1]["zoneid"] = self.zone.id
|
||||
self.services["nfs"][2]["zoneid"] = self.zone.id
|
||||
|
||||
self.services["nfs"][0]["podid"] = self.pod.id
|
||||
self.services["nfs"][1]["podid"] = self.pod.id
|
||||
self.services["nfs"][2]["podid"] = self.pod.id
|
||||
|
||||
self.services["iscsi"][0]["zoneid"] = self.zone.id
|
||||
self.services["iscsi"][0]["podid"] = self.pod.id
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
|
|
@ -101,12 +91,22 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
hypervisortype=v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(clusters, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cluster = clusters[0]
|
||||
#Host should be present before adding primary storage
|
||||
list_hosts_response = list_hosts(
|
||||
self.apiclient,
|
||||
clusterid=cluster.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
|
|
@ -116,10 +116,14 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
|
||||
storage = StoragePool.create(self.apiclient,
|
||||
v,
|
||||
clusterid=cluster.id
|
||||
clusterid=cluster.id,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.cleanup.append(storage)
|
||||
|
||||
self.debug("Created storage pool in cluster: %s" % cluster.id)
|
||||
|
||||
self.assertEqual(
|
||||
storage.state,
|
||||
'Up',
|
||||
|
|
@ -137,6 +141,11 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=storage.id,
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(storage_pools_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(storage_pools_response),
|
||||
0,
|
||||
|
|
@ -165,14 +174,23 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
hypervisortype=v["hypervisor"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(clusters, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cluster = clusters[0]
|
||||
|
||||
storage = StoragePool.create(self.apiclient,
|
||||
v,
|
||||
clusterid=cluster.id
|
||||
clusterid=cluster.id,
|
||||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.cleanup.append(storage)
|
||||
|
||||
self.debug("Created iSCSI storage pool in cluster: %s" % cluster.id)
|
||||
|
||||
self.assertEqual(
|
||||
storage.state,
|
||||
'Up',
|
||||
|
|
@ -184,7 +202,11 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=storage.id,
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(storage_pools_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(storage_pools_response),
|
||||
0,
|
||||
|
|
@ -206,4 +228,4 @@ class TestPrimaryStorageServices(cloudstackTestCase):
|
|||
# Call cleanup for reusing primary storage
|
||||
cleanup_resources(self.apiclient, self.cleanup)
|
||||
self.cleanup = []
|
||||
return
|
||||
return
|
||||
|
|
@ -49,6 +49,8 @@ class Services:
|
|||
"password": "fr3sca",
|
||||
},
|
||||
"ostypeid":12,
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"zoneid": 1,
|
||||
# Optional, if specified the mentioned zone will be
|
||||
# used for tests
|
||||
|
|
@ -124,6 +126,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
|
|
@ -132,9 +139,20 @@ class TestRouterServices(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list host returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
# Sleep to ensure that router is in ready state before double hop
|
||||
time.sleep(200)
|
||||
|
||||
self.debug("Router ID: %s, state: %s" % (router.id, router.state))
|
||||
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
|
|
@ -145,6 +163,8 @@ class TestRouterServices(cloudstackTestCase):
|
|||
"service dnsmasq status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Dnsmasq process status: %s" % res)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("running"),
|
||||
1,
|
||||
|
|
@ -166,6 +186,12 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
|
|
@ -174,9 +200,19 @@ class TestRouterServices(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
# Sleep to ensure that router is in ready state before double hop
|
||||
time.sleep(200)
|
||||
|
||||
self.debug("Router ID: %s, state: %s" % (router.id, router.state))
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Running',
|
||||
"Check list router response for router state"
|
||||
)
|
||||
|
||||
result = get_process_status(
|
||||
host.ipaddress,
|
||||
|
|
@ -187,6 +223,8 @@ class TestRouterServices(cloudstackTestCase):
|
|||
"service dnsmasq status"
|
||||
)
|
||||
res = str(result)
|
||||
self.debug("Dnsmasq process status: %s" % res)
|
||||
|
||||
self.assertEqual(
|
||||
res.count("running"),
|
||||
1,
|
||||
|
|
@ -207,6 +245,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
1,
|
||||
"Check haproxy service is running or not"
|
||||
)
|
||||
self.debug("Haproxy process status: %s" % res)
|
||||
return
|
||||
|
||||
def test_03_restart_network_cleanup(self):
|
||||
|
|
@ -224,6 +263,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
#Store old values before restart
|
||||
|
|
@ -237,15 +281,25 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
network = networks[0]
|
||||
if network.state in ["Implemented", "Setup"]:
|
||||
break
|
||||
elif timeout == 0:
|
||||
break
|
||||
else:
|
||||
time.sleep(60)
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug(
|
||||
"Restarting network with ID: %s, Network state: %s" % (
|
||||
network.id,
|
||||
network.state
|
||||
))
|
||||
cmd = restartNetwork.restartNetworkCmd()
|
||||
cmd.id = network.id
|
||||
cmd.cleanup = True
|
||||
|
|
@ -257,6 +311,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.assertNotEqual(
|
||||
|
|
@ -283,15 +342,25 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(networks, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
network = networks[0]
|
||||
if network.state in ["Implemented", "Setup"]:
|
||||
break
|
||||
elif timeout == 0:
|
||||
break
|
||||
else:
|
||||
time.sleep(60)
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug(
|
||||
"Restarting network with ID: %s, Network state: %s" % (
|
||||
network.id,
|
||||
network.state
|
||||
))
|
||||
cmd = restartNetwork.restartNetworkCmd()
|
||||
cmd.id = network.id
|
||||
cmd.cleanup = False
|
||||
|
|
@ -303,6 +372,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
hosts = list_hosts(
|
||||
|
|
@ -311,6 +385,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
res = get_process_status(
|
||||
|
|
@ -321,9 +400,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
router.linklocalip,
|
||||
"uptime"
|
||||
)
|
||||
|
||||
# res = 12:37:14 up 1 min, 0 users, load average: 0.61, 0.22, 0.08
|
||||
# Split result to check the uptime
|
||||
result = res[0].split()
|
||||
self.debug("Router Uptime: %s" % result)
|
||||
self.assertEqual(
|
||||
str(result[1]),
|
||||
'up',
|
||||
|
|
@ -357,7 +438,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_router_response),
|
||||
0,
|
||||
|
|
@ -374,6 +459,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(zones, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
zone = zones[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -413,7 +503,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_router_response),
|
||||
0,
|
||||
|
|
@ -430,6 +524,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(zones, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
zone = zones[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -459,6 +558,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
zoneid=router.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
self.assertEqual(
|
||||
router.gateway,
|
||||
|
|
@ -479,8 +583,14 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
|
||||
self.debug("Stopping the router with ID: %s" % router.id)
|
||||
#Stop the router
|
||||
cmd = stopRouter.stopRouterCmd()
|
||||
cmd.id = router.id
|
||||
|
|
@ -491,7 +601,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in stopped state
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
|
|
@ -512,8 +626,15 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
self.debug("Starting the router with ID: %s" % router.id)
|
||||
|
||||
#Start the router
|
||||
cmd = startRouter.startRouterCmd()
|
||||
cmd.id = router.id
|
||||
|
|
@ -524,7 +645,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in running state
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
|
|
@ -545,10 +670,17 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
public_ip = router.publicip
|
||||
|
||||
self.debug("Rebooting the router with ID: %s" % router.id)
|
||||
|
||||
#Reboot the router
|
||||
cmd = rebootRouter.rebootRouterCmd()
|
||||
cmd.id = router.id
|
||||
|
|
@ -559,7 +691,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=router.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#List router should have router in running state and same public IP
|
||||
self.assertEqual(
|
||||
router_response[0].state,
|
||||
|
|
@ -589,7 +725,11 @@ class TestRouterServices(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vms, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vms),
|
||||
0,
|
||||
|
|
@ -597,6 +737,7 @@ class TestRouterServices(cloudstackTestCase):
|
|||
)
|
||||
|
||||
for vm in list_vms:
|
||||
self.debug("Stopping the VM with ID: %s" % vm.id)
|
||||
# Stop all virtual machines associated with that account
|
||||
cmd = stopVirtualMachine.stopVirtualMachineCmd()
|
||||
cmd.id = vm.id
|
||||
|
|
@ -606,24 +747,42 @@ class TestRouterServices(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
name='network.gc.interval'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(config, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
response = config[0]
|
||||
|
||||
# Wait for network.gc.interval * 3 time
|
||||
time.sleep(int(response.value) * 3)
|
||||
|
||||
#Check status of network router
|
||||
list_router_response = list_routers(
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
#Check status of network router
|
||||
list_router_response = list_routers(
|
||||
self.apiclient,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
if isinstance(list_router_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List router call failed!")
|
||||
time.sleep(5)
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_router_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
router = list_router_response[0]
|
||||
|
||||
|
||||
self.debug("Router state after network.gc.interval: %s" % router.state)
|
||||
self.assertEqual(
|
||||
router.state,
|
||||
'Stopped',
|
||||
"Check state of the router after stopping all VMs associated"
|
||||
)
|
||||
return
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ class Services:
|
|||
def __init__(self):
|
||||
self.services = {
|
||||
"storage": {
|
||||
"url": "nfs://192.168.100.131/SecStorage"
|
||||
"url": "nfs://192.168.100.131/SecondaryStorage"
|
||||
# Format: File_System_Type/Location/Path
|
||||
},
|
||||
"hypervisors": {
|
||||
|
|
@ -38,7 +38,7 @@ class Services:
|
|||
"templatefilter": "self",
|
||||
},
|
||||
},
|
||||
"sleep": 180,
|
||||
"sleep": 60,
|
||||
"timeout": 5,
|
||||
"zoneid": 1,
|
||||
# Optional, if specified the mentioned zone will be
|
||||
|
|
@ -46,7 +46,23 @@ class Services:
|
|||
}
|
||||
|
||||
class TestSecStorageServices(cloudstackTestCase):
|
||||
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.api_client = fetch_api_client()
|
||||
cls.services = Services().services
|
||||
cls._cleanup = []
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
try:
|
||||
#Cleanup resources used
|
||||
cleanup_resources(cls.api_client, cls._cleanup)
|
||||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
return
|
||||
|
||||
def setUp(self):
|
||||
|
||||
self.apiclient = self.testClient.getApiClient()
|
||||
|
|
@ -55,17 +71,6 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
# Get Zone and pod
|
||||
self.zone = get_zone(self.apiclient, self.services)
|
||||
self.pod = get_pod(self.apiclient, self.zone.id)
|
||||
|
||||
self.services["storage"]["zoneid"] = self.zone.id
|
||||
self.services["storage"]["podid"] = self.pod.id
|
||||
|
||||
self.services["hypervisors"][0]["zoneid"] = self.zone.id
|
||||
self.services["hypervisors"][1]["zoneid"] = self.zone.id
|
||||
self.services["hypervisors"][2]["zoneid"] = self.zone.id
|
||||
|
||||
self.services["hypervisors"][0]["podid"] = self.pod.id
|
||||
self.services["hypervisors"][1]["podid"] = self.pod.id
|
||||
self.services["hypervisors"][2]["podid"] = self.pod.id
|
||||
return
|
||||
|
||||
def tearDown(self):
|
||||
|
|
@ -88,7 +93,11 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
cmd.zoneid = self.zone.id
|
||||
cmd.url = self.services["storage"]["url"]
|
||||
sec_storage = self.apiclient.addSecondaryStorage(cmd)
|
||||
|
||||
|
||||
self.debug("Added secondary storage to zone: %s" % self.zone.id)
|
||||
# Cleanup at the end
|
||||
self._cleanup.append(sec_storage)
|
||||
|
||||
self.assertEqual(
|
||||
sec_storage.zoneid,
|
||||
self.zone.id,
|
||||
|
|
@ -96,11 +105,15 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
)
|
||||
|
||||
list_hosts_response = list_hosts(
|
||||
self.apiclient,
|
||||
type='SecondaryStorage',
|
||||
id=sec_storage.id
|
||||
self.apiclient,
|
||||
type='SecondaryStorage',
|
||||
id=sec_storage.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
0,
|
||||
|
|
@ -136,6 +149,11 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
# ListHosts has all 'routing' hosts in UP state
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
|
|
@ -155,6 +173,11 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_storage_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_storage_response),
|
||||
0,
|
||||
|
|
@ -177,13 +200,19 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
)
|
||||
|
||||
if not list_hosts_response:
|
||||
if not isinstance(list_hosts_response, list):
|
||||
# Sleep to ensure Secondary storage is Up
|
||||
time.sleep(int(self.services["sleep"]))
|
||||
timeout = timeout - 1
|
||||
elif timeout == 0 or list_hosts_response:
|
||||
elif timeout == 0 or isinstance(list_hosts_response, list):
|
||||
break
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_hosts_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_hosts_response),
|
||||
0,
|
||||
|
|
@ -197,6 +226,7 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
'Up',
|
||||
"Check state of secondary storage"
|
||||
)
|
||||
self.debug("Checking SSVM status in zone: %s" % self.zone.id)
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
|
|
@ -207,13 +237,18 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
podid=self.pod.id
|
||||
)
|
||||
if not list_ssvm_response:
|
||||
if not isinstance(list_ssvm_response, list):
|
||||
# Sleep to ensure SSVMs are Up and Running
|
||||
time.sleep(int(self.services["sleep"]))
|
||||
timeout = timeout - 1
|
||||
elif timeout == 0 or list_ssvm_response:
|
||||
elif timeout == 0 or isinstance(list_ssvm_response, list):
|
||||
break
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify SSVM response
|
||||
self.assertNotEqual(
|
||||
len(list_ssvm_response),
|
||||
|
|
@ -240,6 +275,9 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
|
||||
for k, v in self.services["hypervisors"].items():
|
||||
|
||||
self.debug("Downloading BUILTIN templates in zone: %s" %
|
||||
self.zone.id)
|
||||
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
hypervisor=v["hypervisor"],
|
||||
|
|
@ -253,17 +291,30 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
if template.templatetype == "BUILTIN":
|
||||
templateid = template.id
|
||||
|
||||
# Wait to start a downloadin of template
|
||||
# Wait to start a downloading of template
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
|
||||
while True and (templateid != None):
|
||||
template_response = list_templates(
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
template_response = list_templates(
|
||||
self.apiclient,
|
||||
id=templateid,
|
||||
zoneid=self.zone.id,
|
||||
templatefilter=v["templatefilter"]
|
||||
)
|
||||
template = template_response[0]
|
||||
|
||||
if isinstance(template_response, list):
|
||||
template = template_response[0]
|
||||
break
|
||||
|
||||
elif timeout == 0:
|
||||
raise Exception("List template API call failed.")
|
||||
|
||||
time.sleep(1)
|
||||
timeout = timeout - 1
|
||||
|
||||
# If template is ready,
|
||||
# template.status = Download Complete
|
||||
# Downloading - x% Downloaded
|
||||
|
|
@ -273,16 +324,35 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
elif 'Downloaded' not in template.status.split():
|
||||
raise Exception
|
||||
elif 'Downloaded' in template.status.split():
|
||||
time.sleep(120)
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
#Ensuring the template is in ready state
|
||||
time.sleep(30)
|
||||
template_response = list_templates(
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
template_response = list_templates(
|
||||
self.apiclient,
|
||||
id=templateid,
|
||||
zoneid=self.zone.id,
|
||||
templatefilter=v["templatefilter"]
|
||||
)
|
||||
|
||||
if isinstance(template_response, list):
|
||||
template = template_response[0]
|
||||
break
|
||||
|
||||
elif timeout == 0:
|
||||
raise Exception("List template API call failed.")
|
||||
|
||||
time.sleep(1)
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
template = template_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -290,4 +360,4 @@ class TestSecStorageServices(cloudstackTestCase):
|
|||
True,
|
||||
"Check whether state of template is ready or not"
|
||||
)
|
||||
return
|
||||
return
|
||||
|
|
@ -61,10 +61,18 @@ class TestCreateServiceOffering(cloudstackTestCase):
|
|||
)
|
||||
self.cleanup.append(service_offering)
|
||||
|
||||
self.debug("Created service offering with ID: %s" % service_offering.id)
|
||||
|
||||
list_service_response = list_service_offering(
|
||||
self.apiclient,
|
||||
id=service_offering.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_service_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_service_response),
|
||||
0,
|
||||
|
|
@ -156,6 +164,8 @@ class TestServiceOfferings(cloudstackTestCase):
|
|||
random_displaytext = random_gen()
|
||||
random_name = random_gen()
|
||||
|
||||
self.debug("Updating service offering with ID: %s" %
|
||||
self.service_offering_1.id)
|
||||
|
||||
cmd = updateServiceOffering.updateServiceOfferingCmd()
|
||||
#Add parameters for API call
|
||||
|
|
@ -168,7 +178,12 @@ class TestServiceOfferings(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.service_offering_1.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_service_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_service_response),
|
||||
0,
|
||||
|
|
@ -195,6 +210,9 @@ class TestServiceOfferings(cloudstackTestCase):
|
|||
# 1. deleteServiceOffering should return
|
||||
# a valid information for newly created offering
|
||||
|
||||
self.debug("Deleting service offering with ID: %s" %
|
||||
self.service_offering_2.id)
|
||||
|
||||
self.service_offering_2.delete(self.apiclient)
|
||||
|
||||
list_service_response = list_service_offering(
|
||||
|
|
@ -209,4 +227,3 @@ class TestServiceOfferings(cloudstackTestCase):
|
|||
)
|
||||
|
||||
return
|
||||
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ from testcase.libs.base import *
|
|||
from testcase.libs.common import *
|
||||
import remoteSSHClient
|
||||
|
||||
|
||||
class Services:
|
||||
"""Test Snapshots Services
|
||||
"""
|
||||
|
|
@ -31,8 +32,8 @@ class Services:
|
|||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"disk_offering": {
|
||||
"displaytext": "Small",
|
||||
|
|
@ -70,7 +71,7 @@ class Services:
|
|||
{
|
||||
"intervaltype": 'HOURLY',
|
||||
# Frequency of snapshots
|
||||
"maxsnaps": 1, # Should be min 2
|
||||
"maxsnaps": 1, # Should be min 2
|
||||
"schedule": 1,
|
||||
"timezone": 'US/Arizona',
|
||||
# Timezone Formats - http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack
|
||||
|
|
@ -85,11 +86,11 @@ class Services:
|
|||
},
|
||||
"ostypeid": 12,
|
||||
# Cent OS 5.3 (64 bit)
|
||||
"diskdevice": "/dev/xvdb", # Data Disk
|
||||
"rootdisk": "/dev/xvda", # Root Disk
|
||||
"diskdevice": "/dev/xvdb", # Data Disk
|
||||
"rootdisk": "/dev/xvda", # Root Disk
|
||||
|
||||
"diskname": "Test Disk",
|
||||
"size": 1, # GBs
|
||||
"size": 1, # GBs
|
||||
"domainid": 1,
|
||||
|
||||
"mount_dir": "/mnt/tmp",
|
||||
|
|
@ -98,17 +99,14 @@ class Services:
|
|||
"sub_lvl_dir2": "test2",
|
||||
"random_data": "random.data",
|
||||
|
||||
"sec_storage": '192.168.100.131',
|
||||
# IP of Sec storage where snapshots are stored
|
||||
"exportpath": 'SecondaryStorage',
|
||||
#Export path of secondary storage
|
||||
"username": "root",
|
||||
"password": "password",
|
||||
"ssh_port": 22,
|
||||
"zoneid": 1,
|
||||
# Optional, if specified the mentioned zone will be
|
||||
# used for tests
|
||||
"sleep":60,
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"mode": 'advanced',
|
||||
# Networking mode, Advanced, Basic
|
||||
}
|
||||
|
|
@ -199,13 +197,23 @@ class TestSnapshotRootDisk(cloudstackTestCase):
|
|||
type='ROOT'
|
||||
)
|
||||
|
||||
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
|
||||
self.cleanup.append(snapshot)
|
||||
snapshot = Snapshot.create(
|
||||
self.apiclient,
|
||||
volumes[0].id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.debug("Snapshot created: ID - %s" % snapshot.id)
|
||||
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
id=snapshot.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(snapshots, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
snapshots,
|
||||
|
|
@ -242,17 +250,39 @@ class TestSnapshotRootDisk(cloudstackTestCase):
|
|||
'NULL',
|
||||
"Check if backup_snap_id is not null"
|
||||
)
|
||||
|
||||
# Get the Secondary Storage details from list Hosts
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
type='SecondaryStorage',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
# Sleep to ensure that snapshot is reflected in sec storage
|
||||
time.sleep(self.services["sleep"])
|
||||
# hosts[0].name = "nfs://192.168.100.21/export/test"
|
||||
parse_url = (hosts[0].name).split('/')
|
||||
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
|
||||
|
||||
# Login to VM to check snapshot present on sec disk
|
||||
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
|
||||
# Split IP address and export path from name
|
||||
sec_storage_ip = parse_url[2]
|
||||
# Sec Storage IP: 192.168.100.21
|
||||
|
||||
cmds = [ "mkdir -p %s" % self.services["mount_dir"],
|
||||
export_path = '/'.join(parse_url[3:])
|
||||
# Export path: export/test
|
||||
|
||||
try:
|
||||
# Login to VM to check snapshot present on sec disk
|
||||
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s:/%s %s" % (
|
||||
self.services["sec_storage"],
|
||||
self.services["exportpath"],
|
||||
sec_storage_ip,
|
||||
export_path,
|
||||
self.services["mount_dir"]
|
||||
),
|
||||
"ls %s/snapshots/%s/%s" % (
|
||||
|
|
@ -260,10 +290,14 @@ class TestSnapshotRootDisk(cloudstackTestCase):
|
|||
account_id,
|
||||
volume_id
|
||||
),
|
||||
]
|
||||
]
|
||||
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
|
||||
except Exception:
|
||||
self.fail("SSH failed for Virtual machine: %s" %
|
||||
self.virtual_machine_with_disk.ipaddress)
|
||||
|
||||
res = str(result)
|
||||
# Check snapshot UUID in secondary storage and database
|
||||
|
|
@ -276,8 +310,13 @@ class TestSnapshotRootDisk(cloudstackTestCase):
|
|||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
try:
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for Virtual machine: %s" %
|
||||
self.virtual_machine_with_disk.ipaddress)
|
||||
|
||||
return
|
||||
|
||||
|
|
@ -377,13 +416,28 @@ class TestSnapshots(cloudstackTestCase):
|
|||
virtualmachineid=self.virtual_machine_with_disk.id,
|
||||
type='DATADISK'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(volume, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
snapshot = Snapshot.create(self.apiclient, volume[0].id)
|
||||
|
||||
self.debug("Creating a Snapshot from data volume: %s" % volume[0].id)
|
||||
snapshot = Snapshot.create(
|
||||
self.apiclient,
|
||||
volume[0].id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
id=snapshot.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(snapshots, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
snapshots,
|
||||
None,
|
||||
|
|
@ -418,15 +472,39 @@ class TestSnapshots(cloudstackTestCase):
|
|||
'NULL',
|
||||
"Check if backup_snap_id is not null"
|
||||
)
|
||||
# Sleep to ensure that snapshot is reflected in sec storage
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Login to VM to check snapshot present on sec disk
|
||||
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
|
||||
cmds = [ "mkdir -p %s" % self.services["mount_dir"],
|
||||
# Get the Secondary Storage details from list Hosts
|
||||
hosts = list_hosts(
|
||||
self.apiclient,
|
||||
type='SecondaryStorage',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
# hosts[0].name = "nfs://192.168.100.21/export"
|
||||
parse_url = (hosts[0].name).split('/')
|
||||
# parse_url = ['nfs:', '', '192.168.100.21', 'export']
|
||||
|
||||
# Split IP address and export path from name
|
||||
sec_storage_ip = parse_url[2]
|
||||
# Sec Storage IP: 192.168.100.21
|
||||
|
||||
export_path = '/'.join(parse_url[3:])
|
||||
# Export path: export
|
||||
|
||||
try:
|
||||
# Login to VM to check snapshot present on sec disk
|
||||
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s:/%s %s" % (
|
||||
self.services["sec_storage"],
|
||||
self.services["exportpath"],
|
||||
sec_storage_ip,
|
||||
export_path,
|
||||
self.services["mount_dir"]
|
||||
),
|
||||
"ls %s/snapshots/%s/%s" % (
|
||||
|
|
@ -435,8 +513,13 @@ class TestSnapshots(cloudstackTestCase):
|
|||
volume_id
|
||||
),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.virtual_machine_with_disk.ipaddress)
|
||||
|
||||
res = str(result)
|
||||
# Check snapshot UUID in secondary storage and database
|
||||
self.assertEqual(
|
||||
|
|
@ -448,8 +531,14 @@ class TestSnapshots(cloudstackTestCase):
|
|||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
try:
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.virtual_machine_with_disk.ipaddress)
|
||||
|
||||
return
|
||||
|
||||
def test_03_volume_from_snapshot(self):
|
||||
|
|
@ -463,13 +552,15 @@ class TestSnapshots(cloudstackTestCase):
|
|||
random_data_0 = random_gen(100)
|
||||
random_data_1 = random_gen(100)
|
||||
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
#Format partition using ext3
|
||||
format_volume_to_ext3(
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
#Format partition using ext3
|
||||
format_volume_to_ext3(
|
||||
ssh_client,
|
||||
self.services["diskdevice"]
|
||||
)
|
||||
cmds = [
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s1 %s" % (
|
||||
self.services["diskdevice"],
|
||||
|
|
@ -496,15 +587,24 @@ class TestSnapshots(cloudstackTestCase):
|
|||
self.services["random_data"]
|
||||
),
|
||||
]
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.virtual_machine.ipaddress)
|
||||
# Unmount the Sec Storage
|
||||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
try:
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
list_volume_response = list_volumes(
|
||||
self.apiclient,
|
||||
|
|
@ -520,25 +620,34 @@ class TestSnapshots(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.cleanup.append(snapshot)
|
||||
self.debug("Created Snapshot from volume: %s" % volume_response.id)
|
||||
|
||||
#Create volume from snapshot
|
||||
self.debug("Creating volume from snapshot: %s" % snapshot.id)
|
||||
volume = Volume.create_from_snapshot(
|
||||
self.apiclient,
|
||||
snapshot.id,
|
||||
self.services
|
||||
)
|
||||
self.apiclient,
|
||||
snapshot.id,
|
||||
self.services,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
id=volume.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(volumes, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(volumes),
|
||||
None,
|
||||
"Check Volume list Length"
|
||||
)
|
||||
self.assertEqual (
|
||||
|
||||
self.assertEqual(
|
||||
volumes[0].id,
|
||||
volume.id,
|
||||
"Check Volume in the List Volumes"
|
||||
|
|
@ -552,9 +661,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
cmd.virtualmachineid = new_virtual_machine.id
|
||||
self.apiclient.attachVolume(cmd)
|
||||
|
||||
#Login to VM to verify test directories and files
|
||||
ssh = new_virtual_machine.get_ssh_client()
|
||||
cmds = [
|
||||
try:
|
||||
#Login to VM to verify test directories and files
|
||||
ssh = new_virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s1 %s" % (
|
||||
self.services["diskdevice"],
|
||||
|
|
@ -562,21 +673,26 @@ class TestSnapshots(cloudstackTestCase):
|
|||
),
|
||||
]
|
||||
|
||||
for c in cmds:
|
||||
ssh.execute(c)
|
||||
for c in cmds:
|
||||
ssh.execute(c)
|
||||
|
||||
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
self.services["mount_dir"],
|
||||
self.services["sub_dir"],
|
||||
self.services["sub_lvl_dir1"],
|
||||
self.services["random_data"]
|
||||
))
|
||||
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
self.services["mount_dir"],
|
||||
self.services["sub_dir"],
|
||||
self.services["sub_lvl_dir2"],
|
||||
self.services["random_data"]
|
||||
))
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
new_virtual_machine.ipaddress)
|
||||
|
||||
#Verify returned data
|
||||
self.assertEqual(
|
||||
random_data_0,
|
||||
|
|
@ -592,8 +708,13 @@ class TestSnapshots(cloudstackTestCase):
|
|||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
try:
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP: %s" %
|
||||
new_virtual_machine.ipaddress)
|
||||
return
|
||||
|
||||
def test_04_delete_snapshot(self):
|
||||
|
|
@ -601,19 +722,26 @@ class TestSnapshots(cloudstackTestCase):
|
|||
"""
|
||||
|
||||
#1. Snapshot the Volume
|
||||
#2. Delete the snapshot
|
||||
#3. Verify snapshot is removed by calling List Snapshots API
|
||||
#2. Delete the snapshot
|
||||
#3. Verify snapshot is removed by calling List Snapshots API
|
||||
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
type='DATADISK'
|
||||
)
|
||||
|
||||
snapshot = Snapshot.create(self.apiclient, volumes[0].id)
|
||||
self.assertEqual(
|
||||
isinstance(volumes, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
snapshot = Snapshot.create(
|
||||
self.apiclient,
|
||||
volumes[0].id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
snapshot.delete(self.apiclient)
|
||||
#Sleep to ensure all database records are updated
|
||||
time.sleep(60)
|
||||
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
|
|
@ -639,7 +767,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
virtualmachineid=self.virtual_machine_with_disk.id,
|
||||
type='ROOT'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(volume, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
recurring_snapshot = SnapshotPolicy.create(
|
||||
self.apiclient,
|
||||
volume[0].id,
|
||||
|
|
@ -653,6 +785,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
id=recurring_snapshot.id,
|
||||
volumeid=volume[0].id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_snapshots_policy, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
list_snapshots_policy,
|
||||
None,
|
||||
|
|
@ -675,13 +812,30 @@ class TestSnapshots(cloudstackTestCase):
|
|||
(self.services["recurring_snapshot"]["maxsnaps"]) * 3600
|
||||
)
|
||||
|
||||
snapshots = list_snapshots(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
volumeid=volume[0].id,
|
||||
intervaltype=\
|
||||
self.services["recurring_snapshot"]["intervaltype"],
|
||||
snapshottype='RECURRING'
|
||||
)
|
||||
|
||||
if isinstance(snapshots, list):
|
||||
break
|
||||
|
||||
elif timeout == 0:
|
||||
raise Exception("List snapshots API call failed.")
|
||||
|
||||
time.sleep(1)
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(snapshots, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
len(snapshots),
|
||||
|
|
@ -703,6 +857,12 @@ class TestSnapshots(cloudstackTestCase):
|
|||
type='DATADISK'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(volume, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
recurring_snapshot = SnapshotPolicy.create(
|
||||
self.apiclient,
|
||||
volume[0].id,
|
||||
|
|
@ -715,6 +875,13 @@ class TestSnapshots(cloudstackTestCase):
|
|||
id=recurring_snapshot.id,
|
||||
volumeid=volume[0].id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_snapshots_policy, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_snapshots_policy,
|
||||
None,
|
||||
|
|
@ -737,15 +904,31 @@ class TestSnapshots(cloudstackTestCase):
|
|||
time.sleep(
|
||||
(self.services["recurring_snapshot"]["maxsnaps"]) * 3600
|
||||
)
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
volumeid=volume[0].id,
|
||||
intervaltype=\
|
||||
self.services["recurring_snapshot"]["intervaltype"],
|
||||
snapshottype='RECURRING'
|
||||
)
|
||||
|
||||
if isinstance(snapshots, list):
|
||||
break
|
||||
|
||||
elif timeout == 0:
|
||||
raise Exception("List snapshots API call failed.")
|
||||
|
||||
snapshots = list_snapshots(
|
||||
self.apiclient,
|
||||
volumeid=volume[0].id,
|
||||
intervaltype=\
|
||||
self.services["recurring_snapshot"]["intervaltype"],
|
||||
snapshottype='RECURRING'
|
||||
time.sleep(1)
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(snapshots, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
len(snapshots),
|
||||
self.services["recurring_snapshot"]["maxsnaps"],
|
||||
|
|
@ -767,10 +950,12 @@ class TestSnapshots(cloudstackTestCase):
|
|||
random_data_0 = random_gen(100)
|
||||
random_data_1 = random_gen(100)
|
||||
|
||||
#Login to virtual machine
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
try:
|
||||
#Login to virtual machine
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [ "mkdir -p %s" % self.services["mount_dir"],
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s1 %s" % (
|
||||
self.services["rootdisk"],
|
||||
self.services["mount_dir"]
|
||||
|
|
@ -797,33 +982,51 @@ class TestSnapshots(cloudstackTestCase):
|
|||
)
|
||||
]
|
||||
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP address: %s" %
|
||||
self.virtual_machine.ipaddress)
|
||||
|
||||
# Unmount the Volume
|
||||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
ssh_client.execute(c)
|
||||
|
||||
volumes = list_volumes(
|
||||
self.apiclient,
|
||||
virtualmachineid=self.virtual_machine.id,
|
||||
type='ROOT'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(volumes, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
volume = volumes[0]
|
||||
|
||||
#Create a snapshot of volume
|
||||
snapshot = Snapshot.create(self.apiclient, volume.id)
|
||||
self.cleanup.append(snapshot)
|
||||
snapshot = Snapshot.create(
|
||||
self.apiclient,
|
||||
volume.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.debug("Snapshot created from volume ID: %s" % volume.id)
|
||||
# Generate template from the snapshot
|
||||
template = Template.create_from_snapshot(
|
||||
self.apiclient,
|
||||
snapshot,
|
||||
self.services["templates"]
|
||||
)
|
||||
self.cleanup.append(template)
|
||||
self.debug("Template created from snapshot ID: %s" % snapshot.id)
|
||||
|
||||
# Verify created template
|
||||
templates = list_templates(
|
||||
self.apiclient,
|
||||
|
|
@ -842,6 +1045,7 @@ class TestSnapshots(cloudstackTestCase):
|
|||
template.id,
|
||||
"Check new template id in list resources call"
|
||||
)
|
||||
self.debug("Deploying new VM from template: %s" % template.id)
|
||||
|
||||
# Deploy new virtual machine using template
|
||||
new_virtual_machine = VirtualMachine.create(
|
||||
|
|
@ -854,9 +1058,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
)
|
||||
self.cleanup.append(new_virtual_machine)
|
||||
|
||||
#Login to VM & mount directory
|
||||
ssh = new_virtual_machine.get_ssh_client()
|
||||
cmds = [
|
||||
try:
|
||||
#Login to VM & mount directory
|
||||
ssh = new_virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount %s1 %s" % (
|
||||
self.services["rootdisk"],
|
||||
|
|
@ -864,21 +1070,25 @@ class TestSnapshots(cloudstackTestCase):
|
|||
)
|
||||
]
|
||||
|
||||
for c in cmds:
|
||||
ssh.execute(c)
|
||||
for c in cmds:
|
||||
ssh.execute(c)
|
||||
|
||||
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
self.services["mount_dir"],
|
||||
self.services["sub_dir"],
|
||||
self.services["sub_lvl_dir1"],
|
||||
self.services["random_data"]
|
||||
))
|
||||
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
|
||||
self.services["mount_dir"],
|
||||
self.services["sub_dir"],
|
||||
self.services["sub_lvl_dir2"],
|
||||
self.services["random_data"]
|
||||
))
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP address: %s" %
|
||||
new_virtual_machine.ipaddress)
|
||||
#Verify returned data
|
||||
self.assertEqual(
|
||||
random_data_0,
|
||||
|
|
@ -894,6 +1104,11 @@ class TestSnapshots(cloudstackTestCase):
|
|||
cmds = [
|
||||
"umount %s" % (self.services["mount_dir"]),
|
||||
]
|
||||
for c in cmds:
|
||||
result = ssh_client.execute(c)
|
||||
try:
|
||||
for c in cmds:
|
||||
ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for VM with IP address: %s" %
|
||||
new_virtual_machine.ipaddress)
|
||||
return
|
||||
|
|
|
|||
|
|
@ -22,9 +22,6 @@ class Services:
|
|||
|
||||
def __init__(self):
|
||||
self.services = {
|
||||
"cpvm": {
|
||||
"mgmtserverIP": '192.168.100.154' # For Telnet
|
||||
},
|
||||
"host": {
|
||||
"username": 'root', # Credentials for SSH
|
||||
"password": 'fr3sca',
|
||||
|
|
@ -33,7 +30,8 @@ class Services:
|
|||
"zoneid": 1,
|
||||
# Optional, if specified the mentioned zone will be
|
||||
# used for tests
|
||||
"sleep": 120,
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
}
|
||||
|
||||
class TestSSVMs(cloudstackTestCase):
|
||||
|
|
@ -73,7 +71,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
systemvmtype='secondarystoragevm'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify SSVM response
|
||||
self.assertNotEqual(
|
||||
len(list_ssvm_response),
|
||||
|
|
@ -82,6 +84,12 @@ class TestSSVMs(cloudstackTestCase):
|
|||
)
|
||||
|
||||
list_zones_response = list_zones(self.apiclient)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_zones_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
# Number of Sec storage VMs = No of Zones
|
||||
self.assertEqual(
|
||||
len(list_ssvm_response),
|
||||
|
|
@ -121,6 +129,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
zoneid=ssvm.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -134,7 +147,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=ssvm.zoneid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(zone_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertEqual(
|
||||
ssvm.dns1,
|
||||
zone_response[0].dns1,
|
||||
|
|
@ -166,7 +183,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
systemvmtype='consoleproxy'
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify CPVM response
|
||||
self.assertNotEqual(
|
||||
len(list_cpvm_response),
|
||||
|
|
@ -175,6 +196,13 @@ class TestSSVMs(cloudstackTestCase):
|
|||
)
|
||||
list_zones_response = list_zones(self.apiclient)
|
||||
# Number of Console Proxy VMs = No of Zones
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_zones_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
len(list_cpvm_response),
|
||||
len(list_zones_response),
|
||||
|
|
@ -211,6 +239,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
zoneid=cpvm.zoneid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(ipranges_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iprange = ipranges_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -256,6 +289,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
|
|
@ -263,7 +301,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='secondarystoragevm',
|
||||
hostid=host.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm = list_ssvm_response[0]
|
||||
|
||||
self.debug("Cheking cloud process status")
|
||||
|
|
@ -323,6 +365,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
|
|
@ -330,18 +377,22 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='consoleproxy',
|
||||
hostid=host.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cpvm = list_cpvm_response[0]
|
||||
|
||||
try:
|
||||
telnet = telnetlib.Telnet(
|
||||
self.services["cpvm"]["mgmtserverIP"],
|
||||
str(self.apiclient.connection.mgtSvr),
|
||||
'8250'
|
||||
)
|
||||
except Exception as e:
|
||||
self.fail(
|
||||
"Telnet Access failed for %s: %s" % \
|
||||
(self.services["cpvm"]["mgmtserverIP"], e)
|
||||
(self.apiclient.connection.mgtSvr, e)
|
||||
)
|
||||
|
||||
self.debug("Checking cloud process status")
|
||||
|
|
@ -379,6 +430,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
|
|
@ -386,20 +442,36 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='secondarystoragevm',
|
||||
hostid=host.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm = list_ssvm_response[0]
|
||||
|
||||
cmd = stopSystemVm.stopSystemVmCmd()
|
||||
cmd.id = ssvm.id
|
||||
self.apiclient.stopSystemVm(cmd)
|
||||
|
||||
#Sleep to ensure that SSVM is properly restarted
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
id=ssvm.id
|
||||
)
|
||||
|
||||
if isinstance(list_ssvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List SSVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
self.assertEqual(
|
||||
ssvm_response.state,
|
||||
|
|
@ -428,6 +500,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
|
|
@ -435,18 +512,30 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='consoleproxy',
|
||||
hostid=host.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cpvm = list_cpvm_response[0]
|
||||
|
||||
cmd = stopSystemVm.stopSystemVmCmd()
|
||||
cmd.id = cpvm.id
|
||||
self.apiclient.stopSystemVm(cmd)
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_cpvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
id=cpvm.id
|
||||
)
|
||||
if isinstance(list_cpvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List CPVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
cpvm_response = list_cpvm_response[0]
|
||||
|
||||
|
|
@ -475,6 +564,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
|
|
@ -482,6 +576,13 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='secondarystoragevm',
|
||||
hostid=host.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
|
||||
#Store the public & private IP values before reboot
|
||||
|
|
@ -492,13 +593,20 @@ class TestSSVMs(cloudstackTestCase):
|
|||
cmd.id = ssvm_response.id
|
||||
self.apiclient.rebootSystemVm(cmd)
|
||||
|
||||
#Sleep to ensure that SSVM is properly stopped/started
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
id=ssvm_response.id
|
||||
)
|
||||
if isinstance(list_ssvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List SSVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -537,6 +645,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
type='Routing',
|
||||
state='Up'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(hosts, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
host = hosts[0]
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
|
|
@ -544,6 +657,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='consoleproxy',
|
||||
hostid=host.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cpvm_response = list_cpvm_response[0]
|
||||
|
||||
#Store the public & private IP values before reboot
|
||||
|
|
@ -554,13 +672,19 @@ class TestSSVMs(cloudstackTestCase):
|
|||
cmd.id = cpvm_response.id
|
||||
self.apiclient.rebootSystemVm(cmd)
|
||||
|
||||
#Sleep to ensure that SSVM is properly stopped/started
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_cpvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
id=cpvm_response.id
|
||||
)
|
||||
if isinstance(list_cpvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List CPVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
cpvm_response = list_cpvm_response[0]
|
||||
|
||||
|
|
@ -601,6 +725,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
zoneid=self.zone.id,
|
||||
systemvmtype='secondarystoragevm'
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_ssvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
|
||||
old_name = ssvm_response.name
|
||||
|
|
@ -609,14 +738,21 @@ class TestSSVMs(cloudstackTestCase):
|
|||
cmd.id = ssvm_response.id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
|
||||
#Sleep to ensure that new SSVM is created
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_ssvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_ssvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
zoneid=self.zone.id,
|
||||
systemvmtype='secondarystoragevm'
|
||||
)
|
||||
if isinstance(list_ssvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List SSVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
ssvm_response = list_ssvm_response[0]
|
||||
|
||||
# Verify Name, Public IP, Private IP and Link local IP
|
||||
|
|
@ -643,6 +779,7 @@ class TestSSVMs(cloudstackTestCase):
|
|||
True,
|
||||
"Check whether SSVM has public IP field"
|
||||
)
|
||||
|
||||
#Call to verify cloud process is running
|
||||
self.test_03_ssvm_internals()
|
||||
return
|
||||
|
|
@ -663,6 +800,11 @@ class TestSSVMs(cloudstackTestCase):
|
|||
systemvmtype='consoleproxy',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_cpvm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
cpvm_response = list_cpvm_response[0]
|
||||
|
||||
old_name = cpvm_response.name
|
||||
|
|
@ -671,14 +813,21 @@ class TestSSVMs(cloudstackTestCase):
|
|||
cmd.id = cpvm_response.id
|
||||
self.apiclient.destroySystemVm(cmd)
|
||||
|
||||
#Sleep to ensure that new CPVM is created
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
list_cpvm_response = list_ssvms(
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
list_cpvm_response = list_ssvms(
|
||||
self.apiclient,
|
||||
systemvmtype='consoleproxy',
|
||||
zoneid=self.zone.id
|
||||
)
|
||||
if isinstance(list_cpvm_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List CPVM call failed!")
|
||||
|
||||
time.sleep(self.services["sleep"])
|
||||
timeout = timeout - 1
|
||||
|
||||
cpvm_response = list_cpvm_response[0]
|
||||
|
||||
# Verify Name, Public IP, Private IP and Link local IP
|
||||
|
|
@ -705,6 +854,7 @@ class TestSSVMs(cloudstackTestCase):
|
|||
True,
|
||||
"Check whether CPVM has public IP field"
|
||||
)
|
||||
|
||||
#Call to verify cloud process is running
|
||||
self.test_04_cpvm_internals()
|
||||
return
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ from testcase.libs.common import *
|
|||
import urllib
|
||||
from random import random
|
||||
#Import System modules
|
||||
import time
|
||||
import datetime
|
||||
|
||||
|
||||
class Services:
|
||||
|
|
@ -35,8 +35,8 @@ class Services:
|
|||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"disk_offering": {
|
||||
"displaytext": "Small",
|
||||
|
|
@ -72,7 +72,8 @@ class Services:
|
|||
"mode": "HTTP_DOWNLOAD",
|
||||
},
|
||||
"templatefilter": 'self',
|
||||
"destzoneid": 5, # For Copy template (Destination zone)
|
||||
"destzoneid": 5,
|
||||
# For Copy template (Destination zone)
|
||||
"isfeatured": True,
|
||||
"ispublic": True,
|
||||
"isextractable": False,
|
||||
|
|
@ -82,7 +83,10 @@ class Services:
|
|||
"zoneid": 1,
|
||||
# Optional, if specified the mentioned zone will be
|
||||
# used for tests
|
||||
"mode": 'advanced', # Networking mode: Advanced, basic
|
||||
"mode": 'advanced',
|
||||
# Networking mode: Advanced, basic
|
||||
"sleep": 30,
|
||||
"timeout": 10,
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -149,8 +153,30 @@ class TestCreateTemplate(cloudstackTestCase):
|
|||
#Stop virtual machine
|
||||
cls.virtual_machine.stop(cls.api_client)
|
||||
|
||||
#Wait before server has be successfully stopped
|
||||
time.sleep(30)
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = cls.services["timeout"]
|
||||
while True:
|
||||
time.sleep(cls.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
cls.api_client,
|
||||
id=cls.virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" %
|
||||
vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
list_volume = list_volumes(
|
||||
cls.api_client,
|
||||
virtualmachineid=cls.virtual_machine.id,
|
||||
|
|
@ -191,10 +217,14 @@ class TestCreateTemplate(cloudstackTestCase):
|
|||
template = Template.create(
|
||||
self.apiclient,
|
||||
self.services["template_1"],
|
||||
self.volume.id
|
||||
self.volume.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
self.cleanup.append(template)
|
||||
|
||||
self.debug("Created template with ID: %s" % template.id)
|
||||
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
templatefilter=\
|
||||
|
|
@ -202,11 +232,16 @@ class TestCreateTemplate(cloudstackTestCase):
|
|||
id=template.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
#Verify template response to check whether template added successfully
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
"Check template avaliable in List Templates"
|
||||
"Check template available in List Templates"
|
||||
)
|
||||
template_response = list_template_response[0]
|
||||
|
||||
|
|
@ -282,29 +317,58 @@ class TestTemplates(cloudstackTestCase):
|
|||
#Stop virtual machine
|
||||
cls.virtual_machine.stop(cls.api_client)
|
||||
|
||||
#Wait before server has be successfully stopped
|
||||
time.sleep(30)
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = cls.services["timeout"]
|
||||
while True:
|
||||
time.sleep(cls.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
cls.api_client,
|
||||
id=cls.virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" %
|
||||
vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
list_volume = list_volumes(
|
||||
cls.api_client,
|
||||
virtualmachineid=cls.virtual_machine.id,
|
||||
type='ROOT'
|
||||
)
|
||||
cls.volume = list_volume[0]
|
||||
try:
|
||||
cls.volume = list_volume[0]
|
||||
except Exception as e:
|
||||
raise Exception(
|
||||
"Exception: Unable to find root volume foe VM: %s" %
|
||||
cls.virtual_machine.id)
|
||||
|
||||
#Create templates for Edit, Delete & update permissions testcases
|
||||
cls.template_1 = Template.create(
|
||||
cls.api_client,
|
||||
cls.services["template_1"],
|
||||
cls.volume.id
|
||||
cls.volume.id,
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
cls.template_2 = Template.create(
|
||||
cls.api_client,
|
||||
cls.services["template_2"],
|
||||
cls.volume.id
|
||||
cls.volume.id,
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.template_2,
|
||||
cls.service_offering,
|
||||
cls.disk_offering,
|
||||
cls.account,
|
||||
|
|
@ -346,9 +410,9 @@ class TestTemplates(cloudstackTestCase):
|
|||
"""Test Edit template
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. UI should show the edited values for template
|
||||
# 2. database (vm_template table) should have updated values
|
||||
# Validate the following:
|
||||
# 1. UI should show the edited values for template
|
||||
# 2. database (vm_template table) should have updated values
|
||||
|
||||
new_displayText = random_gen()
|
||||
new_name = random_gen()
|
||||
|
|
@ -364,20 +428,45 @@ class TestTemplates(cloudstackTestCase):
|
|||
|
||||
self.apiclient.updateTemplate(cmd)
|
||||
|
||||
# Verify template response for updated attributes
|
||||
list_template_response = list_templates(
|
||||
self.debug("Edited template with new name: %s" % new_name)
|
||||
|
||||
# Sleep to ensure update reflected across all the calls
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
# Verify template response for updated attributes
|
||||
list_template_response = list_templates(
|
||||
self.apiclient,
|
||||
templatefilter=\
|
||||
self.services["templatefilter"],
|
||||
id=self.template_1.id
|
||||
id=self.template_1.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
if isinstance(list_template_response, list):
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception("List Template failed!")
|
||||
|
||||
time.sleep(10)
|
||||
timeout = timeout - 1
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
"Check template available in List Templates"
|
||||
)
|
||||
template_response = list_template_response[0]
|
||||
|
||||
|
||||
self.debug("New Name: %s" % new_displayText)
|
||||
self.debug("Name in Template response: %s"
|
||||
% template_response.displaytext)
|
||||
self.assertEqual(
|
||||
template_response.displaytext,
|
||||
new_displayText,
|
||||
|
|
@ -404,9 +493,11 @@ class TestTemplates(cloudstackTestCase):
|
|||
"""Test delete template
|
||||
"""
|
||||
|
||||
# Validate the following:
|
||||
# 1. UI should not show the deleted template
|
||||
# 2. database (vm_template table) should not contain deleted template
|
||||
# Validate the following:
|
||||
# 1. UI should not show the deleted template
|
||||
# 2. database (vm_template table) should not contain deleted template
|
||||
|
||||
self.debug("Deleting Template ID: %s" % self.template_1.id)
|
||||
|
||||
self.template_1.delete(self.apiclient)
|
||||
|
||||
|
|
@ -414,7 +505,9 @@ class TestTemplates(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
templatefilter=\
|
||||
self.services["templatefilter"],
|
||||
id=self.template_1.id
|
||||
id=self.template_1.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
# Verify template is deleted properly using ListTemplates
|
||||
self.assertEqual(
|
||||
|
|
@ -427,11 +520,13 @@ class TestTemplates(cloudstackTestCase):
|
|||
def test_04_extract_template(self):
|
||||
"Test for extract template"
|
||||
|
||||
# Validate the following
|
||||
# 1. Admin should able extract and download the templates
|
||||
# 2. ListTemplates should display all the public templates
|
||||
# for all kind of users
|
||||
# 3 .ListTemplates should not display the system templates
|
||||
# Validate the following
|
||||
# 1. Admin should able extract and download the templates
|
||||
# 2. ListTemplates should display all the public templates
|
||||
# for all kind of users
|
||||
# 3 .ListTemplates should not display the system templates
|
||||
|
||||
self.debug("Extracting template with ID: %s" % self.template_2.id)
|
||||
|
||||
cmd = extractTemplate.extractTemplateCmd()
|
||||
cmd.id = self.template_2.id
|
||||
|
|
@ -469,11 +564,13 @@ class TestTemplates(cloudstackTestCase):
|
|||
def test_05_template_permissions(self):
|
||||
"""Update & Test for template permissions"""
|
||||
|
||||
# Validate the following
|
||||
# 1. listTemplatePermissions returns valid
|
||||
# permissions set for template
|
||||
# 2. permission changes should be reflected in vm_template
|
||||
# table in database
|
||||
# Validate the following
|
||||
# 1. listTemplatePermissions returns valid
|
||||
# permissions set for template
|
||||
# 2. permission changes should be reflected in vm_template
|
||||
# table in database
|
||||
|
||||
self.debug("Updating Template permissions ID:%s" % self.template_2.id)
|
||||
|
||||
cmd = updateTemplatePermissions.updateTemplatePermissionsCmd()
|
||||
# Update template permissions
|
||||
|
|
@ -481,7 +578,6 @@ class TestTemplates(cloudstackTestCase):
|
|||
cmd.isfeatured = self.services["isfeatured"]
|
||||
cmd.ispublic = self.services["ispublic"]
|
||||
cmd.isextractable = self.services["isextractable"]
|
||||
|
||||
self.apiclient.updateTemplatePermissions(cmd)
|
||||
|
||||
list_template_response = list_templates(
|
||||
|
|
@ -491,7 +587,11 @@ class TestTemplates(cloudstackTestCase):
|
|||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
# Verify template response for updated permissions for normal user
|
||||
template_response = list_template_response[0]
|
||||
|
||||
|
|
@ -520,6 +620,10 @@ class TestTemplates(cloudstackTestCase):
|
|||
# 1. copy template should be successful and
|
||||
# secondary storage should contain new copied template.
|
||||
|
||||
self.debug("Copy template from Zone: %s to %s" % (
|
||||
self.services["sourcezoneid"],
|
||||
self.services["destzoneid"]
|
||||
))
|
||||
cmd = copyTemplate.copyTemplateCmd()
|
||||
cmd.id = self.template_2.id
|
||||
cmd.destzoneid = self.services["destzoneid"]
|
||||
|
|
@ -534,6 +638,11 @@ class TestTemplates(cloudstackTestCase):
|
|||
id=self.template_2.id,
|
||||
zoneid=self.services["destzoneid"]
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
|
|
@ -571,7 +680,11 @@ class TestTemplates(cloudstackTestCase):
|
|||
account=self.user.account.name,
|
||||
domainid=self.user.account.domainid
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
0,
|
||||
|
|
@ -598,6 +711,11 @@ class TestTemplates(cloudstackTestCase):
|
|||
account=self.user.account.name,
|
||||
domainid=self.user.account.domainid
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_template_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_template_response),
|
||||
|
|
|
|||
|
|
@ -103,6 +103,8 @@ class Services:
|
|||
"diskdevice": '/dev/xvdd',
|
||||
# Disk device where ISO is attached to instance
|
||||
"mount_dir": "/mnt/tmp",
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
"hostid": 5,
|
||||
#Migrate VM to hostid
|
||||
"ostypeid": 12,
|
||||
|
|
@ -114,6 +116,7 @@ class Services:
|
|||
# Networking mode: Basic or Advanced
|
||||
}
|
||||
|
||||
|
||||
class TestDeployVM(cloudstackTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
|
@ -178,14 +181,20 @@ class TestDeployVM(cloudstackTestCase):
|
|||
"Verify listVirtualMachines response for virtual machine: %s" \
|
||||
% self.virtual_machine.id
|
||||
)
|
||||
|
||||
vm_response = list_vm_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
"Check VM available in List Virtual Machines"
|
||||
)
|
||||
|
||||
vm_response = list_vm_response[0]
|
||||
|
||||
self.assertEqual(
|
||||
|
||||
vm_response.id,
|
||||
|
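The check added here (confirm the response is a non-empty list before indexing into it) is the same guard this commit adds around every list* call. If it keeps being repeated it could be factored into a small helper; the sketch below is illustrative only, and validate_list is a hypothetical name, not part of these test utilities.

def validate_list(testcase, response, description="list response"):
    """Assert that an API list response is a non-empty list and return its head."""
    # Guard against None / error responses before touching response[0].
    testcase.assertEqual(isinstance(response, list), True,
                         "Check %s returns a valid list" % description)
    testcase.assertNotEqual(len(response), 0,
                            "Check %s is not empty" % description)
    return response[0]

# Hypothetical usage inside a test:
#   vm_response = validate_list(self, list_vm_response, "listVirtualMachines")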
|
@ -297,13 +306,19 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 2. listVM command should return
|
||||
# this VM.State of this VM should be ""Stopped"".
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.stop(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -323,13 +338,20 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# Validate the following
|
||||
# 1. listVM command should return this VM.State
|
||||
# of this VM should be Running".
|
||||
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -356,13 +378,19 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 2. listVM command should return the deployed VM.
|
||||
# State of this VM should be "Running"
|
||||
|
||||
self.debug("Rebooting VM - ID: %s" % self.virtual_machine.id)
|
||||
self.small_virtual_machine.reboot(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -385,21 +413,72 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# this Vm matches the one specified for "Small" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Small service offering Id.
|
||||
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
|
||||
self.medium_virtual_machine.stop(self.apiclient)
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.medium_virtual_machine.id
|
||||
cmd.serviceofferingid = self.small_offering.id
|
||||
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
self.medium_virtual_machine.start(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change Service offering VM - ID: %s" %
|
||||
self.medium_virtual_machine.id)
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.medium_virtual_machine.id
|
||||
cmd.serviceofferingid = self.small_offering.id
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
|
||||
self.medium_virtual_machine.start(self.apiclient)
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
try:
|
||||
ssh = self.medium_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
|
|
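The stop/start polling added above always has the same shape: sleep, list the VM, break once the expected state is reached, and give up when the countdown runs out. A generic version could look like the sketch below; wait_for_vm_state and its defaults are illustrative, and it assumes the list_virtual_machines helper used throughout these tests.

import time

def wait_for_vm_state(apiclient, vm_id, expected_state, sleep=60, retries=10):
    """Poll listVirtualMachines until the VM reaches expected_state or give up."""
    while retries > 0:
        time.sleep(sleep)
        vms = list_virtual_machines(apiclient, id=vm_id)
        # Only trust the response once it is a non-empty list
        if isinstance(vms, list) and len(vms) > 0:
            if vms[0].state == expected_state:
                return vms[0]
        retries = retries - 1
    raise Exception("VM %s did not reach state %s in time" %
                    (vm_id, expected_state))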
@ -417,20 +496,26 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
meminfo = ssh.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
self.assertEqual(
|
||||
cpu_cnt,
|
||||
|
||||
self.debug(
|
||||
"CUP count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
cpu_speed,
|
||||
total_mem
|
||||
))
|
||||
self.assertAlmostEqual(
|
||||
int(cpu_cnt),
|
||||
self.small_offering.cpunumber,
|
||||
"Check CPU Count for small offering"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertAlmostEqual(
|
||||
list_vm_response[0].cpuspeed,
|
||||
self.small_offering.cpuspeed,
|
||||
"Check CPU Speed for small offering"
|
||||
)
|
||||
self.assertEqual(
|
||||
total_mem,
|
||||
self.assertAlmostEqual(
|
||||
int(total_mem) / 1024, # In MBs
|
||||
self.small_offering.memory,
|
||||
"Check Memory(kb) for small offering"
|
||||
)
|
||||
|
|
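For reference on the unit handling above: /proc/meminfo reports MemTotal in kB while the service offering defines memory in MB, hence the division by 1024. A 64 MB offering, for example, corresponds to a MemTotal of about 64 * 1024 = 65536 kB, and since this code base is Python 2, int(total_mem) / 1024 is integer division, so the comparison is against a whole number of MB.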
@ -446,24 +531,76 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# this Vm matches the one specified for "Medium" service offering.
|
||||
# 2. Using listVM command verify that this Vm
|
||||
# has Medium service offering Id.
|
||||
|
||||
# Sleep to ensure that VM is in proper state
|
||||
time.sleep(120)
|
||||
|
||||
self.debug("Stopping VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.stop(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is stopped properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in stopped state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Stopped':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to stop VM (ID: %s) in change service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
self.debug("Change service offering VM - ID: %s" %
|
||||
self.small_virtual_machine.id)
|
||||
|
||||
cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
cmd.serviceofferingid = self.medium_offering.id
|
||||
self.apiclient.changeServiceForVirtualMachine(cmd)
|
||||
|
||||
|
||||
self.debug("Starting VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.start(self.apiclient)
|
||||
|
||||
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) after changing service offering" % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
# Sleep to ensure that VM is started properly
|
||||
time.sleep(120)
|
||||
|
||||
try:
|
||||
ssh_client = self.small_virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
|
|
@ -471,7 +608,7 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
"SSH Access failed for %s: %s" % \
|
||||
(self.small_virtual_machine.ipaddress, e)
|
||||
)
|
||||
|
||||
|
||||
cpuinfo = ssh_client.execute("cat /proc/cpuinfo")
|
||||
|
||||
cpu_cnt = len([i for i in cpuinfo if "processor" in i])
|
||||
|
|
@ -481,20 +618,27 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
meminfo = ssh_client.execute("cat /proc/meminfo")
|
||||
#MemTotal: 1017464 kB
|
||||
total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
|
||||
|
||||
self.assertEqual(
|
||||
cpu_cnt,
|
||||
|
||||
self.debug(
|
||||
"CUP count: %s, CPU Speed: %s, Mem Info: %s" % (
|
||||
cpu_cnt,
|
||||
cpu_speed,
|
||||
total_mem
|
||||
))
|
||||
self.assertAlmostEqual(
|
||||
int(cpu_cnt),
|
||||
self.medium_offering.cpunumber,
|
||||
"Check CPU Count for medium offering"
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
self.assertAlmostEqual(
|
||||
list_vm_response[0].cpuspeed,
|
||||
self.medium_offering.cpuspeed,
|
||||
"Check CPU Speed for medium offering"
|
||||
)
|
||||
self.assertEqual(
|
||||
total_mem,
|
||||
|
||||
self.assertAlmostEqual(
|
||||
int(total_mem) / 1024, # In MBs
|
||||
self.medium_offering.memory,
|
||||
"Check Memory(kb) for medium offering"
|
||||
)
|
||||
|
|
@ -508,13 +652,20 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 1. Should not be able to login to the VM.
|
||||
# 2. listVM command should return this VM.State
|
||||
# of this VM should be "Destroyed".
|
||||
|
||||
|
||||
self.debug("Destroy VM - ID: %s" % self.small_virtual_machine.id)
|
||||
self.small_virtual_machine.delete(self.apiclient)
|
||||
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -537,7 +688,9 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 1. listVM command should return this VM.
|
||||
# State of this VM should be "Stopped".
|
||||
# 2. We should be able to Start this VM successfully.
|
||||
|
||||
|
||||
self.debug("Recovering VM - ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.recoverVirtualMachine(cmd)
|
||||
|
|
@ -546,7 +699,12 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
len(list_vm_response),
|
||||
0,
|
||||
|
|
@ -569,7 +727,12 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 2. listVM command should return this VM.State of this VM
|
||||
# should be "Running" and the host should be the host
|
||||
# to which the VM was migrated to
|
||||
|
||||
|
||||
self.debug("Migrating VM-ID: %s to Host: %s" % (
|
||||
self.medium_virtual_machine.id,
|
||||
self.services["hostid"]
|
||||
))
|
||||
|
||||
cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
|
||||
cmd.hostid = self.services["hostid"]
|
||||
cmd.virtualmachineid = self.medium_virtual_machine.id
|
||||
|
|
@ -579,6 +742,12 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.medium_virtual_machine.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_vm_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_vm_response,
|
||||
None,
|
||||
|
|
@ -605,7 +774,9 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
"""
|
||||
# Validate the following
|
||||
# 1. listVM command should NOT return this VM any more.
|
||||
|
||||
|
||||
self.debug("Expunge VM-ID: %s" % self.small_virtual_machine.id)
|
||||
|
||||
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
|
||||
cmd.id = self.small_virtual_machine.id
|
||||
self.apiclient.destroyVirtualMachine(cmd)
|
||||
|
|
@ -623,7 +794,6 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=self.small_virtual_machine.id
|
||||
)
|
||||
|
||||
self.assertEqual(
|
||||
list_vm_response,
|
||||
None,
|
||||
|
|
@ -642,32 +812,55 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
# 5. Detach ISO
|
||||
# 6. Check the device is properly detached by logging into VM
|
||||
|
||||
iso = Iso.create(self.apiclient, self.services["iso"])
|
||||
self.cleanup.append(iso)
|
||||
iso.download(self.apiclient)
|
||||
|
||||
iso = Iso.create(
|
||||
self.apiclient,
|
||||
self.services["iso"],
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid
|
||||
)
|
||||
|
||||
self.debug("Successfully created ISO with ID: %s" % iso.id)
|
||||
try:
|
||||
iso.download(self.apiclient)
|
||||
except Exception as e:
|
||||
self.fail("Exception while downloading ISO %s: %s"\
|
||||
% (iso.id, e))
|
||||
|
||||
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
|
||||
iso.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
#Attach ISO to virtual machine
|
||||
cmd = attachIso.attachIsoCmd()
|
||||
cmd.id = iso.id
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.attachIso(cmd)
|
||||
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount -rt iso9660 %s %s" \
|
||||
% (self.services["diskdevice"], self.services["mount_dir"]),
|
||||
|
||||
try:
|
||||
ssh_client = self.virtual_machine.get_ssh_client()
|
||||
|
||||
cmds = [
|
||||
"mkdir -p %s" % self.services["mount_dir"],
|
||||
"mount -rt iso9660 %s %s" \
|
||||
% (
|
||||
self.services["diskdevice"],
|
||||
self.services["mount_dir"]
|
||||
),
|
||||
]
|
||||
for c in cmds:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
for c in cmds:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
self.assertEqual(res, [], "Check mount is successful or not")
|
||||
self.assertEqual(res, [], "Check mount is successful or not")
|
||||
|
||||
c = "fdisk -l|grep %s|head -1" % self.services["diskdevice"]
|
||||
res = ssh_client.execute(c)
|
||||
#Disk /dev/xvdd: 4393 MB, 4393723904 bytes
|
||||
c = "fdisk -l|grep %s|head -1" % self.services["diskdevice"]
|
||||
res = ssh_client.execute(c)
|
||||
#Disk /dev/xvdd: 4393 MB, 4393723904 bytes
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
# Res may contain more than one string depending on the environment
|
||||
# Split strings to form new list which is used for assertion on ISO size
|
||||
result = []
|
||||
|
|
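All of the SSH interaction in this hunk is now wrapped in try/except so that an unreachable guest fails the test with a readable message rather than an unhandled SSH exception. A minimal wrapper in the same spirit is sketched below; run_ssh_or_fail is a hypothetical helper, not part of the test libraries, and it assumes the get_ssh_client() method used throughout these tests.

def run_ssh_or_fail(testcase, vm, commands):
    """Run shell commands on a VM over SSH, failing the test on any SSH error."""
    try:
        ssh_client = vm.get_ssh_client()
        # Collect the output of each command for later assertions
        return [ssh_client.execute(c) for c in commands]
    except Exception as e:
        testcase.fail("SSH failed for virtual machine: %s - %s" %
                      (vm.ipaddress, e))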
@ -680,6 +873,11 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
self.apiclient,
|
||||
id=iso.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(iso_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
iso_size = iso_response[0].size
|
||||
|
||||
self.assertEqual(
|
||||
|
|
@ -687,18 +885,29 @@ class TestVMLifeCycle(cloudstackTestCase):
|
|||
True,
|
||||
"Check size of the attached ISO"
|
||||
)
|
||||
|
||||
#Unmount ISO
|
||||
command = "umount %s" % self.services["diskdevice"]
|
||||
ssh_client.execute(command)
|
||||
|
||||
try:
|
||||
#Unmount ISO
|
||||
command = "umount %s" % self.services["mount_dir"]
|
||||
ssh_client.execute(command)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
#Detach from VM
|
||||
cmd = detachIso.detachIsoCmd()
|
||||
cmd.virtualmachineid = self.virtual_machine.id
|
||||
self.apiclient.detachIso(cmd)
|
||||
|
||||
res = ssh_client.execute(c)
|
||||
result = self.services["diskdevice"] in res[0].split()
|
||||
|
||||
try:
|
||||
res = ssh_client.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH failed for virtual machine: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
# Check if ISO is properly detached from VM (using fdisk)
|
||||
result = self.services["diskdevice"] in str(res)
|
||||
|
||||
self.assertEqual(
|
||||
result,
|
||||
|
|
|
|||
|
|
@ -37,8 +37,8 @@ class Services:
|
|||
"name": "Tiny Instance",
|
||||
"displaytext": "Tiny Instance",
|
||||
"cpunumber": 1,
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
"cpuspeed": 100, # in MHz
|
||||
"memory": 64, # In MBs
|
||||
},
|
||||
"disk_offering": {
|
||||
"displaytext": "Small",
|
||||
|
|
@ -51,8 +51,8 @@ class Services:
|
|||
"domainid": 1,
|
||||
},
|
||||
},
|
||||
"customdisksize": 1, # GBs
|
||||
"username": "root", # Creds for SSH to VM
|
||||
"customdisksize": 1, # GBs
|
||||
"username": "root", # Creds for SSH to VM
|
||||
"password": "password",
|
||||
"ssh_port": 22,
|
||||
"diskname": "TestDiskServ",
|
||||
|
|
@ -67,6 +67,8 @@ class Services:
|
|||
# Optional, if specified the mentioned zone will be
|
||||
# used for tests
|
||||
"mode": 'advanced',
|
||||
"sleep": 60,
|
||||
"timeout": 10,
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -134,7 +136,7 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
|
||||
# Validate the following
|
||||
# 1. Create volumes from the different sizes
|
||||
# 2. Verify the size of volume with acrual size allocated
|
||||
# 2. Verify the size of volume with actual size allocated
|
||||
|
||||
self.volumes = []
|
||||
for k, v in self.services["volume_offerings"].items():
|
||||
|
|
@ -143,14 +145,20 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
v,
|
||||
zoneid=self.zone.id,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
diskofferingid=self.disk_offering.id
|
||||
)
|
||||
self.debug("Created a volume with ID: %s" % volume.id)
|
||||
self.volumes.append(volume)
|
||||
self.cleanup.append(volume)
|
||||
|
||||
volume = Volume.create_custom_disk(self.apiClient, self.services)
|
||||
volume = Volume.create_custom_disk(
|
||||
self.apiClient,
|
||||
self.services,
|
||||
account=self.account.account.name,
|
||||
domainid=self.account.account.domainid,
|
||||
)
|
||||
self.debug("Created a volume with custom offering: %s" % volume.id)
|
||||
self.volumes.append(volume)
|
||||
self.cleanup.append(volume)
|
||||
|
||||
#Attach a volume with different disk offerings
|
||||
#and check the memory allocated to each of them
|
||||
|
|
@ -159,31 +167,74 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
self.apiClient,
|
||||
id=volume.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_volume_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
list_volume_response,
|
||||
None,
|
||||
"Check if volume exists in ListVolumes"
|
||||
)
|
||||
self.debug(
|
||||
"Attaching volume (ID: %s) to VM (ID: %s)" % (
|
||||
volume.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
self.virtual_machine.attach_volume(
|
||||
self.apiClient,
|
||||
volume
|
||||
)
|
||||
try:
|
||||
ssh = self.virtual_machine.get_ssh_client()
|
||||
|
||||
attached_volume = self.virtual_machine.attach_volume(
|
||||
self.apiClient,
|
||||
volume
|
||||
)
|
||||
ssh.execute("reboot")
|
||||
|
||||
ssh = self.virtual_machine.get_ssh_client()
|
||||
except Exception as e:
|
||||
self.fail("SSH access failed for VM %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
ssh.execute("reboot")
|
||||
#Sleep to ensure the machine is rebooted properly
|
||||
time.sleep(120)
|
||||
ssh = self.virtual_machine.get_ssh_client(
|
||||
# Poll listVM to ensure VM is started properly
|
||||
timeout = self.services["timeout"]
|
||||
while True:
|
||||
time.sleep(self.services["sleep"])
|
||||
|
||||
# Ensure that VM is in running state
|
||||
list_vm_response = list_virtual_machines(
|
||||
self.apiClient,
|
||||
id=self.virtual_machine.id
|
||||
)
|
||||
|
||||
if isinstance(list_vm_response, list):
|
||||
|
||||
vm = list_vm_response[0]
|
||||
if vm.state == 'Running':
|
||||
self.debug("VM state: %s" % vm.state)
|
||||
break
|
||||
|
||||
if timeout == 0:
|
||||
raise Exception(
|
||||
"Failed to start VM (ID: %s) " % vm.id)
|
||||
|
||||
timeout = timeout - 1
|
||||
|
||||
try:
|
||||
ssh = self.virtual_machine.get_ssh_client(
|
||||
reconnect=True
|
||||
)
|
||||
c = "fdisk -l"
|
||||
res = ssh.execute(c)
|
||||
c = "fdisk -l"
|
||||
res = ssh.execute(c)
|
||||
|
||||
except Exception as e:
|
||||
self.fail("SSH access failed for VM: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
|
||||
# Disk /dev/sda doesn't contain a valid partition table
|
||||
# Disk /dev/sda: 21.5 GB, 21474836480 bytes
|
||||
|
||||
result = str(res)
|
||||
self.debug("fdisk result: %s" % result)
|
||||
|
||||
self.assertEqual(
|
||||
str(list_volume_response[0].size) in result,
|
||||
True,
|
||||
|
|
@ -204,6 +255,7 @@ class TestCreateVolume(cloudstackTestCase):
|
|||
except Exception as e:
|
||||
raise Exception("Warning: Exception during cleanup : %s" % e)
|
||||
|
||||
|
||||
class TestVolumes(cloudstackTestCase):
|
||||
|
||||
@classmethod
|
||||
|
|
@ -246,7 +298,9 @@ class TestVolumes(cloudstackTestCase):
|
|||
|
||||
cls.volume = Volume.create(
|
||||
cls.api_client,
|
||||
cls.services
|
||||
cls.services,
|
||||
account=cls.account.account.name,
|
||||
domainid=cls.account.account.domainid
|
||||
)
|
||||
cls._cleanup = [
|
||||
cls.service_offering,
|
||||
|
|
@ -273,15 +327,22 @@ class TestVolumes(cloudstackTestCase):
|
|||
# 2. "Attach Disk" pop-up box will display with list of instances
|
||||
# 3. disk should be attached to instance successfully
|
||||
|
||||
self.debug(
|
||||
"Attaching volume (ID: %s) to VM (ID: %s)" % (
|
||||
self.volume.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
self.virtual_machine.attach_volume(self.apiClient, self.volume)
|
||||
|
||||
#Sleep to ensure the current state will reflected in other calls
|
||||
time.sleep(60)
|
||||
|
||||
list_volume_response = list_volumes(
|
||||
self.apiClient,
|
||||
id=self.volume.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_volume_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
self.assertNotEqual(
|
||||
list_volume_response,
|
||||
None,
|
||||
|
|
@ -293,9 +354,15 @@ class TestVolumes(cloudstackTestCase):
|
|||
None,
|
||||
"Check if volume state (attached) is reflected"
|
||||
)
|
||||
try:
|
||||
#Format the attached volume to a known fs
|
||||
format_volume_to_ext3(self.virtual_machine.get_ssh_client())
|
||||
|
||||
#Format the attached volume to a known fs
|
||||
format_volume_to_ext3(self.virtual_machine.get_ssh_client())
|
||||
except Exception as e:
|
||||
|
||||
self.fail("SSH failed for VM: %s - %s" %
|
||||
(self.virtual_machine.ipaddress, e))
|
||||
return
|
||||
|
||||
def test_03_download_attached_volume(self):
|
||||
"""Download a Volume attached to a VM
|
||||
|
|
@ -305,6 +372,8 @@ class TestVolumes(cloudstackTestCase):
|
|||
# "Failed - Invalid state of the volume with ID:
|
||||
# It should be either detached or the VM should be in stopped state
|
||||
|
||||
self.debug("Extract attached Volume ID: %s" % self.volume.id)
|
||||
|
||||
cmd = extractVolume.extractVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
cmd.mode = "HTTP_DOWNLOAD"
|
||||
|
|
@ -323,13 +392,15 @@ class TestVolumes(cloudstackTestCase):
|
|||
# "Failed - Invalid state of the volume with ID:
|
||||
# It should be either detached or the VM should be in stopped state
|
||||
|
||||
self.debug("Trying to delete attached Volume ID: %s" %
|
||||
self.volume.id)
|
||||
|
||||
cmd = deleteVolume.deleteVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
#Proper exception should be raised; deleting attach VM is not allowed
|
||||
with self.assertRaises(Exception):
|
||||
self.apiClient.deleteVolume(cmd)
|
||||
|
||||
|
||||
def test_05_detach_volume(self):
|
||||
"""Detach a Volume attached to a VM
|
||||
"""
|
||||
|
|
@ -338,13 +409,25 @@ class TestVolumes(cloudstackTestCase):
|
|||
# Data disk should be detached from instance and detached data disk
|
||||
# details should be updated properly
|
||||
|
||||
self.debug(
|
||||
"Detaching volume (ID: %s) from VM (ID: %s)" % (
|
||||
self.volume.id,
|
||||
self.virtual_machine.id
|
||||
))
|
||||
|
||||
self.virtual_machine.detach_volume(self.apiClient, self.volume)
|
||||
#Sleep to ensure the current state will reflected in other calls
|
||||
time.sleep(60)
|
||||
time.sleep(self.services["sleep"])
|
||||
list_volume_response = list_volumes(
|
||||
self.apiClient,
|
||||
id=self.volume.id
|
||||
)
|
||||
self.assertEqual(
|
||||
isinstance(list_volume_response, list),
|
||||
True,
|
||||
"Check list response returns a valid list"
|
||||
)
|
||||
|
||||
self.assertNotEqual(
|
||||
list_volume_response,
|
||||
None,
|
||||
|
|
@ -364,6 +447,8 @@ class TestVolumes(cloudstackTestCase):
|
|||
# Validate the following
|
||||
# 1. able to download the volume when its not attached to instance
|
||||
|
||||
self.debug("Extract detached Volume ID: %s" % self.volume.id)
|
||||
|
||||
cmd = extractVolume.extractVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
cmd.mode = "HTTP_DOWNLOAD"
|
||||
|
|
@ -380,8 +465,7 @@ class TestVolumes(cloudstackTestCase):
|
|||
fd.write(response.read())
|
||||
fd.close()
|
||||
|
||||
except Exception as e:
|
||||
print e
|
||||
except Exception:
|
||||
self.fail(
|
||||
"Extract Volume Failed with invalid URL %s (vol id: %s)" \
|
||||
% (extract_vol.url, self.volume.id)
|
||||
|
|
@ -397,11 +481,12 @@ class TestVolumes(cloudstackTestCase):
|
|||
# (UI should not allow to delete the volume when it is attached
|
||||
# to instance by hiding the menu Item)
|
||||
|
||||
self.debug("Delete Volume ID: %s" % self.volume.id)
|
||||
|
||||
cmd = deleteVolume.deleteVolumeCmd()
|
||||
cmd.id = self.volume.id
|
||||
self.apiClient.deleteVolume(cmd)
|
||||
|
||||
time.sleep(60)
|
||||
list_volume_response = list_volumes(
|
||||
self.apiClient,
|
||||
id=self.volume.id,
|
||||
|
|
@ -412,3 +497,4 @@ class TestVolumes(cloudstackTestCase):
|
|||
None,
|
||||
"Check if volume exists in ListVolumes"
|
||||
)
|
||||
return
|
||||
|
|
|
|||
|
|
@ -12,6 +12,8 @@ from cloudstackAPI import *
|
|||
#Import System modules
|
||||
import time
|
||||
import hashlib
|
||||
import base64
|
||||
import types
|
||||
|
||||
|
||||
class Domain:
|
||||
|
|
@ -160,10 +162,11 @@ class VirtualMachine:
|
|||
@classmethod
|
||||
def create(cls, apiclient, services, templateid=None, accountid=None,
|
||||
domainid=None, networkids=None, serviceofferingid=None,
|
||||
mode='basic'):
|
||||
securitygroupids=None, mode='basic'):
|
||||
"""Create the instance"""
|
||||
|
||||
cmd = deployVirtualMachine.deployVirtualMachineCmd()
|
||||
|
||||
|
||||
if serviceofferingid:
|
||||
cmd.serviceofferingid = serviceofferingid
|
||||
elif "serviceoffering" in services:
|
||||
|
|
@ -204,8 +207,33 @@ class VirtualMachine:
|
|||
|
||||
if "diskoffering" in services:
|
||||
cmd.diskofferingid = services["diskoffering"]
|
||||
|
||||
|
||||
if securitygroupids:
|
||||
cmd.securitygroupids = securitygroupids
|
||||
|
||||
if "userdata" in services:
|
||||
cmd.userdata = base64.b64encode(services["userdata"])
|
||||
|
||||
virtual_machine = apiclient.deployVirtualMachine(cmd)
|
||||
|
||||
# VM should be in Running state after deploy
|
||||
timeout = 10
|
||||
while True:
|
||||
vm_status = VirtualMachine.list(
|
||||
apiclient,
|
||||
id=virtual_machine.id
|
||||
)
|
||||
if isinstance(vm_status, list):
|
||||
if vm_status[0].state == 'Running':
|
||||
break
|
||||
elif timeout == 0:
|
||||
raise Exception(
|
||||
"TimeOutException: Failed to start VM (ID: %s)" %
|
||||
virtual_machine.id)
|
||||
|
||||
time.sleep(30)
|
||||
timeout = timeout - 1
|
||||
|
||||
if mode.lower() == 'advanced':
|
||||
public_ip = PublicIPAddress.create(
|
||||
apiclient,
|
||||
|
|
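With the Running-state poll now built into create(), callers no longer need their own post-deploy sleep, and the new securitygroupids parameter can be passed straight through. A hedged usage sketch follows; the services dictionary key, template, offering and security group objects are assumptions about the surrounding test, not part of this change.

virtual_machine = VirtualMachine.create(
    apiclient,
    services["virtual_machine"],      # assumed key in the services dict
    templateid=template.id,
    serviceofferingid=service_offering.id,
    securitygroupids=[security_group.id],
    mode='basic'
)
# create() returns only after listVirtualMachines reports 'Running',
# or raises TimeOutException once its internal retries are exhausted.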
@ -332,27 +360,43 @@ class Volume:
|
|||
return Volume(apiclient.createVolume(cmd).__dict__)
|
||||
|
||||
@classmethod
|
||||
def create_custom_disk(cls, apiclient, services):
|
||||
def create_custom_disk(cls, apiclient, services, account=None, domainid=None):
|
||||
"""Create Volume from Custom disk offering"""
|
||||
cmd = createVolume.createVolumeCmd()
|
||||
cmd.name = services["diskname"]
|
||||
cmd.diskofferingid = services["customdiskofferingid"]
|
||||
cmd.size = services["customdisksize"]
|
||||
cmd.zoneid = services["zoneid"]
|
||||
cmd.account = services["account"]
|
||||
cmd.domainid = services["domainid"]
|
||||
|
||||
if account:
|
||||
cmd.account = account
|
||||
else:
|
||||
cmd.account = services["account"]
|
||||
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
else:
|
||||
cmd.domainid = services["domainid"]
|
||||
|
||||
return Volume(apiclient.createVolume(cmd).__dict__)
|
||||
|
||||
@classmethod
|
||||
def create_from_snapshot(cls, apiclient, snapshot_id, services):
|
||||
def create_from_snapshot(cls, apiclient, snapshot_id, services,
|
||||
account=None, domainid=None):
|
||||
"""Create Volume from snapshot"""
|
||||
cmd = createVolume.createVolumeCmd()
|
||||
cmd.name = "-".join([services["diskname"], random_gen()])
|
||||
cmd.snapshotid = snapshot_id
|
||||
cmd.zoneid = services["zoneid"]
|
||||
cmd.size = services["size"]
|
||||
cmd.account = services["account"]
|
||||
cmd.domainid = services["domainid"]
|
||||
if account:
|
||||
cmd.account = account
|
||||
else:
|
||||
cmd.account = services["account"]
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
else:
|
||||
cmd.domainid = services["domainid"]
|
||||
return Volume(apiclient.createVolume(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
|
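Both volume factory methods above now take optional account/domainid arguments that win over the values baked into the services dictionary. A sketch of the intended call (the account object is assumed to come from the test's setup, as in the test classes earlier in this commit):

volume = Volume.create_custom_disk(
    apiclient,
    services,
    account=account.account.name,
    domainid=account.account.domainid
)
# Omit account/domainid to fall back to services["account"] and
# services["domainid"], exactly as before this change.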
|
@ -369,6 +413,7 @@ class Volume:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listVolumes(cmd))
|
||||
|
||||
|
||||
class Snapshot:
|
||||
"""Manage Snapshot Lifecycle
|
||||
"""
|
||||
|
|
@ -410,7 +455,7 @@ class Template:
|
|||
@classmethod
|
||||
def create(cls, apiclient, services, volumeid=None,
|
||||
account=None, domainid=None):
|
||||
"""Create template from Volume or URL"""
|
||||
"""Create template from Volume"""
|
||||
#Create template from Virtual machine and Volume ID
|
||||
cmd = createTemplate.createTemplateCmd()
|
||||
cmd.displaytext = services["displaytext"]
|
||||
|
|
@ -420,7 +465,8 @@ class Template:
|
|||
cmd.isfeatured = services["isfeatured"] if "isfeatured" in services else False
|
||||
cmd.ispublic = services["ispublic"] if "ispublic" in services else False
|
||||
cmd.isextractable = services["isextractable"] if "isextractable" in services else False
|
||||
|
||||
cmd.passwordenabled = services["passwordenabled"] if "passwordenabled" in services else False
|
||||
|
||||
if volumeid:
|
||||
cmd.volumeid = volumeid
|
||||
|
||||
|
|
@ -434,7 +480,8 @@ class Template:
|
|||
|
||||
@classmethod
|
||||
def register(cls, apiclient, services, account=None, domainid=None):
|
||||
"""Create template from Volume or URL"""
|
||||
"""Create template from URL"""
|
||||
|
||||
#Create template from Virtual machine and Volume ID
|
||||
cmd = registerTemplate.registerTemplateCmd()
|
||||
cmd.displaytext = services["displaytext"]
|
||||
|
|
@ -454,35 +501,48 @@ class Template:
|
|||
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
|
||||
return Template(apiclient.registerTemplate(cmd)[0].__dict__)
|
||||
|
||||
# Register Template
|
||||
template = apiclient.registerTemplate(cmd)
|
||||
|
||||
if isinstance(template, list):
|
||||
return Template(template[0].__dict__)
|
||||
|
||||
@classmethod
|
||||
def create_from_snapshot(cls, apiclient, snapshot, services):
|
||||
def create_from_snapshot(cls, apiclient, snapshot, services, random_name=True):
|
||||
"""Create Template from snapshot"""
|
||||
#Create template from Virtual machine and Snapshot ID
|
||||
cmd = createTemplate.createTemplateCmd()
|
||||
cmd.displaytext = services["displaytext"]
|
||||
cmd.name = "-".join([services["name"], random_gen()])
|
||||
cmd.name = "-".join([
|
||||
services["name"],
|
||||
random_gen()
|
||||
]) if random_name else services["name"]
|
||||
cmd.ostypeid = services["ostypeid"]
|
||||
cmd.snapshotid = snapshot.id
|
||||
return Template(apiclient.createTemplate(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Template"""
|
||||
|
||||
cmd = deleteTemplate.deleteTemplateCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.deleteTemplate(cmd)
|
||||
|
||||
def download(self, apiclient):
|
||||
def download(self, apiclient, timeout=5):
|
||||
"""Download Template"""
|
||||
#Sleep to ensure template is in proper state before download
|
||||
time.sleep(30)
|
||||
|
||||
while True:
|
||||
template_response = list_templates(
|
||||
template_response = Template.list(
|
||||
apiclient,
|
||||
id=self.id,
|
||||
zoneid=self.zoneid,
|
||||
templatefilter='self'
|
||||
)
|
||||
if isinstance(template_response, list):
|
||||
|
||||
template = template_response[0]
|
||||
# If template is ready,
|
||||
# template.status = Download Complete
|
||||
|
|
@ -491,11 +551,22 @@ class Template:
|
|||
|
||||
if template.status == 'Download Complete' :
|
||||
break
|
||||
elif 'Downloaded' not in template.status.split():
|
||||
raise Exception
|
||||
|
||||
elif 'Downloaded' not in template.status.split() and \
|
||||
'Installing' not in template.status.split():
|
||||
raise Exception("ErrorInDownload")
|
||||
|
||||
elif 'Downloaded' in template.status.split():
|
||||
time.sleep(120)
|
||||
|
||||
time.sleep(10)
|
||||
|
||||
elif timeout == 0:
|
||||
break
|
||||
|
||||
else:
|
||||
time.sleep(10)
|
||||
timeout = timeout - 1
|
||||
return
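download() now polls the template status through Template.list and decrements a caller-supplied timeout instead of relying on a single fixed sleep. A hedged usage sketch; services["template"] and the account object are assumptions about the surrounding test.

template = Template.register(
    apiclient,
    services["template"],
    account=account.account.name,
    domainid=account.account.domainid
)
# Allow up to 10 polling rounds for the download to finish;
# raises "ErrorInDownload" if the status reports a failure.
template.download(apiclient, timeout=10)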
|
||||
|
||||
@classmethod
|
||||
def list(cls, apiclient, **kwargs):
|
||||
"""List all templates matching criteria"""
|
||||
|
|
@ -504,6 +575,7 @@ class Template:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listTemplates(cmd))
|
||||
|
||||
|
||||
class Iso:
|
||||
"""Manage ISO life cycle"""
|
||||
|
||||
|
|
@ -520,6 +592,7 @@ class Iso:
|
|||
cmd.ostypeid = services["ostypeid"]
|
||||
cmd.url = services["url"]
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
if "isextractable" in services:
|
||||
cmd.isextractable = services["isextractable"]
|
||||
if "isfeatured" in services:
|
||||
|
|
@ -531,8 +604,11 @@ class Iso:
|
|||
cmd.account = account
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
|
||||
return Iso(apiclient.registerIso(cmd)[0].__dict__)
|
||||
# Register ISO
|
||||
iso = apiclient.registerIso(cmd)
|
||||
|
||||
if iso:
|
||||
return Iso(iso[0].__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete an ISO"""
|
||||
|
|
@ -541,21 +617,31 @@ class Iso:
|
|||
apiclient.deleteIso(cmd)
|
||||
return
|
||||
|
||||
def download(self, apiclient):
|
||||
def download(self, apiclient, timeout=5):
|
||||
"""Download an ISO"""
|
||||
#Ensuring ISO is successfully downloaded
|
||||
while True:
|
||||
time.sleep(120)
|
||||
time.sleep(60)
|
||||
|
||||
cmd = listIsos.listIsosCmd()
|
||||
cmd.id = self.id
|
||||
response = apiclient.listIsos(cmd)[0]
|
||||
# Check whether download is in progress (for Ex: 10% Downloaded)
|
||||
# or ISO is 'Successfully Installed'
|
||||
if response.status == 'Successfully Installed':
|
||||
return
|
||||
elif 'Downloaded' not in response.status.split():
|
||||
raise Exception
|
||||
iso_response = apiclient.listIsos(cmd)
|
||||
|
||||
if isinstance(iso_response, list):
|
||||
response = iso_response[0]
|
||||
# Again initialize timeout to avoid listISO failure
|
||||
timeout = 5
|
||||
|
||||
# Check whether download is in progress(for Ex:10% Downloaded)
|
||||
# or ISO is 'Successfully Installed'
|
||||
if response.status == 'Successfully Installed':
|
||||
return
|
||||
elif 'Downloaded' not in response.status.split():
|
||||
raise Exception("ErrorInDownload")
|
||||
elif timeout == 0:
|
||||
raise Exception("TimeoutException")
|
||||
else:
|
||||
timeout = timeout - 1
|
||||
return
|
||||
|
||||
@classmethod
|
||||
|
|
@ -603,7 +689,7 @@ class PublicIPAddress:
|
|||
return(apiclient.listPublicIpAddresses(cmd))
|
||||
|
||||
class NATRule:
|
||||
"""Manage NAT rule"""
|
||||
"""Manage port forwarding rule"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
|
@ -622,6 +708,7 @@ class NATRule:
|
|||
cmd.publicport = services["publicport"]
|
||||
cmd.protocol = services["protocol"]
|
||||
cmd.virtualmachineid = virtual_machine.id
|
||||
|
||||
return NATRule(apiclient.createPortForwardingRule(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
|
|
@ -638,6 +725,69 @@ class NATRule:
|
|||
cmd = listPortForwardingRules.listPortForwardingRulesCmd()
|
||||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listPortForwardingRules(cmd))
|
||||
|
||||
|
||||
class StaticNATRule:
|
||||
"""Manage Static NAT rule"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, services, ipaddressid=None):
|
||||
"""Creates static ip forwarding rule"""
|
||||
|
||||
cmd = createIpForwardingRule.createIpForwardingRuleCmd()
|
||||
cmd.protocol = services["protocol"]
|
||||
cmd.startport = services["startport"]
|
||||
|
||||
if "endport" in services:
|
||||
cmd.endport = services["endport"]
|
||||
|
||||
if "cidrlist" in services:
|
||||
cmd.cidrlist = services["cidrlist"]
|
||||
|
||||
if ipaddressid:
|
||||
cmd.ipaddressid = ipaddressid
|
||||
elif "ipaddressid" in services:
|
||||
cmd.ipaddressid = services["ipaddressid"]
|
||||
|
||||
return StaticNATRule(apiclient.createIpForwardingRule(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete IP forwarding rule"""
|
||||
cmd = deleteIpForwardingRule.deleteIpForwardingRuleCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.deleteIpForwardingRule(cmd)
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def list(cls, apiclient, **kwargs):
|
||||
"""List all IP forwarding rules matching criteria"""
|
||||
|
||||
cmd = listIpForwardingRules.listIpForwardingRulesCmd()
|
||||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listIpForwardingRules(cmd))
|
||||
|
||||
@classmethod
|
||||
def enable(cls, apiclient, ipaddressid, virtualmachineid):
|
||||
"""Enables Static NAT rule"""
|
||||
|
||||
cmd = enableStaticNat.enableStaticNatCmd()
|
||||
cmd.ipaddressid = ipaddressid
|
||||
cmd.virtualmachineid = virtualmachineid
|
||||
apiclient.enableStaticNat(cmd)
|
||||
return
|
||||
|
||||
@classmethod
|
||||
def disable(cls, apiclient, ipaddressid, virtualmachineid):
|
||||
"""Disables Static NAT rule"""
|
||||
|
||||
cmd = disableStaticNat.disableStaticNatCmd()
|
||||
cmd.ipaddressid = ipaddressid
|
||||
apiclient.disableStaticNat(cmd)
|
||||
return
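The new StaticNATRule wrapper covers both the ip-forwarding rule and the enable/disable static NAT calls. A usage sketch, with public_ip_id and vm_id standing in for an already-acquired public IP address and a running VM, and services["natrule"] assumed to carry protocol/startport (endport optional):

StaticNATRule.enable(apiclient, ipaddressid=public_ip_id,
                     virtualmachineid=vm_id)
rule = StaticNATRule.create(
    apiclient,
    services["natrule"],
    ipaddressid=public_ip_id
)
# ... exercise the forwarded port ...
rule.delete(apiclient)
StaticNATRule.disable(apiclient, ipaddressid=public_ip_id,
                      virtualmachineid=vm_id)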
|
||||
|
||||
|
||||
class FireWallRule:
|
||||
"""Manage Firewall rule"""
|
||||
|
|
@ -747,6 +897,7 @@ class DiskOffering:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listDiskOfferings(cmd))
|
||||
|
||||
|
||||
class SnapshotPolicy:
|
||||
"""Manage snapshot policies"""
|
||||
|
||||
|
|
@ -830,6 +981,7 @@ class LoadBalancerRule:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listLoadBalancerRules(cmd))
|
||||
|
||||
|
||||
class Cluster:
|
||||
"""Manage Cluster life cycle"""
|
||||
|
||||
|
|
@ -837,13 +989,21 @@ class Cluster:
|
|||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, services):
|
||||
def create(cls, apiclient, services, zoneid=None, podid=None):
|
||||
"""Create Cluster"""
|
||||
cmd = addCluster.addClusterCmd()
|
||||
cmd.clustertype = services["clustertype"]
|
||||
cmd.hypervisor = services["hypervisor"]
|
||||
cmd.zoneid = services["zoneid"]
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
if zoneid:
|
||||
cmd.zoneid = zoneid
|
||||
else:
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
if podid:
|
||||
cmd.podid = podid
|
||||
else:
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
if "username" in services:
|
||||
cmd.username = services["username"]
|
||||
|
|
@ -871,6 +1031,7 @@ class Cluster:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listClusters(cmd))
|
||||
|
||||
|
||||
class Host:
|
||||
"""Manage Host life cycle"""
|
||||
|
||||
|
|
@ -878,14 +1039,23 @@ class Host:
|
|||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, cluster, services):
|
||||
"""Create Host"""
|
||||
def create(cls, apiclient, cluster, services, zoneid=None, podid=None):
|
||||
"""Create Host in cluster"""
|
||||
|
||||
cmd = addHost.addHostCmd()
|
||||
cmd.hypervisor = services["hypervisor"]
|
||||
cmd.url = services["url"]
|
||||
cmd.zoneid = services["zoneid"]
|
||||
cmd.clusterid = cluster.id
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
if zoneid:
|
||||
cmd.zoneid = zoneid
|
||||
else:
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
if podid:
|
||||
cmd.podid = podid
|
||||
else:
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
if "clustertype" in services:
|
||||
cmd.clustertype = services["clustertype"]
|
||||
|
|
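Cluster.create, Host.create and StoragePool.create all repeat the same fallback: use the explicit zoneid/podid argument when given, otherwise read it from the services dictionary. If the pattern keeps spreading it could be collapsed into a tiny helper; the sketch below is purely illustrative and not part of the library.

def pick(override, services, key):
    """Prefer an explicit override, else fall back to the services dict."""
    return override if override is not None else services[key]

# e.g. inside create():
#   cmd.zoneid = pick(zoneid, services, "zoneid")
#   cmd.podid = pick(podid, services, "podid")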
@ -893,8 +1063,12 @@ class Host:
|
|||
cmd.username = services["username"]
|
||||
if "password" in services:
|
||||
cmd.password = services["password"]
|
||||
|
||||
return Host(apiclient.addHost(cmd)[0].__dict__)
|
||||
|
||||
# Add host
|
||||
host = apiclient.addHost(cmd)
|
||||
|
||||
if isinstance(host, list):
|
||||
return Host(host[0].__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Host"""
|
||||
|
|
@ -902,7 +1076,7 @@ class Host:
|
|||
cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.prepareHostForMaintenance(cmd)
|
||||
time.sleep(60)
|
||||
time.sleep(30)
|
||||
|
||||
cmd = deleteHost.deleteHostCmd()
|
||||
cmd.id = self.id
|
||||
|
|
@ -917,36 +1091,46 @@ class Host:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listHosts(cmd))
|
||||
|
||||
|
||||
class StoragePool:
|
||||
"""Manage Storage pools"""
|
||||
"""Manage Storage pools (Primary Storage)"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, services, clusterid=None):
|
||||
"""Create Storage pool"""
|
||||
def create(cls, apiclient, services, clusterid=None, zoneid=None, podid=None):
|
||||
"""Create Storage pool (Primary Storage)"""
|
||||
|
||||
cmd = createStoragePool.createStoragePoolCmd()
|
||||
cmd.name = services["name"]
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
if podid:
|
||||
cmd.podid = podid
|
||||
else:
|
||||
cmd.podid = services["podid"]
|
||||
|
||||
cmd.url = services["url"]
|
||||
if clusterid:
|
||||
cmd.clusterid = clusterid
|
||||
elif "clusterid" in services:
|
||||
cmd.clusterid = services["clusterid"]
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
if zoneid:
|
||||
cmd.zoneid = zoneid
|
||||
else:
|
||||
cmd.zoneid = services["zoneid"]
|
||||
|
||||
return StoragePool(apiclient.createStoragePool(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Storage pool"""
|
||||
"""Delete Storage pool (Primary Storage)"""
|
||||
|
||||
# Storage pool must be in maintenance mode before deletion
|
||||
cmd = enableStorageMaintenance.enableStorageMaintenanceCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.enableStorageMaintenance(cmd)
|
||||
time.sleep(60)
|
||||
time.sleep(30)
|
||||
cmd = deleteStoragePool.deleteStoragePoolCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.deleteStoragePool(cmd)
|
||||
|
|
@ -960,6 +1144,7 @@ class StoragePool:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listStoragePools(cmd))
|
||||
|
||||
|
||||
class Network:
|
||||
"""Manage Network pools"""
|
||||
|
||||
|
|
@ -996,15 +1181,16 @@ class Network:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listNetworks(cmd))
|
||||
|
||||
|
||||
class Vpn:
|
||||
"""Manage Network pools"""
|
||||
"""Manage VPN life cycle"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, publicipid, account=None, domainid=None):
|
||||
"""Create VPN for Public IP"""
|
||||
"""Create VPN for Public IP address"""
|
||||
cmd = createRemoteAccessVpn.createRemoteAccessVpnCmd()
|
||||
cmd.publicipid = publicipid
|
||||
if account:
|
||||
|
|
@ -1015,7 +1201,7 @@ class Vpn:
|
|||
return Vpn(apiclient.createRemoteAccessVpn(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Account"""
|
||||
"""Delete remote VPN access"""
|
||||
|
||||
cmd = deleteRemoteAccessVpn.deleteRemoteAccessVpnCmd()
|
||||
cmd.publicipid = self.publicipid
|
||||
|
|
@ -1023,14 +1209,14 @@ class Vpn:
|
|||
|
||||
|
||||
class VpnUser:
|
||||
"""Manage Network pools"""
|
||||
"""Manage VPN user"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, username, password, account=None, domainid=None):
|
||||
"""Create VPN for Public IP"""
|
||||
"""Create VPN user"""
|
||||
cmd = addVpnUser.addVpnUserCmd()
|
||||
cmd.username = username
|
||||
cmd.password = password
|
||||
|
|
@ -1043,7 +1229,7 @@ class VpnUser:
|
|||
return VpnUser(apiclient.addVpnUser(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Account"""
|
||||
"""Remove VPN user"""
|
||||
|
||||
cmd = removeVpnUser.removeVpnUserCmd()
|
||||
cmd.username = self.username
|
||||
|
|
@ -1089,6 +1275,7 @@ class Zone:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listZones(cmd))
|
||||
|
||||
|
||||
class Pod:
|
||||
"""Manage Pod"""
|
||||
|
||||
|
|
@ -1124,7 +1311,7 @@ class Pod:
|
|||
return apiclient.listPods(cmd)
|
||||
|
||||
|
||||
class PublicIp:
|
||||
class PublicIpRange:
|
||||
"""Manage VlanIpRange"""
|
||||
|
||||
def __init__(self, items):
|
||||
|
|
@ -1133,6 +1320,7 @@ class PublicIp:
|
|||
@classmethod
|
||||
def create(cls, apiclient, services):
|
||||
"""Create VlanIpRange"""
|
||||
|
||||
cmd = createVlanIpRange.createVlanIpRangeCmd()
|
||||
cmd.gateway = services["gateway"]
|
||||
cmd.netmask = services["netmask"]
|
||||
|
|
@ -1143,7 +1331,7 @@ class PublicIp:
|
|||
cmd.podid = services["podid"]
|
||||
cmd.vlan = services["vlan"]
|
||||
|
||||
return PublicIp(apiclient.createVlanIpRange(cmd).__dict__)
|
||||
return PublicIpRange(apiclient.createVlanIpRange(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete VlanIpRange"""
|
||||
|
|
@ -1160,6 +1348,7 @@ class PublicIp:
|
|||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listVlanIpRanges(cmd))
|
||||
|
||||
|
||||
class SecondaryStorage:
|
||||
"""Manage Secondary storage"""
|
||||
|
||||
|
|
@ -1168,18 +1357,83 @@ class SecondaryStorage:
|
|||
|
||||
@classmethod
|
||||
def create(cls, apiclient, services):
|
||||
"""Create Sec Storage"""
|
||||
"""Create Secondary Storage"""
|
||||
cmd = addSecondaryStorage.addSecondaryStorageCmd()
|
||||
|
||||
cmd.url = services["url"]
|
||||
if "zoneid" in services:
|
||||
cmd.zoneid = services["zoneid"]
|
||||
return Pod(apiclient.addSecondaryStorage(cmd).__dict__)
|
||||
return SecondaryStorage(apiclient.addSecondaryStorage(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Sec. Storage"""
|
||||
"""Delete Secondary Storage"""
|
||||
|
||||
cmd = deleteHost.deleteHostCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.deleteHost(cmd)
|
||||
|
||||
|
||||
class SecurityGroup:
|
||||
"""Manage Security Groups"""
|
||||
|
||||
def __init__(self, items):
|
||||
self.__dict__.update(items)
|
||||
|
||||
@classmethod
|
||||
def create(cls, apiclient, services, account=None, domainid=None,
|
||||
description=None):
|
||||
"""Create security group"""
|
||||
cmd = createSecurityGroup.createSecurityGroupCmd()
|
||||
|
||||
cmd.name = services["name"]
|
||||
if account:
|
||||
cmd.account = account
|
||||
if domainid:
|
||||
cmd.domainid=domainid
|
||||
if description:
|
||||
cmd.description=description
|
||||
|
||||
return SecurityGroup(apiclient.createSecurityGroup(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
"""Delete Security Group"""
|
||||
|
||||
cmd = deleteSecurityGroup.deleteSecurityGroupCmd()
|
||||
cmd.id = self.id
|
||||
apiclient.deleteSecurityGroup(cmd)
|
||||
|
||||
def authorize(self, apiclient, services,
|
||||
account=None, domainid=None):
|
||||
"""Authorize Ingress Rule"""
|
||||
|
||||
cmd=authorizeSecurityGroupIngress.authorizeSecurityGroupIngressCmd()
|
||||
|
||||
if domainid:
|
||||
cmd.domainid = domainid
|
||||
if account:
|
||||
cmd.account = account
|
||||
cmd.usersecuritygrouplist[0].account=account
|
||||
else:
|
||||
cmd.usersecuritygrouplist[0].account='admin'
|
||||
|
||||
cmd.securitygroupid=self.id
|
||||
cmd.protocol=services["protocol"]
|
||||
cmd.startport = services["startport"]
|
||||
cmd.endport = services["endport"]
|
||||
cmd.usersecuritygrouplist[0].group=services["type"]
|
||||
return (apiclient.authorizeSecurityGroupIngress(cmd).__dict__)
|
||||
|
||||
def revoke(self, apiclient, id):
|
||||
"""Revoke ingress rule"""
|
||||
|
||||
cmd=revokeSecurityGroupIngress.revokeSecurityGroupIngressCmd()
|
||||
cmd.id=id
|
||||
apiclient.revokeSecurityGroupIngress(cmd)
|
||||
|
||||
@classmethod
|
||||
def list(cls, apiclient, **kwargs):
|
||||
"""Lists all security groups."""
|
||||
|
||||
cmd = listSecurityGroups.listSecurityGroupsCmd()
|
||||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listSecurityGroups(cmd))
|
||||
|
|
@ -23,7 +23,13 @@ def get_zone(apiclient, services=None):
|
|||
if services:
|
||||
if "zoneid" in services:
|
||||
cmd.id = services["zoneid"]
|
||||
return apiclient.listZones(cmd)[0]
|
||||
|
||||
zones = apiclient.listZones(cmd)
|
||||
|
||||
if isinstance(zones, list):
|
||||
return zones[0]
|
||||
else:
|
||||
raise Exception("Failed to find specified zone.")
|
||||
|
||||
def get_pod(apiclient, zoneid, services=None):
|
||||
"Returns a default pod for specified zone"
|
||||
|
|
@ -34,8 +40,13 @@ def get_pod(apiclient, zoneid, services=None):
|
|||
if services:
|
||||
if "podid" in services:
|
||||
cmd.id = services["podid"]
|
||||
|
||||
return apiclient.listPods(cmd)[0]
|
||||
|
||||
pods = apiclient.listPods(cmd)
|
||||
|
||||
if isinstance(pods, list):
|
||||
return pods[0]
|
||||
else:
|
||||
raise Exception("Exception: Failed to find specified pod.")
|
||||
|
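get_zone and get_pod now validate the list response and raise instead of indexing blindly, so a misconfigured zoneid/podid fails fast with a clear message. A sketch of defensive usage inside a test's setUp (self.apiclient and self.services follow the conventions of the tests above):

try:
    zone = get_zone(self.apiclient, self.services)
    pod = get_pod(self.apiclient, zone.id)
except Exception as e:
    self.fail("Zone/pod lookup failed: %s" % e)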
||||
def get_template(apiclient, zoneid, ostypeid=12, services=None):
|
||||
"Returns a template"
|
||||
|
|
@ -53,18 +64,25 @@ def get_template(apiclient, zoneid, ostypeid=12, services=None):
|
|||
for template in list_templates:
|
||||
if template.ostypeid == ostypeid:
|
||||
return template
|
||||
|
||||
raise Exception("Exception: Failed to find template with OSTypeID: %s" %
|
||||
ostypeid)
|
||||
return
|
||||
|
||||
def download_systemplates_sec_storage(server, services):
|
||||
"""Download System templates on sec storage"""
|
||||
|
||||
# Login to management server
|
||||
ssh = remoteSSHClient.remoteSSHClient(
|
||||
try:
|
||||
# Login to management server
|
||||
ssh = remoteSSHClient.remoteSSHClient(
|
||||
server["ipaddress"],
|
||||
server["port"],
|
||||
server["username"],
|
||||
server["password"]
|
||||
)
|
||||
print ssh
|
||||
except Exception as e:
|
||||
raise Exception("SSH access failted for server with IP address: %s" %
|
||||
server["ipaddess"])
|
||||
# Mount Secondary Storage on Management Server
|
||||
cmds = [
|
||||
"mkdir -p %s" % services["mnt_dir"],
|
||||
|
|
@ -93,14 +111,14 @@ def download_systemplates_sec_storage(server, services):
|
|||
if res.count("Successfully installed system VM template") == 1:
|
||||
return
|
||||
else:
|
||||
self.debug("Failed to download System Templates on Sec Storage")
|
||||
raise Exception("Failed to download System Templates on Sec Storage")
|
||||
return
|
||||
|
||||
def wait_for_ssvms(apiclient, zoneid, podid):
|
||||
"""After setup wait for SSVMs to come Up"""
|
||||
|
||||
time.sleep(180)
|
||||
timeout = 20
|
||||
time.sleep(30)
|
||||
timeout = 40
|
||||
while True:
|
||||
list_ssvm_response = list_ssvms(
|
||||
apiclient,
|
||||
|
|
@ -111,12 +129,12 @@ def wait_for_ssvms(apiclient, zoneid, podid):
|
|||
ssvm = list_ssvm_response[0]
|
||||
if ssvm.state != 'Running':
|
||||
# Sleep to ensure SSVMs are Up and Running
|
||||
time.sleep(120)
|
||||
time.sleep(30)
|
||||
timeout = timeout - 1
|
||||
elif ssvm.state == 'Running':
|
||||
break
|
||||
elif timeout == 0:
|
||||
self.debug("SSVMs failed to start")
|
||||
raise Exception("SSVM failled to come up")
|
||||
break
|
||||
|
||||
timeout = 20
|
||||
|
|
@ -130,18 +148,18 @@ def wait_for_ssvms(apiclient, zoneid, podid):
|
|||
cpvm = list_ssvm_response[0]
|
||||
if cpvm.state != 'Running':
|
||||
# Sleep to ensure SSVMs are Up and Running
|
||||
time.sleep(120)
|
||||
time.sleep(30)
|
||||
timeout = timeout - 1
|
||||
elif cpvm.state == 'Running':
|
||||
break
|
||||
elif timeout == 0:
|
||||
self.debug("SSVMs failed to start")
|
||||
raise Exception("SSVM failled to come up")
|
||||
break
|
||||
return
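For scale: with the initial sleep cut to 30 seconds, a 40-iteration loop sleeping 30 seconds per retry bounds the secondary storage VM wait at roughly 30 + 40 * 30 = 1230 seconds (about 20 minutes), and the console proxy loop that follows adds at most another 20 * 30 = 600 seconds, instead of a single long unconditional sleep.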
|
||||
|
||||
def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip):
|
||||
"""After setup wait till builtin templates are downloaded"""
|
||||
|
||||
|
||||
# Change IPTABLES Rules
|
||||
result = get_process_status(
|
||||
host["ipaddress"],
|
||||
|
|
@ -159,7 +177,10 @@ def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip)
|
|||
zoneid=zoneid,
|
||||
templatefilter='self'
|
||||
)
|
||||
|
||||
|
||||
if not isinstance(list_template_response, list):
|
||||
raise Exception("Failed to download BUILTIN templates")
|
||||
|
||||
# Ensure all BUILTIN templates are downloaded
|
||||
templateid = None
|
||||
for template in list_template_response:
|
||||
|
|
@ -168,7 +189,7 @@ def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip)
|
|||
|
||||
# Sleep to ensure that template is in downloading state after adding
|
||||
# Sec storage
|
||||
time.sleep(120)
|
||||
time.sleep(30)
|
||||
while True:
|
||||
template_response = list_templates(
|
||||
apiclient,
|
||||
|
|
@ -184,9 +205,9 @@ def download_builtin_templates(apiclient, zoneid, hypervisor, host, linklocalip)
|
|||
if template.status == 'Download Complete' :
|
||||
break
|
||||
elif 'Downloaded' not in template.status.split():
|
||||
raise Exception
|
||||
raise Exception("ErrorInDownload")
|
||||
elif 'Downloaded' in template.status.split():
|
||||
time.sleep(120)
|
||||
time.sleep(30)
|
||||
return
|
||||
|
||||
def update_resource_limit(apiclient, resourcetype, account=None, domainid=None,
|
||||
|
|
@ -393,4 +414,4 @@ def list_usage_records(apiclient, **kwargs):
|
|||
|
||||
cmd = listUsageRecords.listUsageRecordsCmd()
|
||||
[setattr(cmd, k, v) for k, v in kwargs.items()]
|
||||
return(apiclient.listUsageRecords(cmd))
|
||||
return(apiclient.listUsageRecords(cmd))
|
||||
|
|
@ -40,7 +40,7 @@ def is_server_ssh_ready(ipaddress, port, username, password, retries=50):
|
|||
if loop_cnt == 0:
|
||||
raise e
|
||||
loop_cnt = loop_cnt - 1
|
||||
time.sleep(60)
|
||||
time.sleep(30)
|
||||
else:
|
||||
return ssh
|
||||
|
||||
|
|
@ -90,10 +90,12 @@ def get_process_status(hostip, port, username, password, linklocalip, process):
|
|||
# Ensure the SSH login is successful
|
||||
while True:
|
||||
res = ssh.execute(ssh_command)
|
||||
|
||||
if res[0] != "Host key verification failed.":
|
||||
break
|
||||
elif timeout == 0:
|
||||
break
|
||||
|
||||
time.sleep(5)
|
||||
timeout = timeout - 1
|
||||
return res
|
||||
return res
|
||||