From 980be4dfc9a325b8a7fafca7894611ca415675a7 Mon Sep 17 00:00:00 2001 From: Jayapal Date: Thu, 23 Feb 2017 11:10:47 +0530 Subject: [PATCH 01/59] CLOUDSTACK-9757: Fixed issue in traffic from additional public subnet (cherry picked from commit baac747089ef48ea6627a6aacf27156222862352) Signed-off-by: Rohit Yadav --- .../network/router/CommandSetupHelper.java | 27 ++++++++++++++++++- .../config/opt/cloud/bin/cs/CsAddress.py | 2 +- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/server/src/com/cloud/network/router/CommandSetupHelper.java b/server/src/com/cloud/network/router/CommandSetupHelper.java index 3e4318a3269..13c80961e3f 100644 --- a/server/src/com/cloud/network/router/CommandSetupHelper.java +++ b/server/src/com/cloud/network/router/CommandSetupHelper.java @@ -668,19 +668,38 @@ public class CommandSetupHelper { for (final Map.Entry> vlanAndIp : vlanIpMap.entrySet()) { final List ipAddrList = vlanAndIp.getValue(); + // Source nat ip address should always be sent first + Collections.sort(ipAddrList, new Comparator() { + @Override + public int compare(final PublicIpAddress o1, final PublicIpAddress o2) { + final boolean s1 = o1.isSourceNat(); + final boolean s2 = o2.isSourceNat(); + return s1 ^ s2 ? s1 ^ true ? 1 : -1 : 0; + } + }); + + // Get network rate - required for IpAssoc final Integer networkRate = _networkModel.getNetworkRate(ipAddrList.get(0).getNetworkId(), router.getId()); final Network network = _networkModel.getNetwork(ipAddrList.get(0).getNetworkId()); final IpAddressTO[] ipsToSend = new IpAddressTO[ipAddrList.size()]; int i = 0; + boolean firstIP = true; for (final PublicIpAddress ipAddr : ipAddrList) { final boolean add = ipAddr.getState() == IpAddress.State.Releasing ? false : true; + boolean sourceNat = ipAddr.isSourceNat(); + /* enable sourceNAT for the first ip of the public interface + * For additional public subnet source nat rule needs to be added for vm to reach ips in that subnet + */ + if (firstIP) { + sourceNat = true; + } final String macAddress = vlanMacAddress.get(BroadcastDomainType.getValue(BroadcastDomainType.fromString(ipAddr.getVlanTag()))); - final IpAddressTO ip = new IpAddressTO(ipAddr.getAccountId(), ipAddr.getAddress().addr(), add, false, ipAddr.isSourceNat(), BroadcastDomainType.fromString(ipAddr.getVlanTag()).toString(), ipAddr.getGateway(), + final IpAddressTO ip = new IpAddressTO(ipAddr.getAccountId(), ipAddr.getAddress().addr(), add, firstIP, sourceNat, BroadcastDomainType.fromString(ipAddr.getVlanTag()).toString(), ipAddr.getGateway(), ipAddr.getNetmask(), macAddress, networkRate, ipAddr.isOneToOneNat()); ip.setTrafficType(network.getTrafficType()); @@ -690,6 +709,12 @@ public class CommandSetupHelper { sourceNatIpAdd = new Pair(ip, ipAddr.getNetworkId()); addSourceNat = add; } + + //for additional public subnet on delete it is not sure which ip is set to first ip. So on delete we + //want to set sourcenat to true for all ips to delete source nat rules. 
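+                        // Net effect: while only Releasing entries have been processed, firstIP stays
+                        // true, so every removed IP is sent with the source NAT flag and its source
+                        // NAT rules get cleaned up; the first added IP then consumes the flag below.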
+ if (!firstIP || add) { + firstIP = false; + } } final IpAssocVpcCommand cmd = new IpAssocVpcCommand(ipsToSend); cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py index 4eac3483a97..e0eb350ead0 100755 --- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py @@ -571,7 +571,7 @@ class CsIP: if self.get_type() in ["guest"] and not cmdline.is_redundant(): pwdsvc = CsPasswdSvc(self.address['public_ip']).start() - if self.get_type() == "public" and self.config.is_vpc(): + if self.get_type() == "public" and self.config.is_vpc() and method == "add": if self.address["source_nat"]: vpccidr = cmdline.get_vpccidr() self.fw.append( From 033631faacca932126a06b05dfcce52af8684dbe Mon Sep 17 00:00:00 2001 From: Jayapal Date: Mon, 20 Feb 2017 18:29:14 +0530 Subject: [PATCH 02/59] CLOUDSTACK-8871: fixed issue with the xenserver 6.2 ipset nethash (cherry picked from commit 175c8d83b8a628566a4c443db0de587874718c8c) Signed-off-by: Rohit Yadav --- scripts/vm/hypervisor/xenserver/vmops | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index 0dfa6da038e..46aeffa7d70 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -356,10 +356,21 @@ def allow_egress_traffic(session): return 'false' return 'true' +def getIpsetType(): + try: + out = util.pread2(['/bin/bash', '-c', "ipset -v | awk '{print $5}'"]) + out.replace(".","") + if int(out) < 6: + return 'iptreemap' + else: + return 'nethash' + except: + return 'iptreemap' def ipset(ipsetname, proto, start, end, cidrs): + type = getIpsetType() try: - util.pread2(['ipset', '-N', ipsetname, 'nethash']) + util.pread2(['ipset', '-N', ipsetname, type]) except: logging.debug("ipset chain already exists: " + ipsetname) @@ -367,7 +378,7 @@ def ipset(ipsetname, proto, start, end, cidrs): ipsettmp = ''.join(''.join(ipsetname.split('-')).split('_')) + str(int(time.time()) % 1000) try: - util.pread2(['ipset', '-N', ipsettmp, 'nethash']) + util.pread2(['ipset', '-N', ipsettmp, type]) except: logging.debug("Failed to create temp ipset, reusing old name= " + ipsettmp) try: @@ -396,7 +407,7 @@ def ipset(ipsetname, proto, start, end, cidrs): # the old ipset entry could be of iphash type, try to delete and recreate try: util.pread2(['ipset', '-X', ipsetname]) - util.pread2(['ipset', '-N', ipsetname, 'nethash']) + util.pread2(['ipset', '-N', ipsetname, type]) util.pread2(['ipset', '-W', ipsettmp, ipsetname]) except: logging.debug("Failed to swap ipset " + ipsetname) @@ -672,14 +683,15 @@ def default_network_rules_systemvm(session, args): @echo def create_ipset_forvm (ipsetname): result = True + type = getIpsetType() try: logging.debug("Creating ipset chain .... " + ipsetname) util.pread2(['ipset', '-F', ipsetname]) util.pread2(['ipset', '-X', ipsetname]) - util.pread2(['ipset', '-N', ipsetname, 'iphash']) + util.pread2(['ipset', '-N', ipsetname, type]) except: logging.debug("ipset chain not exists creating.... 
" + ipsetname) - util.pread2(['ipset', '-N', ipsetname, 'iphash']) + util.pread2(['ipset', '-N', ipsetname, type]) return result @@ -1252,9 +1264,10 @@ def inflate_rules (zipped): @echo def cache_ipset_keyword(): + type = getIpsetType() tmpname = 'ipsetqzvxtmp' try: - util.pread2(['/bin/bash', '-c', 'ipset -N ' + tmpname + ' iphash']) + util.pread2(['/bin/bash', '-c', 'ipset -N ' + tmpname + type]) except: util.pread2(['/bin/bash', '-c', 'ipset -F ' + tmpname]) From 6471978054f512f2484c4fb6734ad955d840b68a Mon Sep 17 00:00:00 2001 From: Jayapal Date: Mon, 6 Mar 2017 19:01:15 +0530 Subject: [PATCH 03/59] CLOUDSTACK-9821: Fixed issue in deploying vm in basic zone (cherry picked from commit 43a991d2ebc613957c9909e0dca80d991fe20e16) Signed-off-by: Rohit Yadav --- scripts/vm/hypervisor/xenserver/vmops | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index 46aeffa7d70..93d2af21d87 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -1267,19 +1267,19 @@ def cache_ipset_keyword(): type = getIpsetType() tmpname = 'ipsetqzvxtmp' try: - util.pread2(['/bin/bash', '-c', 'ipset -N ' + tmpname + type]) + util.pread2(['ipset', '-N', tmpname, type]) except: - util.pread2(['/bin/bash', '-c', 'ipset -F ' + tmpname]) + util.pread2(['ipset', '-F', tmpname]) try: - util.pread2(['/bin/bash', '-c', 'iptables -A INPUT -m set --set ' + tmpname + ' src' + ' -j ACCEPT']) - util.pread2(['/bin/bash', '-c', 'iptables -D INPUT -m set --set ' + tmpname + ' src' + ' -j ACCEPT']) + util.pread2(['iptables -A INPUT -m set --set ' + tmpname + ' src' + ' -j ACCEPT']) + util.pread2(['iptables -D INPUT -m set --set ' + tmpname + ' src' + ' -j ACCEPT']) keyword = 'set' except: keyword = 'match-set' try: - util.pread2(['/bin/bash', '-c', 'ipset -X ' + tmpname]) + util.pread2(['ipset', '-X', tmpname]) except: pass From ca60ebb93d78993e06374271d124c77f8a4dc511 Mon Sep 17 00:00:00 2001 From: nvazquez Date: Wed, 22 Feb 2017 15:54:02 -0300 Subject: [PATCH 04/59] Fix for test_snapshots.py using nfs2 instead of nfs template (cherry picked from commit b792df163a1953772aa9a4c293b87cba5826a953) Signed-off-by: Rohit Yadav --- test/integration/smoke/test_snapshots.py | 2 +- tools/marvin/marvin/config/test_data.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py index a6230bc4940..e212e3e9783 100644 --- a/test/integration/smoke/test_snapshots.py +++ b/test/integration/smoke/test_snapshots.py @@ -275,7 +275,7 @@ class TestSnapshotRootDisk(cloudstackTestCase): assert isinstance(clusters,list) and len(clusters)>0 storage = StoragePool.create(self.apiclient, - self.services["nfs"], + self.services["nfs2"], clusterid=clusters[0].id, zoneid=self.zone.id, podid=self.pod.id diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index c9bb7c603de..13b7d9f7a8f 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -938,6 +938,10 @@ test_data = { "url": "nfs://nfs/export/automation/1/testprimary", "name": "Primary XEN" }, + "nfs2": { + "url": "nfs://nfs/export/automation/1/testprimary2", + "name": "Primary XEN 2" + }, "iscsi": { "url": "iscsi://192.168.100.21/iqn.2012-01.localdomain.clo-cstack-cos6:iser/1", From 823d1971a5e3106349de1cf75d21f7a2a23c1de7 Mon Sep 17 00:00:00 2001 From: nvazquez Date: Wed, 1 Mar 2017 
12:17:59 -0300 Subject: [PATCH 05/59] Fix for test failure (cherry picked from commit c66df6e11f766844c4566543eb4b95945b2e7873) Signed-off-by: Rohit Yadav --- .../PrimaryDataStoreProviderManagerImpl.java | 2 +- test/integration/smoke/test_snapshots.py | 81 ++++++++++++++----- 2 files changed, 63 insertions(+), 20 deletions(-) diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java index 49bcb5b6981..b799c8be389 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java @@ -56,7 +56,7 @@ public class PrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProv @Override public PrimaryDataStore getPrimaryDataStore(long dataStoreId) { - StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId); + StoragePoolVO dataStoreVO = dataStoreDao.findByIdIncludingRemoved(dataStoreId); if (dataStoreVO == null) { throw new CloudRuntimeException("Unable to locate datastore with id " + dataStoreId); } diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py index e212e3e9783..63f3e2c8cf7 100644 --- a/test/integration/smoke/test_snapshots.py +++ b/test/integration/smoke/test_snapshots.py @@ -19,14 +19,16 @@ from marvin.codes import FAILED from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase from marvin.lib.utils import (cleanup_resources, - is_snapshot_on_nfs) + is_snapshot_on_nfs, + validateList) from marvin.lib.base import (VirtualMachine, Account, Template, ServiceOffering, Snapshot, StoragePool, - Volume) + Volume, + DiskOffering) from marvin.lib.common import (get_domain, get_template, get_zone, @@ -36,6 +38,7 @@ from marvin.lib.common import (get_domain, list_storage_pools, list_clusters) from marvin.lib.decoratorGenerators import skipTestIf +from marvin.codes import PASS class Templates: @@ -134,6 +137,10 @@ class TestSnapshotRootDisk(cloudstackTestCase): cls.apiclient, cls.services["service_offerings"]["tiny"] ) + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"] + ) cls.virtual_machine = cls.virtual_machine_with_disk = \ VirtualMachine.create( cls.apiclient, @@ -149,6 +156,7 @@ class TestSnapshotRootDisk(cloudstackTestCase): cls._cleanup.append(cls.service_offering) cls._cleanup.append(cls.account) cls._cleanup.append(cls.template) + cls._cleanup.append(cls.disk_offering) return @classmethod @@ -267,7 +275,35 @@ class TestSnapshotRootDisk(cloudstackTestCase): """Test listing volume snapshots with removed data stores """ - # 1) Create new Primary Storage + # 1 - Create new volume -> V + # 2 - Create new Primary Storage -> PS + # 3 - Attach and detach volume V from vm + # 4 - Migrate volume V to PS + # 5 - Take volume V snapshot -> S + # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed + + # Create new volume + vol = Volume.create( + self.apiclient, + self.services["volume"], + diskofferingid=self.disk_offering.id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + ) + self.cleanup.append(vol) + self.assertIsNotNone(vol, "Failed to create volume") + vol_res = Volume.list( + self.apiclient, + id=vol.id + ) + 
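+        # validateList() returns PASS as its first element only when the response
+        # is a well-formed, non-empty list, so indexing vol_res below is safe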
self.assertEqual( + validateList(vol_res)[0], + PASS, + "Invalid response returned for list volumes") + vol_uuid = vol_res[0].id + + # Create new Primary Storage clusters = list_clusters( self.apiclient, zoneid=self.zone.id @@ -280,6 +316,9 @@ class TestSnapshotRootDisk(cloudstackTestCase): zoneid=self.zone.id, podid=self.pod.id ) + self.cleanup.append(self.virtual_machine_with_disk) + self.cleanup.append(storage) + self.assertEqual( storage.state, 'Up', @@ -314,22 +353,26 @@ class TestSnapshotRootDisk(cloudstackTestCase): "Check storage pool type " ) - # 2) Migrate VM ROOT volume to new Primary Storage - volumes = list_volumes( + # Attach created volume to vm, then detach it to be able to migrate it + self.virtual_machine_with_disk.stop(self.apiclient) + self.virtual_machine_with_disk.attach_volume( self.apiclient, - virtualmachineid=self.virtual_machine_with_disk.id, - type='ROOT', - listall=True + vol ) + self.virtual_machine_with_disk.detach_volume( + self.apiclient, + vol + ) + + # Migrate volume to new Primary Storage Volume.migrate(self.apiclient, - storageid=storage.id, - volumeid=volumes[0].id, - livemigrate="true" - ) + storageid=storage.id, + volumeid=vol.id + ) volume_response = list_volumes( self.apiclient, - id=volumes[0].id, + id=vol.id, ) self.assertNotEqual( len(volume_response), @@ -342,22 +385,21 @@ class TestSnapshotRootDisk(cloudstackTestCase): storage.id, "Check volume storage id" ) - self.cleanup.append(self.virtual_machine_with_disk) - self.cleanup.append(storage) - # 3) Take snapshot of VM ROOT volume + # Take snapshot of new volume snapshot = Snapshot.create( self.apiclient, volume_migrated.id, account=self.account.name, domainid=self.account.domainid ) + self.debug("Snapshot created: ID - %s" % snapshot.id) - # 4) Delete VM and created Primery Storage + # Delete volume, VM and created Primary Storage cleanup_resources(self.apiclient, self.cleanup) - # 5) List snapshot and verify it gets properly listed although Primary Storage was removed + # List snapshot and verify it gets properly listed although Primary Storage was removed snapshot_response = Snapshot.list( self.apiclient, id=snapshot.id @@ -373,10 +415,11 @@ class TestSnapshotRootDisk(cloudstackTestCase): "Check snapshot id" ) - # 6) Delete snapshot and verify it gets properly deleted (should not be listed) + # Delete snapshot and verify it gets properly deleted (should not be listed) self.cleanup = [snapshot] cleanup_resources(self.apiclient, self.cleanup) + self.cleanup = [] snapshot_response_2 = Snapshot.list( self.apiclient, id=snapshot.id From 0bc56787d8d3ebec619d4063e44cbb0e7edf59d2 Mon Sep 17 00:00:00 2001 From: Nitin Kumar Maharana Date: Wed, 8 Mar 2017 02:05:35 +0530 Subject: [PATCH 06/59] CLOUDSTACK-9611: Dedicating a Guest VLAN range to Project does not work. 
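The command-level change below only relaxes the parameter annotations; the service layer then has to enforce that the range is dedicated to exactly one owner. A minimal sketch of that rule, with hypothetical names rather than the actual CloudStack service code:

    // Sketch only: a dedicated guest VLAN range is owned by exactly one of an
    // account (within a domain) or a project.
    static void validateVlanRangeOwner(String accountName, Long domainId, Long projectId) {
        final boolean accountGiven = accountName != null;
        final boolean projectGiven = projectId != null;
        if (accountGiven == projectGiven) { // both supplied, or neither
            throw new IllegalArgumentException("Specify either an account or a project, but not both");
        }
        if (accountGiven && domainId == null) {
            throw new IllegalArgumentException("domainId is required when an account is given");
        }
    }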
(cherry picked from commit 63f534f292c34d13e60b4ac94c85c1fb464225ed) Signed-off-by: Rohit Yadav --- .../network/DedicateGuestVlanRangeCmd.java | 8 +- ui/scripts/system.js | 191 +++++++++++------- 2 files changed, 121 insertions(+), 78 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java index 2890c27be60..ba7b4b99c25 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/DedicateGuestVlanRangeCmd.java @@ -50,17 +50,13 @@ public class DedicateGuestVlanRangeCmd extends BaseCmd { @Parameter(name = ApiConstants.VLAN_RANGE, type = CommandType.STRING, required = true, description = "guest vlan range to be dedicated") private String vlan; - @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = true, description = "account who will own the VLAN") + @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "account who will own the VLAN") private String accountName; @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the VLAN") private Long projectId; - @Parameter(name = ApiConstants.DOMAIN_ID, - type = CommandType.UUID, - entityType = DomainResponse.class, - required = true, - description = "domain ID of the account owning a VLAN") + @Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning a VLAN") private Long domainId; @Parameter(name = ApiConstants.PHYSICAL_NETWORK_ID, diff --git a/ui/scripts/system.js b/ui/scripts/system.js index b7bdd7ba8c0..02734bf2df3 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -1793,27 +1793,57 @@ fields: { vlanrange: { label: 'label.vlan.vni.range', - /* select: function(args) { - var items = []; - if(args.context.physicalNetworks[0].vlan != null && args.context.physicalNetworks[0].vlan.length > 0) { - var vlanranges = args.context.physicalNetworks[0].vlan.split(";"); - for(var i = 0; i < vlanranges.length ; i++) { - items.push({id: vlanranges[i], description: vlanranges[i]}); - } - } - args.response.success({data: items}); - },*/ validation: { required: true } }, - account: { - label: 'label.account', - validation: { - required: true + scope: { + label: 'label.scope', + docID: 'helpGuestNetworkZoneScope', + select: function(args) { + var array1 = []; + + array1.push({ + id: 'account-specific', + description: 'label.account' + }); + array1.push({ + id: 'project-specific', + description: 'label.project' + }); + + args.response.success({ + data: array1 + }); + + args.$select.change(function() { + var $form = $(this).closest('form'); + + if ($(this).val() == "account-specific") { + $form.find('.form-item[rel=domainId]').css('display', 'inline-block'); + $form.find('.form-item[rel=account]').css('display', 'inline-block'); + $form.find('.form-item[rel=projectId]').hide(); + } else if ($(this).val() == "project-specific") { + $form.find('.form-item[rel=domainId]').css('display', 'inline-block'); + $form.find('.form-item[rel=account]').hide(); + $form.find('.form-item[rel=projectId]').css('display', 'inline-block'); + } + + if (args.context.projects != null && args.context.projects.length > 0) { + $form.find('.form-item[rel=domainId]').hide(); + 
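+                                        // opened from within a project view: the owner is implied
+                                        // by the context, so every scope field stays hidden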
$form.find('.form-item[rel=account]').hide(); + $form.find('.form-item[rel=projectId]').hide(); + } + }); + }, + isHidden: function(args) { + if(args.context.projects != null && args.context.projects.length > 0) + return true; + else + return false; } }, - domainid: { + domainId: { label: 'label.domain', validation: { required: true @@ -1836,16 +1866,87 @@ } }); } + }, + account: { + label: 'label.account', + validation: { + required: true + }, + dependsOn: 'domainId', + select: function (args) { + $.ajax({ + url: createURL('listAccounts&domainid=' + args.domainId), + data: { + listAll: true + }, + success: function (json) { + args.response.success({ + data: $.map(json.listaccountsresponse.account, function (account) { + return { + id: account.name, + description: account.name + }; + }) + }); + } + }); + } + }, + projectId: { + label: 'label.project', + validation: { + required: true + }, + dependsOn: 'domainId', + select: function(args) { + var items = []; + $.ajax({ + url: createURL("listProjects&domainid=" + args.domainId), + dataType: "json", + async: false, + success: function(json) { + projectObjs = json.listprojectsresponse.project; + $(projectObjs).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + } + }); + args.response.success({ + data: items + }); + } } } }, action: function (args) { var data = { physicalnetworkid: args.context.physicalNetworks[0].id, - vlanrange: args.data.vlanrange, - domainid: args.data.domainid, - account: args.data.account + vlanrange: args.data.vlanrange }; + + var $form = args.$form; + + if (($form.find('.form-item[rel=domainId]').css("display") != "none") && (args.data.domainId != null && args.data.domainId.length > 0)) { + $.extend(data, { + domainid: args.data.domainId + }) + } + + if (($form.find('.form-item[rel=account]').css("display") != "none") && (args.data.account != null && args.data.account.length > 0)) { + $.extend(data, { + account: args.data.account + }) + } + + if (($form.find('.form-item[rel=projectId]').css("display") != "none") && (args.data.projectId != null && args.data.projectId.length > 0)) { + $.extend(data, { + projectid: args.data.projectId + }) + } + $.ajax({ url: createURL('dedicateGuestVlanRange'), data: data, @@ -18721,60 +18822,6 @@ ucsmanagerid: args.context.ucsManagers[0].id }, success: function (json) { - //for testing only (begin) - /* - json = { - "refreshucsbladesresponse": { - "count": 7, - "ucsblade": [ - { - "id": "6c6a2d2c-575e-41ac-9782-eee51b0b80f8", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-5" - }, - { - "id": "d371d470-a51f-489c-aded-54a63dfd76c7", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-6" - }, - { - "id": "c0f64591-4a80-4083-bb7b-576220b436a2", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-7" - }, - { - "id": "74b9b69a-cb16-42f5-aad6-06391ebdd759", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-1" - }, - { - "id": "713a5adb-0136-484f-9acb-d9203af497be", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-2" - }, - { - "id": "da633578-21cb-4678-9eb4-981a53198b41", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-4" - }, - { - "id": "3d491c6e-f0b6-40b0-bf6e-f89efdd73c30", - "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", - "bladedn": "sys/chassis-1/blade-3" - } - ] - } - }; - */ - //for testing 
only (end) - - /* - var item = json.refreshucsbladesresponse.ucsblade[0]; - addExtraPropertiesToUcsBladeObject(item); - args.response.success({ - data: item - }); - */ $(window).trigger('cloudStack.fullRefresh'); } }); From e61815a25563c357a883b2a3cd70cf864beedaf3 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Sun, 19 Feb 2017 01:52:30 +0530 Subject: [PATCH 07/59] CLOUDSTACK-9794: Unable to attach more than 14 devices to a VM Updated hardcoded value with max data volumes limit from hypervisor capabilities. (cherry picked from commit 93f5b6e8a391ce8b09be484d029c54d48a2b88aa) Signed-off-by: Rohit Yadav --- .../hypervisor/kvm/resource/LibvirtVMDef.java | 31 +++++++++++++------ .../cloud/storage/VolumeApiServiceImpl.java | 28 ++++++++++------- 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index 6ce7d6c8f5e..7d44ae43bae 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -583,18 +583,36 @@ public class LibvirtVMDef { /* skip iso label */ private String getDevLabel(int devId, DiskBus bus) { + if (devId < 0) { + return ""; + } + if (devId == 2) { devId++; } - char suffix = (char)('a' + devId); if (bus == DiskBus.SCSI) { - return "sd" + suffix; + return "sd" + getDevLabelSuffix(devId); } else if (bus == DiskBus.VIRTIO) { - return "vd" + suffix; + return "vd" + getDevLabelSuffix(devId); } - return "hd" + suffix; + return "hd" + getDevLabelSuffix(devId); + } + private String getDevLabelSuffix(int deviceIndex) { + if (deviceIndex < 0) { + return ""; + } + + int base = 'z' - 'a' + 1; + String labelSuffix = ""; + do { + char suffix = (char)('a' + (deviceIndex % base)); + labelSuffix = suffix + labelSuffix; + deviceIndex = (deviceIndex / base) - 1; + } while (deviceIndex >= 0); + + return labelSuffix; } public void defFileBasedDisk(String filePath, int devId, DiskBus bus, DiskFmtType diskFmtType) { @@ -715,11 +733,6 @@ public class LibvirtVMDef { return _diskFmtType; } - public int getDiskSeq() { - char suffix = _diskLabel.charAt(_diskLabel.length() - 1); - return suffix - 'a'; - } - public void setBytesReadRate(Long bytesReadRate) { _bytesReadRate = bytesReadRate; } diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index 2a9f02d7b5c..87b003d6bbe 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -1437,9 +1437,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // that supported by hypervisor if (deviceId == null || deviceId.longValue() != 0) { List existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK); - int maxDataVolumesSupported = getMaxDataVolumesSupported(vm); - if (existingDataVolumes.size() >= maxDataVolumesSupported) { - throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxDataVolumesSupported + "). 
Please specify another VM."); + int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm) - 2; //IDs: 0 (ROOT) and 3 (CD-ROM) are reserved + if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) { + throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM."); } } @@ -2462,7 +2462,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic DataTO volTO = volFactory.getVolume(volumeToAttach.getId()).getTO(); - deviceId = getDeviceId(vm.getId(), deviceId); + deviceId = getDeviceId(vm, deviceId); DiskTO disk = storageMgr.getDiskWithThrottling(volTO, volumeToAttach.getVolumeType(), deviceId, volumeToAttach.getPath(), vm.getServiceOfferingId(), volumeToAttach.getDiskOfferingId()); @@ -2520,7 +2520,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _volsDao.update(volumeToAttach.getId(), volumeToAttach); } } else { - deviceId = getDeviceId(vm.getId(), deviceId); + deviceId = getDeviceId(vm, deviceId); _volsDao.attachVolume(volumeToAttach.getId(), vm.getId(), deviceId); } @@ -2558,7 +2558,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _hostDao.loadDetails(host); maxDataVolumesSupported = _hypervisorCapabilitiesDao.getMaxDataVolumesLimit(host.getHypervisorType(), host.getDetail("product_version")); } - if (maxDataVolumesSupported == null) { + if (maxDataVolumesSupported == null || maxDataVolumesSupported.intValue() <= 0) { maxDataVolumesSupported = 6; // 6 data disks by default if nothing // is specified in // 'hypervisor_capabilities' table @@ -2567,28 +2567,32 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic return maxDataVolumesSupported.intValue(); } - private Long getDeviceId(long vmId, Long deviceId) { + private Long getDeviceId(UserVmVO vm, Long deviceId) { // allocate deviceId - List vols = _volsDao.findByInstance(vmId); + int maxDeviceId = getMaxDataVolumesSupported(vm) - 1; + List vols = _volsDao.findByInstance(vm.getId()); if (deviceId != null) { - if (deviceId.longValue() > 15 || deviceId.longValue() == 3) { - throw new RuntimeException("deviceId should be 1,2,4-15"); + if (deviceId.longValue() <= 0 || deviceId.longValue() > maxDeviceId || deviceId.longValue() == 3) { + throw new RuntimeException("deviceId should be 1,2,4-" + maxDeviceId); } for (VolumeVO vol : vols) { if (vol.getDeviceId().equals(deviceId)) { - throw new RuntimeException("deviceId " + deviceId + " is used by vm" + vmId); + throw new RuntimeException("deviceId " + deviceId + " is used by vm " + vm.getId()); } } } else { // allocate deviceId here List devIds = new ArrayList(); - for (int i = 1; i < 15; i++) { + for (int i = 1; i <= maxDeviceId; i++) { devIds.add(String.valueOf(i)); } devIds.remove("3"); for (VolumeVO vol : vols) { devIds.remove(vol.getDeviceId().toString().trim()); } + if (devIds.isEmpty()) { + throw new RuntimeException("All device Ids are used by vm " + vm.getId()); + } deviceId = Long.parseLong(devIds.iterator().next()); } From af9c6b7bdcab5c0613f82931ca403f8d293781cd Mon Sep 17 00:00:00 2001 From: Sudharma Jain Date: Tue, 28 Mar 2017 17:57:28 +0530 Subject: [PATCH 08/59] CLOUDSTACK-9851 travis CI build failure after merge of PR#1953 (cherry picked from commit 7348f9412a20ecff2f7a9f26633013e06e5394dd) Signed-off-by: Rohit Yadav --- server/src/com/cloud/storage/VolumeApiServiceImpl.java | 9 +++++---- 1 file changed, 5 
insertions(+), 4 deletions(-) diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index 87b003d6bbe..fcdc660b48e 100644 --- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -1437,7 +1437,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // that supported by hypervisor if (deviceId == null || deviceId.longValue() != 0) { List existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK); - int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm) - 2; //IDs: 0 (ROOT) and 3 (CD-ROM) are reserved + int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm); if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) { throw new InvalidParameterValueException("The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM."); } @@ -2569,11 +2569,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic private Long getDeviceId(UserVmVO vm, Long deviceId) { // allocate deviceId - int maxDeviceId = getMaxDataVolumesSupported(vm) - 1; + int maxDevices = getMaxDataVolumesSupported(vm) + 2; // add 2 to consider devices root volume and cdrom + int maxDeviceId = maxDevices - 1; List vols = _volsDao.findByInstance(vm.getId()); if (deviceId != null) { - if (deviceId.longValue() <= 0 || deviceId.longValue() > maxDeviceId || deviceId.longValue() == 3) { - throw new RuntimeException("deviceId should be 1,2,4-" + maxDeviceId); + if (deviceId.longValue() < 0 || deviceId.longValue() > maxDeviceId || deviceId.longValue() == 3) { + throw new RuntimeException("deviceId should be 0,1,2,4-" + maxDeviceId); } for (VolumeVO vol : vols) { if (vol.getDeviceId().equals(deviceId)) { From 0fbbbdb83ac541efa028558d436503c361c4a5a3 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 30 Dec 2016 03:01:12 +0530 Subject: [PATCH 09/59] CLOUDSTACK-9720: [VMware] template_spool_ref table is not getting updated with correct template physical size in template_size column. (cherry picked from commit 8676b202767d8e8d94e6891a23e0261b07afd2af) Signed-off-by: Rohit Yadav --- .../resource/VmwareStorageProcessor.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index eb18e0b9a80..9c0b317b759 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -168,7 +168,7 @@ public class VmwareStorageProcessor implements StorageProcessor { return null; } - private VirtualMachineMO copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, + private Pair copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage, String templateName, String templateUuid, boolean createSnapshot, Integer nfsVersion) throws Exception { s_logger.info("Executing copyTemplateFromSecondaryToPrimary. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + @@ -215,6 +215,12 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception(msg); } + OVAProcessor processor = new OVAProcessor(); + Map params = new HashMap(); + params.put(StorageLayer.InstanceConfigKey, _storage); + processor.configure("OVA Processor", params); + long virtualSize = processor.getTemplateVirtualSize(secondaryMountPoint + "/" + templatePathAtSecondaryStorage, templateName); + if (createSnapshot) { if (vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) { // the same template may be deployed with multiple copies at per-datastore per-host basis, @@ -232,7 +238,7 @@ public class VmwareStorageProcessor implements StorageProcessor { } } - return vmMo; + return new Pair(vmMo, new Long(virtualSize)); } @Override @@ -308,6 +314,7 @@ public class VmwareStorageProcessor implements StorageProcessor { DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true); DatastoreMO dsMo = null; + Pair vmInfo = null; if (templateMo == null) { if (s_logger.isInfoEnabled()) { @@ -329,9 +336,10 @@ public class VmwareStorageProcessor implements StorageProcessor { dsMo = new DatastoreMO(context, morDs); if (managed) { - VirtualMachineMO vmMo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(), + vmInfo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(), managedStoragePoolRootVolumeName, false, _nfsVersion); + VirtualMachineMO vmMo = vmInfo.first(); vmMo.unregisterVm(); String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, managedStoragePoolRootVolumeName, @@ -346,7 +354,7 @@ public class VmwareStorageProcessor implements StorageProcessor { dsMo.deleteFolder(folderToDelete, dcMo.getMor()); } else { - copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(), + vmInfo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(), templateUuidName, true, _nfsVersion); } } else { @@ -364,7 +372,7 @@ public class VmwareStorageProcessor implements StorageProcessor { else { newTemplate.setPath(templateUuidName); } - newTemplate.setSize(new Long(0)); // TODO: replace 0 with correct template physical_size. + newTemplate.setSize((vmInfo != null)? vmInfo.second() : new Long(0)); return new CopyCmdAnswer(newTemplate); } catch (Throwable e) { From 859b2150cdc834df87f11d88482304f1dc4c6ebd Mon Sep 17 00:00:00 2001 From: Nitesh Sarda Date: Tue, 14 Feb 2017 21:29:21 +0530 Subject: [PATCH 10/59] CLOUDSTACK-9784 : GPU detail not displayed in GPU tab of management server UI. 
(cherry picked from commit 36abc63b91cce72d1bee96eea630252c348f2450)
Signed-off-by: Rohit Yadav
---
 ui/scripts/system.js | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/ui/scripts/system.js b/ui/scripts/system.js
index 02734bf2df3..5264b22e586 100644
--- a/ui/scripts/system.js
+++ b/ui/scripts/system.js
@@ -17042,9 +17042,16 @@
                 }
             },
             dataProvider: function (args) {
-                var items = gpugroupObj.vgpu.sort(function(a, b) {
-                    return a.maxvgpuperpgpu >= b.maxvgpuperpgpu;
-                });
+                var items;
+
+                if(typeof(gpugroupObj.vgpu) != "undefined") {
+                    items = gpugroupObj.vgpu.sort(function(a, b) {
+                        return a.maxvgpuperpgpu >= b.maxvgpuperpgpu;
+                    });
+                }
+                else {
+                    items = gpugroupObj.vgpu;
+                }
                 $(items).each(function () {
                     this.maxresolution = (this.maxresolutionx == null || this.maxresolutionx == 0 || this.maxresolutiony == null || this.maxresolutiony == 0)

From 73c30f1baa850d6a863849bca0ff84f7f04ad0b5 Mon Sep 17 00:00:00 2001
From: Stefania
Date: Wed, 8 Feb 2017 16:01:34 +0000
Subject: [PATCH 11/59] CLOUDSTACK-9793: Faster ip in subnet check

This change removes an unnecessary conversion from IPNetwork to list in one of the router scripts. This makes the router faster at processing static NAT rules, which can prevent timeouts when attaching or detaching IPs.

(cherry picked from commit d5c5eb10f8a4fb80aa8b9182290948755cdd6026)
Signed-off-by: Rohit Yadav
---
 systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
index e0eb350ead0..efe0d6f7e93 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
@@ -145,7 +145,7 @@ class CsInterface:
     def ip_in_subnet(self, ip):
         ipo = IPAddress(ip)
         net = IPNetwork("%s/%s" % (self.get_ip(), self.get_size()))
-        return ipo in list(net)
+        return ipo in net

     def get_gateway_cidr(self):
         return "%s/%s" % (self.get_gateway(), self.get_size())

From cdc9947d48a1490defa4d5715c50170a92754cf5 Mon Sep 17 00:00:00 2001
From: Priyank Parihar
Date: Fri, 11 Dec 2015 11:05:46 +0530
Subject: [PATCH 12/59] CLOUDSTACK-8841: Storage XenMotion from XS 6.2 to XS 6.5 fails.

(cherry picked from commit 69647b38ce96fb40131ad480a59f034701da5369)
Signed-off-by: Rohit Yadav
---
 server/src/com/cloud/vm/UserVmManagerImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 29a6bbcc6d7..a9dde18a3dd 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -4960,8 +4960,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
         }

         // Check if the source and destination hosts are of the same type and support storage motion.
-        if (!(srcHost.getHypervisorType().equals(destinationHost.getHypervisorType()) && srcHost.getHypervisorVersion().equals(destinationHost.getHypervisorVersion()))) {
-            throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. " + "Source hypervisor type and version: "
+        if (!(srcHost.getHypervisorType().equals(destinationHost.getHypervisorType()))) {
+            throw new CloudRuntimeException("The source and destination hosts are not of the same type. " + "Source hypervisor type and version: "
                 + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: "
                 + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
         }

From c2060987830ab14ba92e2dd9fc66fb5a56c5fed3 Mon Sep 17 00:00:00 2001
From: Will Stevens
Date: Wed, 22 Feb 2017 13:08:11 -0500
Subject: [PATCH 13/59] Fix public IPs not being removed from the VR when deprovisioned

(cherry picked from commit 23f64a13a85b89518860eb3600ed7652ebbc89ba)
Signed-off-by: Rohit Yadav
---
 systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
index efe0d6f7e93..43cd6396587 100755
--- a/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
+++ b/systemvm/patches/debian/config/opt/cloud/bin/cs/CsAddress.py
@@ -106,6 +106,10 @@ class CsAddress(CsDataBag):
                 ip.setAddress(address)
                 logging.info("Address found in DataBag ==> %s" % address)

+                if not address['add'] and not ip.configured():
+                    logging.info("Skipping %s as the add flag is set to %s " % (address['public_ip'], address['add']))
+                    continue
+
                 if ip.configured():
                     logging.info(
                         "Address %s on device %s already configured", ip.ip(), dev)

From 7c58c37f08cca55ab7ea5a37031a91e9d84b693a Mon Sep 17 00:00:00 2001
From: Wido den Hollander
Date: Thu, 2 Feb 2017 17:07:50 +0100
Subject: [PATCH 14/59] ipv6: Set IPv6 CIDR and Gateway in 'nic' profile

Without this information an NPE might be triggered when starting a VR, SSVM or CP, as this information is read from the 'nics' table.

During deployment we should set the IPv6 Gateway and CIDR for the NIC object so that it is persisted to the database.
Signed-off-by: Wido den Hollander
(cherry picked from commit f661b631a13ba7f0c501eb5d1915eab3d097a37e)
Signed-off-by: Rohit Yadav
---
 .../cloudstack/engine/orchestration/NetworkOrchestrator.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
index 60c2694ec58..c4c344dddcc 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java
@@ -1351,6 +1351,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
         nic.setIPv4Address(profile.getIPv4Address());
         nic.setAddressFormat(profile.getFormat());
         nic.setIPv6Address(profile.getIPv6Address());
+        nic.setIPv6Cidr(profile.getIPv6Cidr());
+        nic.setIPv6Gateway(profile.getIPv6Gateway());
         nic.setMacAddress(profile.getMacAddress());
         nic.setIsolationUri(profile.getIsolationUri());
         nic.setBroadcastUri(profile.getBroadCastUri());

From a9050fa42a94b690abc95d88316b44172c59c90e Mon Sep 17 00:00:00 2001
From: Bharat Kumar
Date: Thu, 19 Feb 2015 10:40:16 +0530
Subject: [PATCH 15/59] CLOUDSTACK-8857 listProjects doesn't return tags vmstopped or vmrunning when their value is zero

(cherry picked from commit c6e9d82595aa3461d72907ae75817eae4527ba63)
Signed-off-by: Rohit Yadav
---
 server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java
index b110478e2fc..a8d3b68833d 100644
--- a/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/AccountJoinDaoImpl.java
@@ -162,8 +162,8 @@ public class AccountJoinDaoImpl extends GenericDaoBase<AccountJoinVO, Long> impl
         response.setTemplateAvailable(templateAvail);

         // Get stopped and running VMs
-        response.setVmStopped(account.getVmStopped());
-        response.setVmRunning(account.getVmRunning());
+        response.setVmStopped(account.getVmStopped()!=null ? account.getVmStopped() : 0);
+        response.setVmRunning(account.getVmRunning()!=null ? account.getVmRunning() : 0);

         //get resource limits for networks
         long networkLimit = ApiDBUtils.findCorrectResourceLimit(account.getNetworkLimit(), account.getId(), ResourceType.network);

From 54bd8ee8805f2cc49a9bd2708f25c372f91b160c Mon Sep 17 00:00:00 2001
From: Jayapal
Date: Tue, 14 Feb 2017 14:26:40 +0530
Subject: [PATCH 16/59] CLOUDSTACK-9724: Fixed missing additional public ip on tier network with cleanup

(cherry picked from commit ee726af53bb33c5205d481abc3ab78bd2653f1b6)
Signed-off-by: Rohit Yadav
---
 server/src/com/cloud/network/IpAddressManagerImpl.java | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/server/src/com/cloud/network/IpAddressManagerImpl.java b/server/src/com/cloud/network/IpAddressManagerImpl.java
index e65adb60f7d..43a251ab0ec 100644
--- a/server/src/com/cloud/network/IpAddressManagerImpl.java
+++ b/server/src/com/cloud/network/IpAddressManagerImpl.java
@@ -460,6 +460,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage
             }
         } else {
             if (activeCount != null && activeCount > 0) {
+                if (network.getVpcId() != null) {
+                    // If there is more than one IP in the VPC tier network and services are configured
+                    // on it, network reprogramming after a restart with cleanup has to return true here,
+                    // because the IPs were removed from the VR. A VPC tier restart with cleanup does not
+                    // reboot the VR, so an explicit ipassoc is needed.
+                    return true;
+                }
                 continue;
             } else if (addCount != null && addCount.longValue() == totalCount.longValue()) {
                 s_logger.trace("All rules are in Add state, have to associate IP with the backend");

From 120ac33d7a4575dcf908a34f4697647ae929a393 Mon Sep 17 00:00:00 2001
From: Anshul Gangwar
Date: Mon, 20 Apr 2015 13:13:48 +0530
Subject: [PATCH 17/59] CLOUDSTACK-9682: Block VM migration to a storage which is in maintenance mode.

If the destination pool is in maintenance mode do not allow a volume to be migrated to the storage pool. Fixed it for volume migration and vm migration with volume.

(cherry picked from commit 8ef94819dabfc7b027d638473e9454e9f73ac49d)
Signed-off-by: Rohit Yadav
---
 server/src/com/cloud/storage/VolumeApiServiceImpl.java | 3 +++
 server/src/com/cloud/vm/UserVmManagerImpl.java | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index fcdc660b48e..eedb54d724a 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -1921,6 +1921,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         StoragePool destPool = (StoragePool)dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
         if (destPool == null) {
             throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId);
+        } else if (destPool.isInMaintenance()) {
+            throw new InvalidParameterValueException("Cannot migrate volume " + vol + " to the destination storage pool " + destPool.getName() +
+                    " as the storage pool is in maintenance mode.");
         }

         if (_volumeMgr.volumeOnSharedStoragePool(vol)) {

diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index a9dde18a3dd..45f6ec25347 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -5002,6 +5002,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
                 throw new InvalidParameterValueException("There is no volume present with the given id " + entry.getKey());
             } else if (pool == null) {
                 throw new InvalidParameterValueException("There is no storage pool present with the given id " + entry.getValue());
+            } else if (pool.isInMaintenance()) {
+                throw new InvalidParameterValueException("Cannot migrate volume " + volume + " to the destination storage pool " + pool.getName() +
+                        " as the storage pool is in maintenance mode.");
             } else {
                 // Verify the volume given belongs to the vm.
if (!vmVolumes.contains(volume)) { From 1d25582ed603f4c4558a5a78fb02a2378cc4989c Mon Sep 17 00:00:00 2001 From: nvazquez Date: Fri, 20 Jan 2017 13:20:37 -0300 Subject: [PATCH 18/59] CLOUDSTACK-9752: [Vmware] Optimization of volume attachness to vm (cherry picked from commit 49dadc5505d85323b0864f50a2a8e36dd05805e5) Signed-off-by: Rohit Yadav --- .../storage/resource/VmwareStorageProcessor.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 9c0b317b759..30ad3ca60ae 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -1571,11 +1571,15 @@ public class VmwareStorageProcessor implements StorageProcessor { } synchronized (this) { - // s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. file: " + volumeDatastorePath); - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid.toString(), dcMo); - - vmMo.createDisk(volumeDatastorePath, (int)(volume.getSize() / (1024L * 1024L)), morDatastore, vmMo.getScsiDeviceControllerKey()); - vmMo.detachDisk(volumeDatastorePath, false); + try { + vmMo.createDisk(volumeDatastorePath, (int)(volume.getSize() / (1024L * 1024L)), morDatastore, vmMo.getScsiDeviceControllerKey()); + vmMo.detachDisk(volumeDatastorePath, false); + } + catch (Exception e) { + s_logger.error("Deleting file " + volumeDatastorePath + " due to error: " + e.getMessage()); + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, volumeUuid.toString(), dcMo); + throw new CloudRuntimeException("Unable to create volume due to: " + e.getMessage()); + } } VolumeObjectTO newVol = new VolumeObjectTO(); From 5eb63975f70119479450906ed5355200b289c657 Mon Sep 17 00:00:00 2001 From: Jayapal Date: Thu, 29 Dec 2016 15:39:53 +0530 Subject: [PATCH 19/59] CLOUDSTACK-9715: Update somaxconn value to default value (cherry picked from commit 45c3d94d83cc70a80f493f2de62406853f06d6ac) Signed-off-by: Rohit Yadav --- systemvm/patches/debian/config/etc/sysctl.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemvm/patches/debian/config/etc/sysctl.conf b/systemvm/patches/debian/config/etc/sysctl.conf index 15648b35760..b8a8fe01bd9 100644 --- a/systemvm/patches/debian/config/etc/sysctl.conf +++ b/systemvm/patches/debian/config/etc/sysctl.conf @@ -51,7 +51,7 @@ net.ipv4.tcp_timestamps = 0 net.ipv4.netfilter.ip_conntrack_max = 1000000 net.ipv4.tcp_tw_reuse = 1 net.ipv4.tcp_max_tw_buckets = 1000000 -net.core.somaxconn = 1000000 +net.core.somaxconn = 65535 net.nf_conntrack_max = 1000000 net.netfilter.nf_conntrack_max = 1000000 From c14aa40a47312280f295dfcd72480729df42a606 Mon Sep 17 00:00:00 2001 From: Rajani Karuturi Date: Thu, 3 Nov 2016 10:58:22 +0530 Subject: [PATCH 20/59] CLOUDSTACK-8950 Hypervisor Parameter check is not performed for registerTemplate and getUploadParamsForTemplate API's Any string is allowed as hypervisor type from the api. HypervisorType.getType() tries to validate with the enums and if nothing matches, sets the type as None. Added a check to not allow None hypervisor type when registering. 
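For illustration, the supported-type list used in the new error message can be rebuilt in plain Java. The enum below is abridged and hypothetical; the patch itself produces the string via EnumUtils.listValues(HypervisorType.values()) with the "None, " entry stripped:

    enum HypervisorType { None, XenServer, KVM, VMware, Hyperv, Ovm, LXC } // abridged for illustration

    static String supportedTypes() {
        final StringBuilder sb = new StringBuilder();
        for (HypervisorType t : HypervisorType.values()) {
            if (t == HypervisorType.None) {
                continue; // None is the sentinel for "unrecognized", never a valid input
            }
            if (sb.length() > 0) {
                sb.append(", ");
            }
            sb.append(t);
        }
        return sb.toString(); // "XenServer, KVM, VMware, Hyperv, Ovm, LXC"
    }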
(cherry picked from commit cc06c5189a377fc6bc4b7e8bbed56e9058588863) Signed-off-by: Rohit Yadav --- .../cloud/template/TemplateAdapterBase.java | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index c5d0c5b261f..530c562b28d 100644 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -270,10 +270,15 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat zoneId = -1L; } - return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), - cmd.getRequiresHvm(), cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, - HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, - cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER); + HypervisorType hypervisorType = HypervisorType.getType(cmd.getHypervisor()); + if(hypervisorType == HypervisorType.None) { + throw new InvalidParameterValueException("Hypervisor Type: " + cmd.getHypervisor() + " is invalid. Supported Hypervisor types are " + + EnumUtils.listValues(HypervisorType.values()).replace("None, ", "")); + } + + return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), + cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true, + cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? TemplateType.ROUTING : TemplateType.USER); } @@ -293,9 +298,15 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat zoneId = -1L; } + HypervisorType hypervisorType = HypervisorType.getType(cmd.getHypervisor()); + if(hypervisorType == HypervisorType.None) { + throw new InvalidParameterValueException("Hypervisor Type: " + cmd.getHypervisor() + " is invalid. Supported Hypervisor types are " + + EnumUtils.listValues(HypervisorType.values()).replace("None, ", "")); + } + return prepare(false, CallContext.current().getCallingUserId(), cmd.getName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(), null, cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, - HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, + hypervisorType, cmd.getChecksum(), true, cmd.getTemplateTag(), owner, cmd.getDetails(), cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), isRouting ? 
TemplateType.ROUTING : TemplateType.USER); } From 38966ee7b7b6b42b0c1c3a2c78d4a08a0172e56b Mon Sep 17 00:00:00 2001 From: sanjeevn Date: Mon, 7 Dec 2015 17:55:50 +0530 Subject: [PATCH 21/59] Marvin test to verify that adding TCP ports 500,4500 and 1701 in vpn should not fail Bug-Id: CS-43653 Reviewed-by: Self Made changes as per pavanb018 review comments (cherry picked from commit 50cbaf9f19a9179412478ec147c70dcf57763f59) Signed-off-by: Rohit Yadav --- test/integration/component/test_vpn_users.py | 75 +++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/test/integration/component/test_vpn_users.py b/test/integration/component/test_vpn_users.py index f5bad7e31c9..103ff1c0fca 100644 --- a/test/integration/component/test_vpn_users.py +++ b/test/integration/component/test_vpn_users.py @@ -29,13 +29,15 @@ from marvin.lib.base import ( Vpn, VpnUser, Configurations, - NATRule + NATRule, + FireWallRule ) from marvin.lib.common import (get_domain, get_zone, get_template ) -from marvin.lib.utils import cleanup_resources +from marvin.lib.utils import cleanup_resources, validateList +from marvin.codes import PASS class Services: @@ -451,3 +453,72 @@ class TestVPNUsers(cloudstackTestCase): self.fail("Domain admin should be allowed to create VPN user: %s" % e) return + + @attr(tags=["advanced", "advancedns"], required_hardware="false") + def test_08_add_TCP_PF_Rule_In_VPN(self): + """ + Test to add TCP Port Forwarding rule for specific ports(500,1701 and 4500) in VPN + """ + # Steps for verification + # 1. Enable vpn on SourceNAT IP address + # 2. Configure PF with TCP ports 500,1701 and 4500. It should be allowed + # Should not conflict with UPD ports used for VPN + + vm_res = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id, + listall=True + ) + self.assertEqual( + validateList(vm_res)[0], + PASS, + "Failed to list virtual machine" + ) + network_id = vm_res[0].nic[0].networkid + src_nat_list = PublicIPAddress.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True, + issourcenat=True, + associatednetworkid=network_id + ) + self.assertEqual( + validateList(src_nat_list)[0], + PASS, + "Failed to list source nat ip address" + ) + ip = src_nat_list[0] + try: + vpn = Vpn.create( + self.apiclient, + publicipid=ip.id, + account=self.account.name, + domainid=self.account.domainid, + ) + self.assertIsNotNone( + vpn, + "Failed to create remote access vpn" + ) + except Exception as e: + self.fail("Failed to enable vpn on SourceNAT IP with error: %s" % e) + + #Create PF rule with TCP ports 500,4500 and 1701 + self.services['natrule']['protocol']="TCP" + for port in [500, 4500, 1701]: + self.services['natrule']['privateport'] = port + self.services['natrule']['publicport'] = port + try: + nat = NATRule.create( + self.apiclient, + self.virtual_machine, + self.services["natrule"], + ip.id + ) + self.assertIsNotNone( + nat, + "Failed to add PF rule with tcp parts matching vpn" + ) + except Exception as e: + self.fail("Creating PF rule for TCP port %s in VPN failed : %s" % (port, e)) + return From fa7ebaaaf8683364947f113a444921f4ec5b0f53 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 6 Jan 2017 05:18:56 +0530 Subject: [PATCH 22/59] CLOUDSTACK-9731: Hardcoded label appears on the Add zone wizard (cherry picked from commit 9a2f3d95c1ba46ce0f52e6d409cdb1d2db902932) Signed-off-by: Rohit Yadav --- ui/scripts/ui-custom/zoneWizard.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/ui/scripts/ui-custom/zoneWizard.js b/ui/scripts/ui-custom/zoneWizard.js index e659df0c09d..9ae599b7e5b 100644 --- a/ui/scripts/ui-custom/zoneWizard.js +++ b/ui/scripts/ui-custom/zoneWizard.js @@ -656,7 +656,7 @@ var $physicalNetworkItem = $('<div>').addClass('select-container multi'); var $deleteButton = $('<div>').addClass('button remove physical-network') .attr({ - title: 'label.remove.this.physical.network' + title: _l('label.remove.this.physical.network') }) .append('<span>').addClass('icon').html('&nbsp;'); var $icon = $('<div>
').addClass('physical-network-icon'); From 1f44e884ad4f67c208c0f34f202932f87e00aeb2 Mon Sep 17 00:00:00 2001 From: Nick Livens Date: Wed, 13 Apr 2016 17:08:50 +0200 Subject: [PATCH 23/59] CLOUDSTACK-9321 : Multiple Internal LB rules (more than one Internal LB rule with same source IP address) are not getting resolved in the corresponding InternalLbVm instance's haproxy.cfg file CLOUDSTACK-9321 : Adding component tests for VPC Network functionality - Internal LB rules CLOUDSTACK-9321 : Extending Nuage VSP Internal LB Marvin tests Co-Authored-By: Prashanth Manthena , Frank Maximus (cherry picked from commit 62e858131fcc0650d61699efffcf7eb57721e1b1) Signed-off-by: Rohit Yadav --- .../element/LoadBalancingServiceProvider.java | 2 + .../element/ElasticLoadBalancerElement.java | 5 + .../F5ExternalLoadBalancerElement.java | 5 + .../element/InternalLoadBalancerElement.java | 5 + .../network/element/NetscalerElement.java | 5 + .../com/cloud/network/element/OvsElement.java | 5 + .../network/element/VirtualRouterElement.java | 5 + .../lb/LoadBalancingRulesManagerImpl.java | 18 +- .../vpc/dao/MockVpcVirtualRouterElement.java | 4 + .../test_vpc_network_internal_lbrules.py | 1205 ++++++++++ .../nuagevsp/test_nuage_vpc_internal_lb.py | 1985 +++++++++++------ tools/marvin/marvin/config/test_data.py | 16 + 12 files changed, 2541 insertions(+), 719 deletions(-) create mode 100644 test/integration/component/test_vpc_network_internal_lbrules.py diff --git a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java index cb3155f9c05..1bb37be970d 100644 --- a/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java +++ b/api/src/com/cloud/network/element/LoadBalancingServiceProvider.java @@ -46,4 +46,6 @@ public interface LoadBalancingServiceProvider extends NetworkElement, IpDeployin boolean validateLBRule(Network network, LoadBalancingRule rule); List updateHealthChecks(Network network, List lbrules); + + boolean handlesOnlyRulesInTransitionState(); } diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java index d640b62f1ee..87ecf0071f1 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/element/ElasticLoadBalancerElement.java @@ -221,4 +221,9 @@ public class ElasticLoadBalancerElement extends AdapterBase implements LoadBalan return null; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } + } diff --git a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 2527e0d9417..bd54d954b8d 100644 --- a/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java +++ b/plugins/network-elements/f5/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -530,4 +530,9 @@ public class F5ExternalLoadBalancerElement extends ExternalLoadBalancerDeviceMan // TODO Auto-generated method stub return null; } + + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } } diff --git 
a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index f3ebeb0bae9..07c5a2dfcaa 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -423,6 +423,11 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala return null; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return false; + } + private static Map> setCapabilities() { Map> capabilities = new HashMap>(); diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index 99db9ee8158..53225be61e1 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -945,6 +945,11 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl return null; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } + @Override public List getLBHealthChecks(Network network, List rules) throws ResourceUnavailableException { return super.getLBHealthChecks(network, rules); diff --git a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java index 02248c5844d..bfb92f92601 100644 --- a/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java +++ b/plugins/network-elements/ovs/src/com/cloud/network/element/OvsElement.java @@ -554,6 +554,11 @@ StaticNatServiceProvider, IpDeployer { return null; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } + private boolean canHandleLbRules(final List rules) { final Map lbCaps = getCapabilities().get(Service.Lb); if (!lbCaps.isEmpty()) { diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index d802188e4c4..64c8f4ab314 100644 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -1120,6 +1120,11 @@ NetworkMigrationResponder, AggregatedCommandExecutor { return null; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } + private boolean canHandleLbRules(final List rules) { final Map lbCaps = getCapabilities().get(Service.Lb); if (!lbCaps.isEmpty()) { diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index 8730a7a3a8a..ddc6b0baba9 100644 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -1796,8 +1796,24 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // entries will be rollbacked. 
lbs = Arrays.asList(lb); } else { + boolean onlyRulesInTransitionState = true; + for (LoadBalancingServiceProvider lbElement : _lbProviders) { + Provider provider = lbElement.getProvider(); + boolean isLbProvider = _networkModel.isProviderSupportServiceInNetwork(lb.getNetworkId(), Service.Lb, provider); + if (!isLbProvider) { + continue; + } + onlyRulesInTransitionState = lbElement.handlesOnlyRulesInTransitionState(); + break; + } + // get all rules in transition state - lbs = _lbDao.listInTransitionStateByNetworkIdAndScheme(lb.getNetworkId(), lb.getScheme()); + if (onlyRulesInTransitionState) { + lbs = _lbDao.listInTransitionStateByNetworkIdAndScheme(lb.getNetworkId(), lb.getScheme()); + } else { + lbs = _lbDao.listByNetworkIdAndScheme(lb.getNetworkId(), lb.getScheme()); + } + } return applyLoadBalancerRules(lbs, true); } diff --git a/server/test/com/cloud/vpc/dao/MockVpcVirtualRouterElement.java b/server/test/com/cloud/vpc/dao/MockVpcVirtualRouterElement.java index 0aa2f57b580..5553bb16103 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcVirtualRouterElement.java +++ b/server/test/com/cloud/vpc/dao/MockVpcVirtualRouterElement.java @@ -28,4 +28,8 @@ public class MockVpcVirtualRouterElement extends VpcVirtualRouterElement { return true; } + @Override + public boolean handlesOnlyRulesInTransitionState() { + return true; + } } diff --git a/test/integration/component/test_vpc_network_internal_lbrules.py b/test/integration/component/test_vpc_network_internal_lbrules.py new file mode 100644 index 00000000000..9523083e13b --- /dev/null +++ b/test/integration/component/test_vpc_network_internal_lbrules.py @@ -0,0 +1,1205 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
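A note on the server-side change in LoadBalancingRulesManagerImpl above: on revoke, the manager now asks the network's active LB provider whether it only handles rules in transition state (Add/Revoke). Providers such as the virtual router answer true and receive just the transitioning rules; the Internal LB element answers false, so every rule for the network and scheme is re-sent and the InternalLbVm's haproxy.cfg is regenerated with all remaining rules sharing the source IP. A minimal sketch of that selection logic, transliterated to Python with hypothetical stand-ins for the Java _lbProviders list and _lbDao:

```python
def rules_to_apply(network_id, scheme, lb_providers, lb_dao):
    """Pick which LB rules to (re)apply for a network on revoke."""
    only_transition = True
    for element in lb_providers:
        # Only consult the provider actually serving the Lb service
        # on this network (mirrors isProviderSupportServiceInNetwork).
        if not element.is_lb_provider_for(network_id):
            continue
        only_transition = element.handles_only_rules_in_transition_state()
        break
    if only_transition:
        # e.g. virtual router: incremental updates are sufficient.
        return lb_dao.list_in_transition_state(network_id, scheme)
    # e.g. Internal LB element: re-send everything so the whole
    # haproxy.cfg can be rebuilt, fixing the multiple-rules case.
    return lb_dao.list_by_network_and_scheme(network_id, scheme)
```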
+ +""" Component tests for VPC network functionality - Internal Load Balancing Rules +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.lib.base import (Account, + ApplicationLoadBalancer, + Network, + NetworkACL, + NetworkOffering, + PublicIPAddress, + Router, + ServiceOffering, + StaticNATRule, + VirtualMachine, + VPC, + VpcOffering) +from marvin.lib.common import (get_domain, + get_template, + get_zone) +from marvin.lib.utils import cleanup_resources +from marvin.cloudstackAPI import (listInternalLoadBalancerVMs, + restartVPC, + stopInternalLoadBalancerVM, + startInternalLoadBalancerVM) +# Import System Modules +from nose.plugins.attrib import attr +import copy +import socket +import time + + +class TestVPCNetworkInternalLBRules(cloudstackTestCase): + """Test VPC network functionality with Internal Load Balancing Rules + """ + + @classmethod + def setUpClass(cls): + # We want to fail quicker, if it's a failure + socket.setdefaulttimeout(60) + + test_client = super(TestVPCNetworkInternalLBRules, cls).getClsTestClient() + cls.api_client = test_client.getApiClient() + cls.db_client = test_client.getDbConnection() + cls.test_data = test_client.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.zone = get_zone(cls.api_client) + cls.domain = get_domain(cls.api_client) + cls.template = get_template(cls.api_client, + cls.zone.id, + cls.test_data["ostype"] + ) + cls.test_data["virtual_machine"]["zoneid"] = cls.zone.id + cls.test_data["virtual_machine"]["template"] = cls.template.id + + # Create service offering + cls.service_offering = ServiceOffering.create(cls.api_client, + cls.test_data["service_offering"] + ) + cls._cleanup = [cls.service_offering] + return + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + print ("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + # Create an account + self.account = Account.create(self.api_client, + self.test_data["account"], + admin=True, + domainid=self.domain.id + ) + self.cleanup = [self.account] + + # Creating a VPC offering + self.debug("Creating a VPC offering..") + self.vpc_off = VpcOffering.create(self.api_client, self.test_data["vpc_offering_multi_lb"]) + self.cleanup.append(self.vpc_off) + self.debug("Enabling the VPC offering created") + self.vpc_off.update(self.api_client, state='Enabled') + + # Creating a VPC + self.debug("Creating a VPC in the account: %s" % self.account.name) + testdata = self.test_data["vpc"] + testdata["name"] = "TestVPC" + testdata["displaytext"] = "TestVPC" + testdata["cidr"] = "10.1.1.1/16" + self.vpc = VPC.create(self.api_client, + testdata, + vpcofferingid=self.vpc_off.id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid + ) + + # Creating network offerings + self.debug("Creating Network offering with Internal LB service...") + self.net_off_1 = NetworkOffering.create(self.api_client, + self.test_data["network_offering_internal_lb"], + conservemode=False) + self.cleanup.append(self.net_off_1) + self.debug("Enabling the Network offering created") + self.net_off_1.update(self.api_client, state="Enabled") + + self.debug("Creating Network offering without Internal LB service...") + net_offering = copy.deepcopy(self.test_data["network_offering_internal_lb"]) + net_offering["name"] = "Network offering without internal lb service" + net_offering["displaytext"] = "Network offering without internal 
lb service" + net_offering["supportedservices"] = "Vpn,Dhcp,Dns,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL" + del net_offering["serviceProviderList"]["Lb"] + del net_offering["serviceCapabilityList"]["Lb"] + self.net_off_2 = NetworkOffering.create(self.api_client, + net_offering, + conservemode=False) + self.cleanup.append(self.net_off_2) + self.debug("Enabling the Network offering created") + self.net_off_2.update(self.api_client, state="Enabled") + return + + def tearDown(self): + try: + # Clean up, terminate the created network offerings + cleanup_resources(self.api_client, self.cleanup) + except Exception as e: + self.debug("Warning: Exception during cleanup : %s" % e) + return + + # create_Network - Creates network with the given Network offering in the VPC + def create_Network(self, nw_off, gateway="10.1.1.1"): + self.debug("Creating a network in the account - %s" % self.account.name) + self.test_data["network"]["netmask"] = "255.255.255.0" + network = Network.create(self.api_client, + self.test_data["network"], + accountid=self.account.name, + domainid=self.account.domainid, + networkofferingid=nw_off.id, + zoneid=self.zone.id, + gateway=gateway, + vpcid=self.vpc.id, + ) + self.debug("Created network with ID - %s" % network.id) + return network + + # create_VM - Creates VM in the given network + def create_VM(self, network): + self.debug("Creating VM in network with ID - %s in the account - %s" % (network.id, self.account.name)) + vm = VirtualMachine.create(self.api_client, + self.test_data["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + templateid=self.template.id, + zoneid=self.zone.id, + networkids=[str(network.id)], + hostid=None + ) + self.debug("Created VM with ID - %s in network with ID - %s" % (vm.id, network.id)) + return vm + + # restart_Vpc - Restarts the given VPC with/without cleanup + def restart_Vpc(self, vpc, cleanup=None): + self.debug("Restarting VPC with ID - %s" % vpc.id) + cmd = restartVPC.restartVPCCmd() + cmd.id = vpc.id + cmd.cleanup = cleanup + self.api_client.restartVPC(cmd) + self.debug("Restarted VPC with ID - %s" % vpc.id) + + # get_Router - Returns router for the given network + def get_Router(self, network): + self.debug("Finding the virtual router for network with ID - %s" % network.id) + routers = Router.list(self.api_client, + networkid=network.id, + listall=True + ) + self.assertEqual(isinstance(routers, list), True, + "List routers should return a valid virtual router for network" + ) + return routers[0] + + # create_Internal_LB_Rule - Creates Internal LB rule in the given VPC network + def create_Internal_LB_Rule(self, network, vm_array=None, services=None, source_ip=None): + self.debug("Creating Internal LB rule in VPC network with ID - %s" % network.id) + if not services: + services = self.test_data["internal_lbrule"] + int_lb_rule = ApplicationLoadBalancer.create(self.api_client, + services=services, + sourcenetworkid=network.id, + networkid=network.id, + sourceipaddress=source_ip + ) + self.debug("Created Internal LB rule") + # Assigning VMs to the created Internal Load Balancer rule + if vm_array: + self.debug("Assigning virtual machines - %s to the created Internal LB rule" % vm_array) + int_lb_rule.assign(self.api_client, vms=vm_array) + self.debug("Assigned VMs to the created Internal LB rule") + return int_lb_rule + + # validate_Internal_LB_Rule - Validates the given Internal LB rule, + # matches the given Internal LB rule name and state 
against the list of Internal LB rules fetched + def validate_Internal_LB_Rule(self, int_lb_rule, state=None, vm_array=None): + """Validates the Internal LB Rule""" + self.debug("Check if the Internal LB Rule is created successfully ?") + int_lb_rules = ApplicationLoadBalancer.list(self.api_client, + id=int_lb_rule.id + ) + self.assertEqual(isinstance(int_lb_rules, list), True, + "List Internal LB Rule should return a valid list" + ) + self.assertEqual(int_lb_rule.name, int_lb_rules[0].name, + "Name of the Internal LB Rule should match with the returned list data" + ) + if state: + self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state, + "Internal LB Rule state should be '%s'" % state + ) + if vm_array: + instance_ids = [instance.id for instance in int_lb_rules[0].loadbalancerinstance] + for vm in vm_array: + self.assertEqual(vm.id in instance_ids, True, + "Internal LB instance list should have the VM with ID - %s" % vm.id + ) + self.debug("Internal LB Rule creation successfully validated for %s" % int_lb_rule.name) + + # list_InternalLbVms - Lists deployed Internal LB VM instances + def list_InternalLbVms(self, network_id=None, source_ip=None): + listInternalLoadBalancerVMsCmd = listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd() + listInternalLoadBalancerVMsCmd.account = self.account.name + listInternalLoadBalancerVMsCmd.domainid = self.account.domainid + if network_id: + listInternalLoadBalancerVMsCmd.networkid = network_id + internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(listInternalLoadBalancerVMsCmd) + if source_ip: + return [internal_lb_vm for internal_lb_vm in internal_lb_vms + if str(internal_lb_vm.guestipaddress) == source_ip] + else: + return internal_lb_vms + + # get_InternalLbVm - Returns Internal LB VM instance for the given VPC network and source ip + def get_InternalLbVm(self, network, source_ip): + self.debug("Finding the InternalLbVm for network with ID - %s and source IP address - %s" % + (network.id, source_ip)) + internal_lb_vms = self.list_InternalLbVms(network.id, source_ip) + self.assertEqual(isinstance(internal_lb_vms, list), True, + "List InternalLbVms should return a valid list" + ) + return internal_lb_vms[0] + + # stop_InternalLbVm - Stops the given Internal LB VM instance + def stop_InternalLbVm(self, int_lb_vm, force=None): + self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id) + cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd() + cmd.id = int_lb_vm.id + if force: + cmd.forced = force + self.api_client.stopInternalLoadBalancerVM(cmd) + + # start_InternalLbVm - Starts the given Internal LB VM instance + def start_InternalLbVm(self, int_lb_vm): + self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id) + cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd() + cmd.id = int_lb_vm.id + self.api_client.startInternalLoadBalancerVM(cmd) + + # check_InternalLbVm_state - Checks if the Internal LB VM instance of the given VPC network and source IP is in the + # expected state form the list of fetched Internal LB VM instances + def check_InternalLbVm_state(self, network, source_ip, state=None): + self.debug("Check if the InternalLbVm is in state - %s" % state) + internal_lb_vms = self.list_InternalLbVms(network.id, source_ip) + self.assertEqual(isinstance(internal_lb_vms, list), True, + "List InternalLbVm should return a valid list" + ) + if state: + self.assertEqual(internal_lb_vms[0].state, state, + "InternalLbVm is not in the expected state" + ) + self.debug("InternalLbVm instance - %s is 
in the expected state - %s" % (internal_lb_vms[0].name, state)) + + # create_NetworkAclRule - Creates Ingress Network ACL rule in the given network + def create_NetworkAclRule(self, rule, network): + self.debug("Adding Ingress NetworkACL rule - %s" % rule) + return NetworkACL.create(self.api_client, + networkid=network.id, + services=rule, + traffictype="Ingress" + ) + + # acquire_PublicIPAddress - Acquires public IP address for the VPC + def acquire_PublicIPAddress(self): + self.debug("Acquiring public IP for VPC with ID - %s in the account - %s" % (self.vpc.id, self.account.name)) + public_ip = PublicIPAddress.create(self.api_client, + accountid=self.account.name, + domainid=self.account.domainid, + zoneid=self.zone.id, + vpcid=self.vpc.id + ) + self.debug("Acquired public IP address - %s for VPC with ID - %s" % + (public_ip.ipaddress.ipaddress, self.vpc.id)) + return public_ip + + # create_StaticNatRule_For_VM - Creates Static NAT rule on the given public IP for the given VM in the given network + def create_StaticNatRule_For_VM(self, vm, public_ip, network): + self.debug("Enabling Static NAT rule on public IP - %s for VM with ID - %s in network with ID - %s" % + (public_ip.ipaddress.ipaddress, vm.id, network.id)) + StaticNATRule.enable(self.api_client, + ipaddressid=public_ip.ipaddress.id, + virtualmachineid=vm.id, + networkid=network.id, + vmguestip=None + ) + self.debug("Static NAT rule enabled on public IP - %s for VM with ID - %s in network with ID - %s" % + (public_ip.ipaddress.ipaddress, vm.id, network.id)) + + # ssh_into_VM - Gets into the shell of the given VM using its Static NAT rule enabled public IP + def ssh_into_VM(self, vm, public_ip): + self.debug("SSH into VM with ID - %s on public IP address - %s" % (vm.id, public_ip.ipaddress.ipaddress)) + ssh_client = vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress) + return ssh_client + + # execute_cmd - Executes the given command on the given ssh client + def execute_cmd(self, ssh_client, cmd): + self.debug("SSH client executing command - %s" % cmd) + ret_data = "" + out_list = ssh_client.execute(cmd) + if out_list is not None: + ret_data = ' '.join(map(str, out_list)).strip() + self.debug("SSH client executed command result - %s" % ret_data) + else: + self.debug("SSH client executed command result is None") + return ret_data + + # wget_from_vm_cmd - From within the given VM (ssh client), + # fetches test.html file of web server running with the given public IP + def wget_from_vm_cmd(self, ssh_client, ip_address, port): + cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + "/test.html" + response = self.execute_cmd(ssh_client, cmd) + if "200 OK" not in response: + self.fail("Failed to wget from a VM with http server IP address - %s" % ip_address) + # Removing the wget file + cmd = "rm -r test.html" + self.execute_cmd(ssh_client, cmd) + + @attr(tags=["advanced", "intervlan"], required_hardware="false") + def test_01_internallb_rules(self): + """Test VPC Network Internal LB functionality with different combinations of Internal LB rules + """ + + # 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully + # created. + # 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is + # successfully created. + # 3. 
Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR + # range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's + # CIDR subnet. + # 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range, + # check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR + # subnet. + # 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB + # Rule creation failed as Scheme Internal is not supported by this network offering. + # 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal + # LB Rules are successfully created. + # 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address, + # check if the Internal LB Rules are successfully created. + # 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address, + # check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule. + # 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the + # Internal LB Rules. + # 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a + # VM to it. + # 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier. + # 12. Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted. + + # Creating VPC networks in the VPC, and deploying VMs + self.debug("Creating a VPC network with Internal LB service...") + internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1') + + self.debug("Deploying a VM in the created VPC network...") + internal_vm = self.create_VM(internal_tier) + + self.debug("Creating a VPC network without Internal LB service...") + public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1') + + self.debug("Deploying a VM in the created VPC network...") + public_vm = self.create_VM(public_tier) + + # Creating Internal LB Rules + self.debug("Creating an Internal LB Rule without source IP Address specified...") + int_lb_rule = self.create_Internal_LB_Rule(internal_tier) + self.validate_Internal_LB_Rule(int_lb_rule, state="Add") + + # Validating InternalLbVm deployment + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) + self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule") + + self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name) + int_lb_rule.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule) + self.debug("Internal LB Rule successfully deleted in CloudStack") + + free_source_ip = int_lb_rule.sourceipaddress + + self.debug("Creating an Internal LB Rule with source IP Address specified...") + int_lb_rule = self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip) + self.validate_Internal_LB_Rule(int_lb_rule, state="Add") + + # Validating InternalLbVm deployment + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) + self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule") + + 
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name) + int_lb_rule.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule) + self.debug("Internal LB Rule successfully deleted in CloudStack") + + self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR " + "range...") + with self.assertRaises(Exception): + self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256") + self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet") + + self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR " + "range...") + with self.assertRaises(Exception): + self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256") + self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet") + + self.debug("Creating an Internal LB Rule in a VPC network without Internal Lb service...") + with self.assertRaises(Exception): + self.create_Internal_LB_Rule(public_tier) + self.debug("Internal LB Rule creation failed as Scheme Internal is not supported by this network offering") + + self.debug("Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...") + int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + + # Validating InternalLbVms deployment and state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running") + + self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + int_lb_rule_1.remove(self.api_client, vms=[internal_vm]) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + int_lb_rule_2.remove(self.api_client, vms=[internal_vm]) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + + # Validating InternalLbVms state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running") + + self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + int_lb_rule_1.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_1) + self.debug("Internal LB Rule successfully deleted in CloudStack") + int_lb_rule_2.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_2) + self.debug("Internal LB Rule successfully deleted in CloudStack") + + # Validating InternalLbVms un-deployment + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress) + self.debug("InternalLbVm successfully destroyed in CloudStack") + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, 
int_lb_rule_2.sourceipaddress) + self.debug("InternalLbVm successfully destroyed in CloudStack") + + self.debug("Creating multiple Internal LB Rules with different ports but using the same Load Balancing source " + "IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, + vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + int_lb_rule_1.remove(self.api_client, vms=[internal_vm]) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + int_lb_rule_2.remove(self.api_client, vms=[internal_vm]) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + int_lb_rule_1.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_1) + self.debug("Internal LB Rule successfully deleted in CloudStack") + int_lb_rule_2.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule_2) + self.debug("Internal LB Rule successfully deleted in CloudStack") + + # Validating InternalLbVm un-deployment + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress) + self.debug("InternalLbVm successfully destroyed in CloudStack") + + self.debug("Creating multiple Internal LB Rules with same ports and using the same Load Balancing source IP " + "Address...") + int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule, state="Active", vm_array=[internal_vm]) + with self.assertRaises(Exception): + self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress) + self.debug("Internal LB Rule creation failed as it conflicts with the existing rule") + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running") + + self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name) + int_lb_rule.remove(self.api_client, vms=[internal_vm]) + with self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running") + + self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name) + int_lb_rule.delete(self.api_client) + with 
self.assertRaises(Exception): + self.validate_Internal_LB_Rule(int_lb_rule) + self.debug("Internal LB Rule successfully deleted in CloudStack") + + # Validating InternalLbVm un-deployment + with self.assertRaises(Exception): + self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) + self.debug("InternalLbVm successfully destroyed in CloudStack") + + self.debug("Attaching a VM from a different tier to an Internal LB Rule created on a tier...") + with self.assertRaises(Exception): + self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm]) + self.debug("Internal LB Rule creation failed as the VM belongs to a different network") + + @attr(tags=["advanced", "intervlan"], required_hardware="true") + def test_02_internallb_rules_traffic(self): + """Test VPC Network Internal LB functionality by performing (wget) traffic tests within a VPC + """ + + # 1. Create an Internal LB Rule "internal_lbrule" with source IP Address specified on the Internal tier, check + # if the Internal LB Rule is successfully created. + # 2. Create an Internal LB Rule "internal_lbrule_http" with source IP Address (same as above) specified on the + # Internal tier, check if the Internal LB Rule is successfully created. + # 3. Attach a VM to the above created Internal LB Rules, check if the InternalLbVm is successfully deployed in + # the Internal tier. + # 4. Deploy two more VMs in the Internal tier, check if the VMs are successfully deployed. + # 5. Attach the newly deployed VMs to the above created Internal LB Rules, verify the validity of the above + # created Internal LB Rules over three Load Balanced VMs in the Internal tier. + # 6. Create the corresponding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible, + # check if the Network ACL rules are successfully added to the internal tier. + # 7. Validate the Internal LB functionality by performing (wget) traffic tests from a VM in the Public tier to + # the Internal load balanced guest VMs in the Internal tier, using Static NAT functionality to access (ssh) + # the VM on the Public tier. + # 8. Verify that the InternalLbVm gets destroyed when the last Internal LB rule is removed from the Internal + # tier. + # 9. Repeat the above steps for one more Internal tier as well, validate the Internal LB functionality. 
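The traffic checks in the step list above all reduce to one pattern: SSH into the public-tier VM through its static-NAT public IP (the Internal LB source IP is reachable only from inside the VPC), then wget the load-balanced source IP and expect test.html back. A condensed sketch using the helpers this class already defines (ssh_into_VM and wget_from_vm_cmd); the helper name itself is hypothetical:

```python
def verify_internal_lb_traffic(self, public_vm, public_ip, lb_source_ip, port):
    # Reach the VPC from outside via the static-NAT enabled public IP...
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    # ...then fetch test.html through the InternalLbVm from inside the VPC;
    # wget_from_vm_cmd fails the test unless the response is 200 OK.
    self.wget_from_vm_cmd(ssh_client, lb_source_ip, port)
```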
+ + # Creating VPC networks in the VPC, and deploying VMs + self.debug("Creating a VPC network with Internal LB service...") + internal_tier_1 = self.create_Network(self.net_off_1, gateway='10.1.1.1') + + self.debug("Deploying a VM in the created VPC network...") + internal_vm_1 = self.create_VM(internal_tier_1) + + self.debug("Creating one more VPC network with Internal LB service...") + internal_tier_2 = self.create_Network(self.net_off_1, gateway='10.1.2.1') + + self.debug("Deploying a VM in the created VPC network...") + internal_vm_2 = self.create_VM(internal_tier_2) + + self.debug("Creating a VPC network without Internal LB service...") + public_tier = self.create_Network(self.net_off_2, gateway='10.1.3.1') + + self.debug("Deploying a VM in the created VPC network...") + public_vm = self.create_VM(public_tier) + + # Creating Internal LB Rules in the Internal tiers + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier_1, vm_array=[internal_vm_1]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm_1]) + int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier_1, + vm_array=[internal_vm_1], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm_1]) + internal_lbrule_http = copy.deepcopy(self.test_data["internal_lbrule_http"]) + internal_lbrule_http["sourceport"] = 8080 + internal_lbrule_http["instanceport"] = 8080 + int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier_1, + vm_array=[internal_vm_1], + services=internal_lbrule_http, + source_ip=int_lb_rule_1.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", vm_array=[internal_vm_1]) + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") + + # Deploying more VMs in the Internal tier + self.debug("Deploying two more VMs in network - %s" % internal_tier_1.name) + internal_vm_1_1 = self.create_VM(internal_tier_1) + internal_vm_1_2 = self.create_VM(internal_tier_1) + + # Adding newly deployed VMs to the created Internal LB rules + self.debug("Adding two more virtual machines to the created Internal LB rules...") + int_lb_rule_1.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + int_lb_rule_2.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + int_lb_rule_3.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") + + # Adding Network ACL rules in the Internal tier + self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier_1) + self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier_1) + http_rule = copy.deepcopy(self.test_data["http_rule"]) + http_rule["privateport"] = 8080 + 
http_rule["publicport"] = 8080 + http_rule["startport"] = 8080 + http_rule["endport"] = 8080 + self.create_NetworkAclRule(http_rule, internal_tier_1) + + # Creating Internal LB Rules in the Internal tier + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") + int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier_2, vm_array=[internal_vm_2]) + self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", vm_array=[internal_vm_2]) + int_lb_rule_5 = self.create_Internal_LB_Rule(internal_tier_2, + vm_array=[internal_vm_2], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_4.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active", vm_array=[internal_vm_2]) + int_lb_rule_6 = self.create_Internal_LB_Rule(internal_tier_2, + vm_array=[internal_vm_2], + services=internal_lbrule_http, + source_ip=int_lb_rule_4.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active", vm_array=[internal_vm_2]) + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running") + + # Deploying more VMs in the Internal tier + self.debug("Deploying two more VMs in network - %s" % internal_tier_2.name) + internal_vm_2_1 = self.create_VM(internal_tier_2) + internal_vm_2_2 = self.create_VM(internal_tier_2) + + # Adding newly deployed VMs to the created Internal LB rules + self.debug("Adding two more virtual machines to the created Internal LB rules...") + int_lb_rule_4.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + int_lb_rule_5.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + int_lb_rule_6.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running") + + # Adding Network ACL rules in the Internal tier + self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier_2) + self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier_2) + self.create_NetworkAclRule(http_rule, internal_tier_2) + + # Creating Static NAT rule for the VM in the Public tier + public_ip = self.acquire_PublicIPAddress() + self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) + + # Adding Network ACL rule in the Public tier + self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier) + + # Internal LB (wget) traffic tests + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + http_rule["publicport"] + ) + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_4.sourceipaddress, + 
self.test_data["http_rule"]["publicport"] + ) + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_4.sourceipaddress, + http_rule["publicport"] + ) + + @attr(tags=["advanced", "intervlan"], required_hardware="true") + def test_03_internallb_rules_vpc_network_restarts_traffic(self): + """Test VPC Network Internal LB functionality with restarts of VPC network components by performing (wget) + traffic tests within a VPC + """ + + # Repeat the tests in the testcase "test_02_internallb_rules_traffic" with restarts of VPC networks (tiers): + # 1. Restart tier with InternalLbVm (cleanup = false), verify that the InternalLbVm gets destroyed and deployed + # again in the Internal tier. + # 2. Restart tier with InternalLbVm (cleanup = true), verify that the InternalLbVm gets destroyed and deployed + # again in the Internal tier. + # 3. Restart tier without InternalLbVm (cleanup = false), verify that this restart has no effect on the + # InternalLbVm functionality. + # 4. Restart tier without InternalLbVm (cleanup = true), verify that this restart has no effect on the + # InternalLbVm functionality. + # 5. Stop all the VMs configured with InternalLbVm, verify that the InternalLbVm gets destroyed in the Internal + # tier. + # 6. Start all the VMs configured with InternalLbVm, verify that the InternalLbVm gets deployed again in the + # Internal tier. + # 7. Restart VPC (cleanup = false), verify that the VPC VR gets rebooted and this restart has no effect on the + # InternalLbVm functionality. + # 7. Restart VPC (cleanup = true), verify that the VPC VR gets rebooted and this restart has no effect on the + # InternalLbVm functionality. + # Verify the above restarts of VPC networks (tiers) by performing (wget) traffic tests within a VPC. 
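Because a tier or VPC restart tears down and redeploys the InternalLbVm, the test below retries the wget check rather than asserting on the first attempt. Its inline retry loops all follow one pattern, shown here as a sketch of an equivalent helper (hypothetical name, same assumptions as the sketch above):

```python
def wget_with_retries(self, ssh_client, lb_source_ip, port,
                      max_tries=10, delay=30):
    # After a restart the InternalLbVm needs time to come back up and
    # reload haproxy, so tolerate transient failures before giving up.
    for attempt in range(1, max_tries + 1):
        try:
            self.wget_from_vm_cmd(ssh_client, lb_source_ip, port)
            return
        except Exception as e:
            self.debug("wget attempt %d failed: %s" % (attempt, e))
            time.sleep(delay)
    self.fail("InternalLbVm did not serve traffic after %d tries" % max_tries)
```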
+ + # Creating VPC networks in the VPC, and deploying VMs + self.debug("Creating a VPC network with Internal LB service...") + internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1') + + self.debug("Deploying a VM in the created VPC network...") + internal_vm = self.create_VM(internal_tier) + + self.debug("Creating a VPC network without Internal LB service...") + public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1') + + self.debug("Deploying a VM in the created VPC network...") + public_vm = self.create_VM(public_tier) + + # Creating Internal LB Rules in the Internal tiers + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, + vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Deploying more VMs in the Internal tier + self.debug("Deploying two more VMs in network - %s" % internal_tier.name) + internal_vm_1 = self.create_VM(internal_tier) + internal_vm_2 = self.create_VM(internal_tier) + + # Adding newly deployed VMs to the created Internal LB rules + self.debug("Adding two more virtual machines to the created Internal LB rules...") + int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Adding Network ACL rules in the Internal tier + self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier) + self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier) + + # Creating Static NAT rule for the VM in the Public tier + public_ip = self.acquire_PublicIPAddress() + self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) + + # Adding Network ACL rule in the Public tier + self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier) + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + # Restart Internal tier (cleanup = false) + # InternalLbVm gets destroyed and deployed again in the Internal tier + self.debug("Restarting the Internal tier without cleanup...") + Network.restart(internal_tier, self.api_client, cleanup=False) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = 
self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier: %s" % e) + self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic " + "test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier") + break + + # Restart Internal tier (cleanup = true) + # InternalLbVm gets destroyed and deployed again in the Internal tier + self.debug("Restarting the Internal tier with cleanup...") + Network.restart(internal_tier, self.api_client, cleanup=True) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier with cleanup: " + "%s" % e) + self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic " + "test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier with cleanup") + break + + # Restart Public tier (cleanup = false) + # This restart has no effect on the InternalLbVm functionality + self.debug("Restarting the Public tier without cleanup...") + Network.restart(public_tier, self.api_client, cleanup=False) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + # Restart Public tier (cleanup = true) + # This restart has no effect on the InternalLbVm functionality + self.debug("Restarting the Public tier with cleanup...") + Network.restart(public_tier, self.api_client, cleanup=True) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + # Stopping VMs in the Internal tier + # wget traffic test fails as all the VMs in the Internal tier are in stopped state + self.debug("Stopping all the VMs in the Internal tier...") + internal_vm.stop(self.api_client) + internal_vm_1.stop(self.api_client) + internal_vm_2.stop(self.api_client) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + with self.assertRaises(Exception): + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + self.debug("Failed to wget file as all the VMs in the Internal tier are in stopped state") + + # 
Starting VMs in the Internal tier + # wget traffic test succeeds as all the VMs in the Internal tier are back in running state + self.debug("Starting all the VMs in the Internal tier...") + internal_vm.start(self.api_client) + internal_vm_1.start(self.api_client) + internal_vm_2.start(self.api_client) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting all the VMs in the Internal tier" + ": %s" % e) + self.debug("Waiting for the InternalLbVm and all the VMs in the Internal tier to be fully resolved for " + "(wget) traffic test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting all the VMs in the Internal " + "tier") + break + + # Restarting VPC (cleanup = false) + # VPC VR gets destroyed and deployed again in the VPC + # This restart has no effect on the InternalLbVm functionality + self.debug("Restarting the VPC without cleanup...") + self.restart_Vpc(self.vpc, cleanup=False) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + # Restarting VPC (cleanup = true) + # VPC VR gets destroyed and deployed again in the VPC + # This restart has no effect on the InternalLbVm functionality + self.debug("Restarting the VPC with cleanup...") + self.restart_Vpc(self.vpc, cleanup=True) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + @attr(tags=["advanced", "intervlan"], required_hardware="true") + def test_04_internallb_appliance_operations_traffic(self): + """Test VPC Network Internal LB functionality with InternalLbVm appliance operations by performing (wget) + traffic tests within a VPC + """ + + # Repeat the tests in the testcase "test_02_internallb_rules_traffic" with InternalLbVm appliance operations: + # 1. Verify the InternalLbVm deployment by creating the Internal LB Rules when the VPC VR is in Stopped state, + # VPC VR has no effect on the InternalLbVm functionality. + # 2. Stop the InternalLbVm when the VPC VR is in Stopped State + # 3. Start the InternalLbVm when the VPC VR is in Stopped state + # 4. Stop the InternalLbVm when the VPC VR is in Running State + # 5. Start the InternalLbVm when the VPC VR is in Running state + # 6. Force stop the InternalLbVm when the VPC VR is in Running State + # 7. Start the InternalLbVm when the VPC VR is in Running state + # Verify the above restarts of VPC networks by performing (wget) traffic tests within a VPC. 
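The invariant behind the step list above is that traffic through the Internal LB depends only on the InternalLbVm's own state, never on the VPC VR's. Each stop/start step therefore pairs a state check with a traffic check, which can be read as this single sketch (a hypothetical helper composed from the class's own methods):

```python
def assert_lb_matches_appliance_state(self, network, lb_source_ip, port,
                                      public_vm, public_ip, expect_up):
    # Whether the VPC VR is Running or Stopped is irrelevant here; only
    # the InternalLbVm's state decides whether the wget goes through.
    self.check_InternalLbVm_state(network, lb_source_ip,
                                  state="Running" if expect_up else "Stopped")
    ssh_client = self.ssh_into_VM(public_vm, public_ip)
    if expect_up:
        self.wget_from_vm_cmd(ssh_client, lb_source_ip, port)
    else:
        with self.assertRaises(Exception):
            self.wget_from_vm_cmd(ssh_client, lb_source_ip, port)
```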
+ + # Creating VPC networks in the VPC, and deploying VMs + self.debug("Creating a VPC network with Internal LB service...") + internal_tier = self.create_Network(self.net_off_1, gateway='10.1.1.1') + + self.debug("Deploying a VM in the created VPC network...") + internal_vm = self.create_VM(internal_tier) + + self.debug("Creating a VPC network without Internal LB service...") + public_tier = self.create_Network(self.net_off_2, gateway='10.1.2.1') + + self.debug("Deploying a VM in the created VPC network...") + public_vm = self.create_VM(public_tier) + + # Stopping the VPC VR + # VPC VR has no effect on the InternalLbVm functionality + vpc_vr = self.get_Router(internal_tier) + Router.stop(self.api_client, id=vpc_vr.id) + + # Creating Internal LB Rules in the Internal tiers + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, + vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress + ) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + + # Validating InternalLbVm deployment and state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Deploying more VMs in the Internal tier + self.debug("Deploying two more VMs in network - %s" % internal_tier.name) + internal_vm_1 = self.create_VM(internal_tier) + internal_vm_2 = self.create_VM(internal_tier) + + # Adding newly deployed VMs to the created Internal LB rules + self.debug("Adding two more virtual machines to the created Internal LB rules...") + int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + + # Validating InternalLbVm state + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Adding Network ACL rules in the Internal tier + self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], internal_tier) + self.create_NetworkAclRule(self.test_data["http_rule"], internal_tier) + + # Creating Static NAT rule for the VM in the Public tier + public_ip = self.acquire_PublicIPAddress() + self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) + + # Adding Network ACL rule in the Public tier + self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") + self.create_NetworkAclRule(self.test_data["ingress_rule"], public_tier) + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + + # # Stopping the InternalLbVm when the VPC VR is in Stopped state + int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) + self.stop_InternalLbVm(int_lb_vm) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, 
state="Stopped") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + with self.assertRaises(Exception): + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + self.debug("Failed to wget file as the InternalLbVm is in stopped state") + + # # Starting the InternalLbVm when the VPC VR is in Stopped state + self.start_InternalLbVm(int_lb_vm) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" + % e) + self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") + break + + # Starting the VPC VR + # VPC VR has no effect on the InternalLbVm functionality + Router.start(self.api_client, id=vpc_vr.id) + + # # Stopping the InternalLbVm when the VPC VR is in Running state + self.stop_InternalLbVm(int_lb_vm) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + with self.assertRaises(Exception): + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + self.debug("Failed to wget file as the InternalLbVm is in stopped state") + + # # Starting the InternalLbVm when the VPC VR is in Running state + self.start_InternalLbVm(int_lb_vm) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" + % e) + self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") + break + + # # Force Stopping the InternalLbVm when the VPC VR is in Running state + self.stop_InternalLbVm(int_lb_vm, force=True) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + with self.assertRaises(Exception): + self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + self.debug("Failed to wget file as the InternalLbVm is in stopped state") + + # # Starting the InternalLbVm when the VPC VR is in Running state + self.start_InternalLbVm(int_lb_vm) + self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + + # Internal LB (wget) traffic test + ssh_client = self.ssh_into_VM(public_vm, public_ip) + tries = 0 + while tries < 10: + try: + 
self.wget_from_vm_cmd(ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"] + ) + except Exception as e: + self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" + % e) + self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...") + time.sleep(30) + tries += 1 + continue + self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") + break diff --git a/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py b/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py index 08146897592..97319aa2b1f 100644 --- a/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py +++ b/test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py @@ -15,7 +15,8 @@ # specific language governing permissions and limitations # under the License. -""" Component tests for VPC Internal Load Balancer functionality with Nuage VSP SDN plugin +""" Component tests for VPC Internal Load Balancer functionality with +Nuage VSP SDN plugin """ # Import Local Modules from nuageTestCase import nuageTestCase @@ -51,28 +52,35 @@ class TestNuageInternalLb(nuageTestCase): self.cleanup = [self.account] return - # create_Internal_LB_Rule - Creates Internal LB rule in the given VPC network - def create_Internal_LB_Rule(self, network, vm_array=None, services=None, source_ip=None): - self.debug("Creating Internal LB rule in VPC network with ID - %s" % network.id) + # create_Internal_LB_Rule - Creates Internal LB rule in the given + # VPC network + def create_Internal_LB_Rule(self, network, vm_array=None, services=None, + source_ip=None): + self.debug("Creating Internal LB rule in VPC network with ID - %s" % + network.id) if not services: services = self.test_data["internal_lbrule"] - int_lb_rule = ApplicationLoadBalancer.create(self.api_client, - services=services, - sourcenetworkid=network.id, - networkid=network.id, - sourceipaddress=source_ip - ) + int_lb_rule = ApplicationLoadBalancer.create( + self.api_client, + services=services, + sourcenetworkid=network.id, + networkid=network.id, + sourceipaddress=source_ip + ) self.debug("Created Internal LB rule") # Assigning VMs to the created Internal Load Balancer rule if vm_array: - self.debug("Assigning virtual machines - %s to the created Internal LB rule" % vm_array) + self.debug("Assigning virtual machines - %s to the created " + "Internal LB rule" % vm_array) int_lb_rule.assign(self.api_client, vms=vm_array) self.debug("Assigned VMs to the created Internal LB rule") return int_lb_rule # validate_Internal_LB_Rule - Validates the given Internal LB rule, - # matches the given Internal LB rule name and state against the list of Internal LB rules fetched - def validate_Internal_LB_Rule(self, int_lb_rule, state=None, vm_array=None): + # matches the given Internal LB rule name and state against the list of + # Internal LB rules fetched + def validate_Internal_LB_Rule(self, int_lb_rule, state=None, + vm_array=None): """Validates the Internal LB Rule""" self.debug("Check if the Internal LB Rule is created successfully ?") int_lb_rules = ApplicationLoadBalancer.list(self.api_client, @@ -82,38 +90,45 @@ class TestNuageInternalLb(nuageTestCase): "List Internal LB Rule should return a valid list" ) self.assertEqual(int_lb_rule.name, int_lb_rules[0].name, - "Name of the Internal LB Rule should match with the returned list data" + "Name of the Internal LB Rule should match with the " + "returned list data" ) if 
state:
            self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
                             "Internal LB Rule state should be '%s'" % state
                             )
         if vm_array:
-            instance_ids = [instance.id for instance in int_lb_rules[0].loadbalancerinstance]
+            instance_ids = [instance.id for instance in
+                            int_lb_rules[0].loadbalancerinstance]
             for vm in vm_array:
                 self.assertEqual(vm.id in instance_ids, True,
-                                 "Internal LB instance list should have the VM with ID - %s" % vm.id
+                                 "Internal LB instance list should have the "
+                                 "VM with ID - %s" % vm.id
                                  )
-        self.debug("Internal LB Rule creation successfully validated for %s" % int_lb_rule.name)
+        self.debug("Internal LB Rule creation successfully validated for %s" %
+                   int_lb_rule.name)

     # list_InternalLbVms - Lists deployed Internal LB VM instances
     def list_InternalLbVms(self, network_id=None, source_ip=None):
-        listInternalLoadBalancerVMsCmd = listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
+        listInternalLoadBalancerVMsCmd = \
+            listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
         listInternalLoadBalancerVMsCmd.account = self.account.name
         listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
         if network_id:
             listInternalLoadBalancerVMsCmd.networkid = network_id
-        internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(listInternalLoadBalancerVMsCmd)
+        internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(
+            listInternalLoadBalancerVMsCmd)
         if source_ip:
             return [internal_lb_vm for internal_lb_vm in internal_lb_vms
                     if str(internal_lb_vm.guestipaddress) == source_ip]
         else:
             return internal_lb_vms

-    # get_InternalLbVm - Returns Internal LB VM instance for the given VPC network and source ip
+    # get_InternalLbVm - Returns Internal LB VM instance for the given VPC
+    # network and source ip
     def get_InternalLbVm(self, network, source_ip):
-        self.debug("Finding the InternalLbVm for network with ID - %s and source IP address - %s" %
-                   (network.id, source_ip))
+        self.debug("Finding the InternalLbVm for network with ID - %s and "
+                   "source IP address - %s" % (network.id, source_ip))
         internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
         self.assertEqual(isinstance(internal_lb_vms, list), True,
                          "List InternalLbVms should return a valid list"
@@ -121,7 +136,7 @@ class TestNuageInternalLb(nuageTestCase):
         return internal_lb_vms[0]

     # stop_InternalLbVm - Stops the given Internal LB VM instance
-    def stop_InternalLbVm(self, int_lb_vm, force=None):
+    def stop_InternalLbVm(self, int_lb_vm, force=False):
         self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
         cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
         cmd.id = int_lb_vm.id
@@ -136,8 +151,9 @@ class TestNuageInternalLb(nuageTestCase):
         cmd.id = int_lb_vm.id
         self.api_client.startInternalLoadBalancerVM(cmd)

-    # check_InternalLbVm_state - Checks if the Internal LB VM instance of the given VPC network and source IP is in the
-    # expected state form the list of fetched Internal LB VM instances
+    # check_InternalLbVm_state - Checks if the Internal LB VM instance of the
+    # given VPC network and source IP is in the expected state from the list of
+    # fetched Internal LB VM instances
     def check_InternalLbVm_state(self, network, source_ip, state=None):
         self.debug("Check if the InternalLbVm is in state - %s" % state)
         internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
@@ -148,24 +164,99 @@ class TestNuageInternalLb(nuageTestCase):
         self.assertEqual(internal_lb_vms[0].state, state,
                          "InternalLbVm is not in the expected state"
                          )
-        self.debug("InternalLbVm instance - %s is in 
the expected state - %s" % (internal_lb_vms[0].name, state)) + self.debug("InternalLbVm instance - %s is in the expected state - %s" % + (internal_lb_vms[0].name, state)) + + # verify_vpc_vm_ingress_traffic - Verifies ingress traffic to the given VM + # (SSH into VM) via a created Static NAT rule in the given VPC network + def verify_vpc_vm_ingress_traffic(self, vm, network, vpc): + self.debug("Verifying ingress traffic to the VM (SSH into VM) - %s " + "via a created Static NAT rule in the VPC network - %s" % + (vm, network)) + + # Creating Static NAT rule for the given VM in the given VPC network + self.debug("Creating Static NAT Rule...") + test_public_ip = self.acquire_PublicIPAddress(network, vpc) + self.validate_PublicIPAddress(test_public_ip, network) + self.create_StaticNatRule_For_VM(vm, test_public_ip, network) + self.validate_PublicIPAddress( + test_public_ip, network, static_nat=True, vm=vm) + + # VSD verification + self.verify_vsd_floating_ip(network, vm, test_public_ip.ipaddress, vpc) + + # Adding Network ACL rule in the given VPC network + self.debug("Creating Network ACL rule ...") + test_public_ssh_rule = self.create_NetworkAclRule( + self.test_data["ingress_rule"], network=network) + + # VSD verification + self.verify_vsd_firewall_rule(test_public_ssh_rule) + + # SSH into VM + self.debug("Verifying VM ingress traffic (SSH into VM)...") + self.ssh_into_VM(vm, test_public_ip) + + # Removing Network ACL rule in the given VPC network + self.debug("Removing the created Network ACL rule...") + test_public_ssh_rule.delete(self.api_client) + + # VSD verification + with self.assertRaises(Exception): + self.verify_vsd_firewall_rule(test_public_ssh_rule) + self.debug("Network ACL rule successfully deleted in VSD") + + # Deleting Static NAT Rule + self.debug("Deleting the created Static NAT Rule...") + self.delete_StaticNatRule_For_VM(test_public_ip) + with self.assertRaises(Exception): + self.validate_PublicIPAddress( + test_public_ip, network, static_nat=True, vm=vm) + self.debug("Static NAT Rule successfully deleted in CloudStack") + + # VSD verification + with self.assertRaises(Exception): + self.verify_vsd_floating_ip( + network, vm, test_public_ip.ipaddress, vpc=vpc) + self.debug("Floating IP successfully deleted in VSD") + + # Releasing acquired public IP + self.debug("Releasing the acquired public IP...") + test_public_ip.delete(self.api_client) + with self.assertRaises(Exception): + self.validate_PublicIPAddress(test_public_ip, network) + self.debug("Acquired public IP in the network successfully released " + "in CloudStack") + + self.debug("Successfully verified ingress traffic to the VM " + "(SSH into VM) - %s via a created Static NAT rule in the " + "VPC network - %s" % (vm, network)) # wget_from_vm_cmd - From within the given VM (ssh client), # fetches index.html file of web server running with the given public IP def wget_from_vm_cmd(self, ssh_client, ip_address, port): - cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + "/" - response = self.execute_cmd(ssh_client, cmd) - if "200 OK" not in response: - self.fail("Failed to wget from a VM with http server IP address - %s" % ip_address) - # Reading the wget file - cmd = "cat index.html" - wget_file = self.execute_cmd(ssh_client, cmd) - # Removing the wget file - cmd = "rm -r index.html" + wget_file = "" + cmd = "rm -rf index.html*" self.execute_cmd(ssh_client, cmd) + cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + \ + "/" + response = self.execute_cmd(ssh_client, cmd) + if "200 
OK" in response:
+            self.debug("wget from a VM with http server IP address "
+                       "- %s is successful" % ip_address)
+            # Reading the wget file
+            cmd = "cat index.html"
+            wget_file = self.execute_cmd(ssh_client, cmd)
+            # Removing the wget file
+            cmd = "rm -rf index.html*"
+            self.execute_cmd(ssh_client, cmd)
+        else:
+            self.debug("Failed to wget from a VM with http server IP address "
+                       "- %s" % ip_address)
         return wget_file

-    # verify_lb_wget_file - Verifies that the given wget file (index.html) belongs to the given Internal LB rule
+    # verify_lb_wget_file - Verifies that the given wget file (index.html)
+    # belongs to the given Internal LB rule
     # assigned VMs (vm array)
     def verify_lb_wget_file(self, wget_file, vm_array):
         wget_server_ip = None
@@ -174,93 +265,120 @@ class TestNuageInternalLb(nuageTestCase):
             if str(nic.ipaddress) in str(wget_file):
                 wget_server_ip = str(nic.ipaddress)
         if wget_server_ip:
-            self.debug("Verified wget file from an Internal Load Balanced VM with http server IP address - %s"
-                       % wget_server_ip)
+            self.debug("Verified wget file from Internal Load Balanced VMs - "
+                       "%s" % vm_array)
         else:
-            self.fail("Did not wget file from the Internal Load Balanced VMs - %s" % vm_array)
+            self.fail("Failed to verify wget file from Internal Load Balanced "
+                      "VMs - %s" % vm_array)
         return wget_server_ip

-    # validate_internallb_algorithm_traffic - Validates Internal LB algorithms by performing multiple wget traffic tests
-    # against the given Internal LB VM instance (source port)
-    def validate_internallb_algorithm_traffic(self, ssh_client, source_ip, port, vm_array, algorithm):
+    # validate_internallb_algorithm_traffic - Validates Internal LB algorithms
+    # by performing multiple wget traffic tests against the given Internal LB
+    # VM instance (source port)
+    def validate_internallb_algorithm_traffic(self, ssh_client, source_ip,
+                                              port, vm_array, algorithm):
         # Internal LB (wget) traffic tests
         iterations = 2 * len(vm_array)
         wget_files = []
         for i in range(iterations):
-            wget_files.append(self.wget_from_vm_cmd(ssh_client, source_ip, port))
+            wget_files.append(
+                self.wget_from_vm_cmd(ssh_client, source_ip, port))
         # Verifying Internal LB (wget) traffic tests
         wget_servers_ip_list = []
         for i in range(iterations):
-            wget_servers_ip_list.append(self.verify_lb_wget_file(wget_files[i], vm_array))
+            wget_servers_ip_list.append(
+                self.verify_lb_wget_file(wget_files[i], vm_array))
         # Validating Internal LB algorithm
        if algorithm == "roundrobin" or algorithm == "leastconn":
             for i in range(iterations):
-                if wget_servers_ip_list.count(wget_servers_ip_list[i]) is not 2:
-                    self.fail("Round Robin Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
-            self.debug("Successfully validated Round Robin/Least connections Internal LB algorithm - %s" %
-                       wget_servers_ip_list)
+                if wget_servers_ip_list.count(wget_servers_ip_list[i]) \
+                        != 2:
+                    self.fail("Round Robin Internal LB algorithm validation "
+                              "failed - %s" % wget_servers_ip_list)
+            self.debug("Successfully validated Round Robin/Least connections "
+                       "Internal LB algorithm - %s" % wget_servers_ip_list)
         if algorithm == "source":
             for i in range(iterations):
-                if wget_servers_ip_list.count(wget_servers_ip_list[i]) is not iterations:
-                    self.fail("Source Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
-            self.debug("Successfully validated Source Internal LB algorithm - %s" % wget_servers_ip_list)
+                if wget_servers_ip_list.count(wget_servers_ip_list[i]) \
+                        != iterations:
+                    self.fail("Source Internal LB algorithm 
validation failed " + "- %s" % wget_servers_ip_list) + self.debug("Successfully validated Source Internal LB algorithm - " + "%s" % wget_servers_ip_list) @attr(tags=["advanced", "nuagevsp"], required_hardware="false") def test_01_nuage_internallb_vpc_Offering(self): - """Test Nuage VSP VPC Offering with different combinations of LB service providers + """Test Nuage VSP VPC Offering with different combinations of LB + service providers """ - # 1. Verify that the network service providers supported by Nuage VSP for VPC Internal LB functionality are all - # successfully created and enabled. - # 2. Create Nuage VSP VPC offering with LB service provider as "InternalLbVm", check if it is successfully - # created and enabled. Verify that the VPC creation succeeds with this VPC offering. - # 3. Create Nuage VSP VPC offering with LB service provider as "VpcVirtualRouter", check if it is successfully - # created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not - # support provider "VpcVirtualRouter" for service LB. - # 4. Create Nuage VSP VPC offering with LB service provider as "Netscaler", check if it is successfully - # created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not - # support provider "Netscaler" for service LB. + # 1. Verify that the network service providers supported by Nuage VSP + # for VPC Internal LB functionality are all successfully created and + # enabled. + # 2. Create Nuage VSP VPC offering with LB service provider as + # "InternalLbVm", check if it is successfully created and enabled. + # Verify that the VPC creation succeeds with this VPC offering. + # 3. Create Nuage VSP VPC offering with LB service provider as + # "VpcVirtualRouter", check if it is successfully created and + # enabled. Verify that the VPC creation fails with this VPC offering + # as Nuage VSP does not support provider "VpcVirtualRouter" for + # service LB. + # 4. Create Nuage VSP VPC offering with LB service provider as + # "Netscaler", check if it is successfully created and enabled. + # Verify that the VPC creation fails with this VPC offering as Nuage + # VSP does not support provider "Netscaler" for service LB. # 5. Delete all the created objects (cleanup). 
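+        # A minimal sketch of the clone-and-override pattern this test
+        # applies below (create_VpcOffering/create_Vpc are this class's
+        # helpers; the test_data keys appear in the code that follows):
+        #
+        #     offering_data = copy.deepcopy(
+        #         self.test_data["nuagevsp"]["vpc_offering_lb"])
+        #     offering_data["serviceProviderList"]["Lb"] = "Netscaler"
+        #     vpc_off = self.create_VpcOffering(offering_data)  # succeeds
+        #     with self.assertRaises(Exception):
+        #         # VPC creation fails: Nuage VSP does not support this
+        #         # provider for service LB
+        #         self.create_Vpc(vpc_off, cidr='10.1.0.0/16')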
- self.debug("Validating network service providers supported by Nuage VSP for VPC Internal LB functionality") + self.debug("Validating network service providers supported by Nuage " + "VSP for VPC Internal LB functionality") providers = ["NuageVsp", "VpcVirtualRouter", "InternalLbVm"] for provider in providers: self.validate_NetworkServiceProvider(provider, state="Enabled") # Creating VPC offerings - self.debug("Creating Nuage VSP VPC offering with LB service provider as InternalLbVm...") - vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with LB service provider " + "as InternalLbVm...") + vpc_off_1 = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC offering with LB service provider as VpcVirtualRouter...") - vpc_offering_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with LB service provider " + "as VpcVirtualRouter...") + vpc_offering_lb = copy.deepcopy( + self.test_data["nuagevsp"]["vpc_offering_lb"]) vpc_offering_lb["serviceProviderList"]["Lb"] = "VpcVirtualRouter" vpc_off_2 = self.create_VpcOffering(vpc_offering_lb) self.validate_VpcOffering(vpc_off_2, state="Enabled") - self.debug("Creating Nuage VSP VPC offering with LB service provider as Netscaler...") + self.debug("Creating Nuage VSP VPC offering with LB service provider " + "as Netscaler...") vpc_offering_lb["serviceProviderList"]["Lb"] = "Netscaler" vpc_off_3 = self.create_VpcOffering(vpc_offering_lb) self.validate_VpcOffering(vpc_off_3, state="Enabled") self.debug("Creating Nuage VSP VPC offering without LB service...") - vpc_off_4 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"]) + vpc_off_4 = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering"]) self.validate_VpcOffering(vpc_off_4, state="Enabled") # Creating VPCs - self.debug("Creating a VPC with LB service provider as InternalLbVm...") + self.debug("Creating a VPC with LB service provider as " + "InternalLbVm...") vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16') self.validate_Vpc(vpc_1, state="Enabled") - self.debug("Creating a VPC with LB service provider as VpcVirtualRouter...") + self.debug("Creating a VPC with LB service provider as " + "VpcVirtualRouter...") with self.assertRaises(Exception): self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16') - self.debug("Nuage VSP does not support provider VpcVirtualRouter for service LB for VPCs") + self.debug("Nuage VSP does not support provider VpcVirtualRouter for " + "service LB for VPCs") self.debug("Creating a VPC with LB service provider as Netscaler...") with self.assertRaises(Exception): self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16') - self.debug("Nuage VSP does not support provider Netscaler for service LB for VPCs") + self.debug("Nuage VSP does not support provider Netscaler for service " + "LB for VPCs") self.debug("Creating a VPC without LB service...") vpc_2 = self.create_Vpc(vpc_off_4, cidr='10.1.0.0/16') @@ -268,32 +386,43 @@ class TestNuageInternalLb(nuageTestCase): @attr(tags=["advanced", "nuagevsp"], required_hardware="false") def test_02_nuage_internallb_vpc_network_offering(self): - """Test Nuage VSP VPC Network Offering with and without Internal LB service + """Test Nuage VSP VPC Network Offering with and without Internal LB + service """ - # 1. 
Create Nuage VSP VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability - # "lbSchemes" as "internal", check if it is successfully created and enabled. Verify that the VPC network - # creation succeeds with this Network offering. - # 2. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled. - # Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non - # persistent VPC networks. - # 3. Recreate above Network offering with conserve mode On, check if the network offering creation failed - # as only networks with conserve mode Off can belong to VPC. - # 4. Create Nuage VSP VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability - # "lbSchemes" as "public", check if the network offering creation failed as "public" lbScheme is not - # supported for LB Service Provider "InternalLbVm". - # 5. Create Nuage VSP VPC Network offering without Internal LB Service, check if it is successfully created and - # enabled. Verify that the VPC network creation succeeds with this Network offering. - # 6. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled. - # Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non - # persistent VPC networks. - # 7. Recreate the above Network offering with conserve mode On, check if the network offering creation failed - # as only networks with conserve mode Off can belong to VPC. + # 1. Create Nuage VSP VPC Network offering with LB Service Provider as + # "InternalLbVm" and LB Service Capability "lbSchemes" as + # "internal", check if it is successfully created and enabled. + # Verify that the VPC network creation succeeds with this Network + # offering. + # 2. Recreate above Network offering with ispersistent False, check if + # it is successfully created and enabled.Verify that the VPC network + # creation fails with this Network offering as Nuage VSP does not + # support non persistent VPC networks. + # 3. Recreate above Network offering with conserve mode On, check if + # the network offering creation failed as only networks with + # conserve mode Off can belong to VPC. + # 4. Create Nuage VSP VPC Network offering with LB Service Provider as + # "InternalLbVm" and LB Service Capability "lbSchemes" as "public", + # check if the network offering creation failed as "public" lbScheme + # is not supported for LB Service Provider "InternalLbVm". + # 5. Create Nuage VSP VPC Network offering without Internal LB Service, + # check if it is successfully created and enabled. Verify that the + # VPC network creation succeeds with this Network offering. + # 6. Recreate above Network offering with ispersistent False, check if + # it is successfully created and enabled. Verify that the VPC + # network creation fails with this Network offering as Nuage VSP + # does not support non persistent VPC networks. + # 7. Recreate the above Network offering with conserve mode On, check + # if the network offering creation failed as only networks with + # conserve mode Off can belong to VPC. # 8. Delete all the created objects (cleanup). 
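+        # A minimal sketch of the lbSchemes capability check exercised
+        # below (create_NetworkOffering is this class's helper; the
+        # test_data keys appear in the code that follows):
+        #
+        #     net_off_data = copy.deepcopy(self.test_data["nuagevsp"]
+        #         ["vpc_network_offering_internal_lb"])
+        #     net_off_data["serviceCapabilityList"]["Lb"]["lbSchemes"] = \
+        #         "public"
+        #     with self.assertRaises(Exception):
+        #         # "public" lbScheme is rejected for provider InternalLbVm
+        #         self.create_NetworkOffering(net_off_data)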
# Creating VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating VPC @@ -302,50 +431,71 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service " - "Capability lbSchemes as internal...") - net_off_1 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) + self.debug("Creating Nuage VSP VPC Network offering with LB Service " + "Provider as InternalLbVm and LB Service Capability " + "lbSchemes as internal...") + net_off_1 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Recreating above Network offering with ispersistent False...") - vpc_net_off_lb_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) + self.debug("Recreating above Network offering with ispersistent " + "False...") + vpc_net_off_lb_non_persistent = copy.deepcopy( + self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) vpc_net_off_lb_non_persistent["ispersistent"] = "False" net_off_2 = self.create_NetworkOffering(vpc_net_off_lb_non_persistent) self.validate_NetworkOffering(net_off_2, state="Enabled") - self.debug("Recreating above Network offering with conserve mode On...") + self.debug("Recreating above Network offering with conserve mode " + "On...") with self.assertRaises(Exception): - self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"], - conserve_mode=True) - self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC") + self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"], + conserve_mode=True) + self.debug("Network offering creation failed as only networks with " + "conserve mode Off can belong to VPC") - self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service " - "Capability lbSchemes as public...") - network_offering_internal_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) - network_offering_internal_lb["serviceCapabilityList"]["Lb"]["lbSchemes"] = "public" + self.debug("Creating Nuage VSP VPC Network offering with LB Service " + "Provider as InternalLbVm and LB Service Capability " + "lbSchemes as public...") + network_offering_internal_lb = copy.deepcopy( + self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) + service_list = network_offering_internal_lb["serviceCapabilityList"] + service_list["Lb"]["lbSchemes"] = "public" + network_offering_internal_lb["serviceCapabilityList"] = service_list with self.assertRaises(Exception): self.create_NetworkOffering(network_offering_internal_lb) - self.debug("Network offering creation failed as public lbScheme is not supported for LB Service Provider " - "InternalLbVm") + self.debug("Network offering creation failed as public lbScheme is " + "not supported for LB Service Provider InternalLbVm") - self.debug("Creating Nuage VSP VPC Network offering 
without Internal LB service...") - net_off_3 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_3 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_3, state="Enabled") - self.debug("Recreating above Network offering with ispersistent False...") - vpc_net_off_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Recreating above Network offering with ispersistent " + "False...") + vpc_net_off_non_persistent = copy.deepcopy( + self.test_data["nuagevsp"]["vpc_network_offering"]) vpc_net_off_non_persistent["ispersistent"] = "False" net_off_4 = self.create_NetworkOffering(vpc_net_off_non_persistent) self.validate_NetworkOffering(net_off_4, state="Enabled") - self.debug("Recreating above Network offering with conserve mode On...") + self.debug("Recreating above Network offering with conserve mode " + "On...") with self.assertRaises(Exception): - self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"], conserve_mode=True) - self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC") + self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"], + conserve_mode=True) + self.debug("Network offering creation failed as only networks with " + "conserve mode Off can belong to VPC") # Creating VPC networks in the VPC - self.debug("Creating a persistent VPC network with Internal LB service...") - internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + self.debug("Creating a persistent VPC network with Internal LB " + "service...") + internal_tier = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier, state="Implemented") vr = self.get_Router(internal_tier) self.check_Router_state(vr, state="Running") @@ -354,13 +504,16 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_network(self.domain.id, internal_tier, vpc) self.verify_vsd_router(vr) - self.debug("Creating a non persistent VPC network with Internal LB service...") + self.debug("Creating a non persistent VPC network with Internal LB " + "service...") with self.assertRaises(Exception): self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc) self.debug("Nuage VSP does not support non persistent VPC networks") - self.debug("Creating a persistent VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_3, gateway='10.1.3.1', vpc=vpc) + self.debug("Creating a persistent VPC network without Internal LB " + "service...") + public_tier = self.create_Network( + net_off_3, gateway='10.1.3.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -369,7 +522,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_network(self.domain.id, public_tier, vpc) self.verify_vsd_router(vr) - self.debug("Creating a non persistent VPC network without Internal LB service...") + self.debug("Creating a non persistent VPC network without Internal LB " + "service...") with self.assertRaises(Exception): self.create_Network(net_off_4, gateway='10.1.4.1', vpc=vpc) self.debug("Nuage VSP does not support non persistent VPC networks") @@ -379,36 +533,52 @@ class TestNuageInternalLb(nuageTestCase): """Test Nuage VSP VPC Networks with and 
without Internal LB service """ - # 1. Create Nuage VSP VPC offering with Internal LB service, check if it is successfully created and enabled. - # 2. Create Nuage VSP VPC offering without Internal LB service, check if it is successfully created and enabled. - # 3. Create a VPC "vpc_1" with Internal LB service, check if it is successfully created and enabled. - # 4. Create a VPC "vpc_2" without Internal LB service, check if it is successfully created and enabled. - # 5. Create Nuage VSP VPC Network offering with Internal LB service, check if it is successfully created and - # enabled. - # 6. Create Nuage VSP VPC Network offering without Internal LB service, check if it is successfully created and - # enabled. - # 7. Create a VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added to the - # VPC VR, and the VM is deployed successfully in the tier. - # 8. Create one more VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added - # to the VPC VR, and the VM is deployed successfully in the tier. - # 9. Create a VPC network in vpc_2 with Internal LB service, check if the tier creation failed. - # 10. Create a VPC network in vpc_1 without Internal LB service and spawn a VM, check if the tier is added to - # the VPC VR, and the VM is deployed successfully in the tier. - # 11. Create a VPC network in vpc_2 without Internal LB service and spawn a VM, check if the tier is added to - # the VPC VR, and the VM is deployed successfully in the tier. - # 12. Upgrade the VPC network with Internal LB service to one with no Internal LB service and vice-versa, check - # if the VPC Network offering upgrade passed in both directions. - # 13. Delete the VPC network with Internal LB service, check if the tier is successfully deleted. - # 14. Recreate the VPC network with Internal LB service, check if the tier is successfully re-created. + # 1. Create Nuage VSP VPC offering with Internal LB service, check if + # it is successfully created and enabled. + # 2. Create Nuage VSP VPC offering without Internal LB service, check + # if it is successfully created and enabled. + # 3. Create a VPC "vpc_1" with Internal LB service, check if it is + # successfully created and enabled. + # 4. Create a VPC "vpc_2" without Internal LB service, check if it is + # successfully created and enabled. + # 5. Create Nuage VSP VPC Network offering with Internal LB service, + # check if it is successfully created and enabled. + # 6. Create Nuage VSP VPC Network offering without Internal LB service, + # check if it is successfully created and enabled. + # 7. Create a VPC network in vpc_1 with Internal LB service and spawn a + # VM, check if the tier is added to the VPC VR, and the VM is + # deployed successfully in the tier. + # 8. Create one more VPC network in vpc_1 with Internal LB service and + # spawn a VM, check if the tier is added to the VPC VR, and the VM + # is deployed successfully in the tier. + # 9. Create a VPC network in vpc_2 with Internal LB service, check if + # the tier creation failed. + # 10. Create a VPC network in vpc_1 without Internal LB service and + # spawn a VM, check if the tier is added to the VPC VR, and the VM + # is deployed successfully in the tier. + # 11. Create a VPC network in vpc_2 without Internal LB service and + # spawn a VM, check if the tier is added to the VPC VR, and the VM + # is deployed successfully in the tier. + # 12. 
Upgrade the VPC network with Internal LB service to one with no + # Internal LB service and vice-versa, check if the VPC Network + # offering upgrade passed in both directions. + # 13. Delete the VPC network with Internal LB service, check if the + # tier is successfully deleted. + # 14. Recreate the VPC network with Internal LB service, check if the + # tier is successfully re-created. # 15. Delete all the created objects (cleanup). # Creating VPC offerings - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off_1 = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC offering without Internal LB service...") - vpc_off_2 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"]) + self.debug("Creating Nuage VSP VPC offering without Internal LB " + "service...") + vpc_off_2 = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering"]) self.validate_VpcOffering(vpc_off_2, state="Enabled") # Creating VPCs @@ -421,18 +591,23 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc_2, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in VPCs, and deploying VMs - self.debug("Creating a VPC network in vpc_1 with Internal LB service...") - internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_1) + self.debug("Creating a VPC network in vpc_1 with Internal LB " + "service...") + internal_tier_1 = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc_1) self.validate_Network(internal_tier_1, state="Implemented") vr_1 = self.get_Router(internal_tier_1) self.check_Router_state(vr_1, state="Running") @@ -446,8 +621,10 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_router(vr_1) self.verify_vsd_vm(internal_vm_1) - self.debug("Creating one more VPC network in vpc_1 with Internal LB service...") - internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1) + self.debug("Creating one more VPC network in vpc_1 with Internal LB " + "service...") + internal_tier_2 = self.create_Network( + net_off_1, gateway='10.1.2.1', vpc=vpc_1) self.validate_Network(internal_tier_2, state="Implemented") vr_1 = self.get_Router(internal_tier_2) self.check_Router_state(vr_1, state="Running") @@ -461,13 +638,17 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_router(vr_1) self.verify_vsd_vm(internal_vm_2) - self.debug("Creating a VPC network in vpc_2 with Internal LB service...") + self.debug("Creating a VPC network in vpc_2 with Internal LB " + 
"service...") with self.assertRaises(Exception): self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_2) - self.debug("VPC Network creation failed as vpc_2 does not support Internal Lb service") + self.debug("VPC Network creation failed as vpc_2 does not support " + "Internal Lb service") - self.debug("Creating a VPC network in vpc_1 without Internal LB service...") - public_tier_1 = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc_1) + self.debug("Creating a VPC network in vpc_1 without Internal LB " + "service...") + public_tier_1 = self.create_Network( + net_off_2, gateway='10.1.3.1', vpc=vpc_1) self.validate_Network(public_tier_1, state="Implemented") vr_1 = self.get_Router(public_tier_1) self.check_Router_state(vr_1, state="Running") @@ -481,8 +662,10 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_router(vr_1) self.verify_vsd_vm(public_vm_1) - self.debug("Creating a VPC network in vpc_2 without Internal LB service...") - public_tier_2 = self.create_Network(net_off_2, gateway='10.1.1.1', vpc=vpc_2) + self.debug("Creating a VPC network in vpc_2 without Internal LB " + "service...") + public_tier_2 = self.create_Network( + net_off_2, gateway='10.1.1.1', vpc=vpc_2) self.validate_Network(public_tier_2, state="Implemented") vr_2 = self.get_Router(public_tier_2) self.check_Router_state(vr_2, state="Running") @@ -497,7 +680,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(public_vm_2) # Upgrading a VPC network - self.debug("Upgrading a VPC network with Internal LB Service to one without Internal LB Service...") + self.debug("Upgrading a VPC network with Internal LB Service to one " + "without Internal LB Service...") self.upgrade_Network(net_off_2, internal_tier_2) self.validate_Network(internal_tier_2, state="Implemented") vr_1 = self.get_Router(internal_tier_2) @@ -509,7 +693,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_router(vr_1) self.verify_vsd_vm(internal_vm_2) - self.debug("Upgrading a VPC network without Internal LB Service to one with Internal LB Service...") + self.debug("Upgrading a VPC network without Internal LB Service to " + "one with Internal LB Service...") self.upgrade_Network(net_off_1, internal_tier_2) self.validate_Network(internal_tier_2, state="Implemented") vr_1 = self.get_Router(internal_tier_2) @@ -535,7 +720,8 @@ class TestNuageInternalLb(nuageTestCase): self.debug("VPC network successfully deleted in VSD") self.debug("Recreating a VPC network with Internal LB Service...") - internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1) + internal_tier_2 = self.create_Network( + net_off_1, gateway='10.1.2.1', vpc=vpc_1) internal_vm_2 = self.create_VM(internal_tier_2) self.validate_Network(internal_tier_2, state="Implemented") vr_1 = self.get_Router(internal_tier_2) @@ -549,38 +735,50 @@ class TestNuageInternalLb(nuageTestCase): @attr(tags=["advanced", "nuagevsp"], required_hardware="false") def test_04_nuage_internallb_rules(self): - """Test Nuage VSP VPC Internal LB functionality with different combinations of Internal LB rules + """Test Nuage VSP VPC Internal LB functionality with different + combinations of Internal LB rules """ - # 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully - # created. - # 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is - # successfully created. - # 3. 
Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR - # range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's + # 1. Create an Internal LB Rule with source IP Address specified, check + # if the Internal LB Rule is successfully created. + # 2. Create an Internal LB Rule without source IP Address specified, + # check if the Internal LB Rule is successfully created. + # 3. Create an Internal LB Rule when the specified source IP Address is + # outside the VPC network (tier) CIDR range, check if the Internal + # LB Rule creation failed as the requested source IP is not in the + # network's CIDR subnet. + # 4. Create an Internal LB Rule when the specified source IP Address is + # outside the VPC super CIDR range, check if the Internal LB Rule + # creation failed as the requested source IP is not in the network's # CIDR subnet. - # 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range, - # check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR - # subnet. - # 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB - # Rule creation failed as Scheme Internal is not supported by this network offering. - # 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal + # 5. Create an Internal LB Rule in the tier with LB service provider as + # VpcInlineLbVm, check if the Internal LB Rule creation failed as + # Scheme Internal is not supported by this network offering. + # 6. Create multiple Internal LB Rules using different Load Balancing + # source IP Addresses, check if the Internal LB Rules are + # successfully created. + # 7. Create multiple Internal LB Rules with different ports but using + # the same Load Balancing source IP Address, check if the Internal # LB Rules are successfully created. - # 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address, - # check if the Internal LB Rules are successfully created. - # 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address, - # check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule. - # 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the - # Internal LB Rules. - # 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a - # VM to it. - # 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier. - # 12. Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted. + # 8. Create multiple Internal LB Rules with same ports and using the + # same Load Balancing source IP Address, check if the second + # Internal LB Rule creation failed as it conflicts with the first + # Internal LB rule. + # 9. Attach a VM to the above created Internal LB Rules, check if the + # VM is successfully attached to the Internal LB Rules. + # 10. Verify the InternalLbVm deployment after successfully creating + # the first Internal LB Rule and attaching a VM to it. + # 11. Verify the failure of attaching a VM from a different tier to an + # Internal LB Rule created on a tier. + # 12. 
Delete the above created Internal LB Rules, check if the Internal + # LB Rules are successfully deleted. # 13. Delete all the created objects (cleanup). # Creating a VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating a VPC @@ -589,18 +787,22 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in the VPC, and deploying VMs self.debug("Creating a VPC network with Internal LB service...") - internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + internal_tier = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier, state="Implemented") vr = self.get_Router(internal_tier) self.check_Router_state(vr, state="Running") @@ -615,7 +817,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.debug("Creating a VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc) + public_tier = self.create_Network( + net_off_2, gateway='10.1.2.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -630,14 +833,17 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(public_vm) # Creating Internal LB Rules - self.debug("Creating an Internal LB Rule without source IP Address specified...") + self.debug("Creating an Internal LB Rule without source IP Address " + "specified...") int_lb_rule = self.create_Internal_LB_Rule(internal_tier) self.validate_Internal_LB_Rule(int_lb_rule, state="Add") # Validating InternalLbVm deployment with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) - self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule.sourceipaddress) + self.debug("InternalLbVm is not deployed in the network as there are " + "no VMs assigned to this Internal LB Rule") self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name) int_lb_rule.delete(self.api_client) @@ -647,14 +853,18 @@ class TestNuageInternalLb(nuageTestCase): free_source_ip = int_lb_rule.sourceipaddress - self.debug("Creating an Internal LB Rule with source IP Address specified...") - int_lb_rule = 
self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip) + self.debug("Creating an Internal LB Rule with source IP Address " + "specified...") + int_lb_rule = self.create_Internal_LB_Rule( + internal_tier, source_ip=free_source_ip) self.validate_Internal_LB_Rule(int_lb_rule, state="Add") # Validating InternalLbVm deployment with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) - self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule.sourceipaddress) + self.debug("InternalLbVm is not deployed in the network as there are " + "no VMs assigned to this Internal LB Rule") self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name) int_lb_rule.delete(self.api_client) @@ -662,58 +872,79 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Internal_LB_Rule(int_lb_rule) self.debug("Internal LB Rule successfully deleted in CloudStack") - self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR " - "range...") + self.debug("Creating an Internal LB Rule when the specified source IP " + "Address is outside the VPC network CIDR range...") with self.assertRaises(Exception): self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256") - self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet") + self.debug("Internal LB Rule creation failed as the requested IP is " + "not in the network's CIDR subnet") - self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR " - "range...") + self.debug("Creating an Internal LB Rule when the specified source IP " + "Address is outside the VPC super CIDR range...") with self.assertRaises(Exception): self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256") - self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet") + self.debug("Internal LB Rule creation failed as the requested IP is " + "not in the network's CIDR subnet") - self.debug("Creating an Internal LB Rule in a VPC network without Internal Lb service...") + self.debug("Creating an Internal LB Rule in a VPC network without " + "Internal Lb service...") with self.assertRaises(Exception): self.create_Internal_LB_Rule(public_tier) - self.debug("Internal LB Rule creation failed as Scheme Internal is not supported by this network offering") + self.debug("Internal LB Rule creation failed as Scheme Internal is " + "not supported by this network offering") - self.debug("Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + self.debug("Creating multiple Internal LB Rules using different Load " + "Balancing source IP Addresses...") + int_lb_rule_1 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + 
int_lb_rule_2, state="Active", vm_array=[internal_vm]) # Validating InternalLbVms deployment and state - int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") - int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_2.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running") + int_lb_vm_1 = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm_2 = self.get_InternalLbVm( + internal_tier, int_lb_rule_2.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_2.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) self.verify_vsd_lb_device(int_lb_vm_2) - self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + self.debug('Removing VMs from the Internal LB Rules - %s, %s' % + (int_lb_rule_1.name, int_lb_rule_2.name)) int_lb_rule_1.remove(self.api_client, vms=[internal_vm]) with self.assertRaises(Exception): - self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm]) - self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + self.validate_Internal_LB_Rule( + int_lb_rule_1, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in " + "CloudStack") int_lb_rule_2.remove(self.api_client, vms=[internal_vm]) with self.assertRaises(Exception): - self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm]) - self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + self.validate_Internal_LB_Rule( + int_lb_rule_2, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in " + "CloudStack") # Validating InternalLbVms state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") - self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_2.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) self.verify_vsd_lb_device(int_lb_vm_2) - self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + self.debug('Deleting the Internal LB Rules - %s, %s' % + (int_lb_rule_1.name, int_lb_rule_2.name)) int_lb_rule_1.delete(self.api_client) with self.assertRaises(Exception): self.validate_Internal_LB_Rule(int_lb_rule_1) @@ -725,10 +956,12 @@ class TestNuageInternalLb(nuageTestCase): # Validating InternalLbVms un-deployment with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress) self.debug("InternalLbVm successfully destroyed in CloudStack") with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_2.sourceipaddress) self.debug("InternalLbVm successfully destroyed in CloudStack") # VSD Verification @@ -739,41 +972,52 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_lb_device(int_lb_vm_2) 
self.debug("InternalLbVm successfully destroyed in VSD") - self.debug("Creating multiple Internal LB Rules with different ports but using the same Load Balancing source " - "IP Address...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_1.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + self.debug("Creating multiple Internal LB Rules with different ports " + "but using the same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", vm_array=[internal_vm]) # Validating InternalLbVm deployment and state - int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) - self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + self.debug('Removing VMs from the Internal LB Rules - %s, %s' % + (int_lb_rule_1.name, int_lb_rule_2.name)) int_lb_rule_1.remove(self.api_client, vms=[internal_vm]) with self.assertRaises(Exception): - self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm]) - self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + self.validate_Internal_LB_Rule( + int_lb_rule_1, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in " + "CloudStack") int_lb_rule_2.remove(self.api_client, vms=[internal_vm]) with self.assertRaises(Exception): - self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm]) - self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + self.validate_Internal_LB_Rule( + int_lb_rule_2, vm_array=[internal_vm]) + self.debug("VMs successfully removed from the Internal LB Rule in " + "CloudStack") # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) - self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name)) + self.debug('Deleting the Internal LB Rules - %s, %s' % + (int_lb_rule_1.name, int_lb_rule_2.name)) int_lb_rule_1.delete(self.api_client) with self.assertRaises(Exception): self.validate_Internal_LB_Rule(int_lb_rule_1) @@ -785,7 +1029,8 @@ class TestNuageInternalLb(nuageTestCase): # Validating InternalLbVm un-deployment with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress) + 
self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress) self.debug("InternalLbVm successfully destroyed in CloudStack") # VSD Verification @@ -793,29 +1038,39 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_lb_device(int_lb_vm) self.debug("InternalLbVm successfully destroyed in VSD") - self.debug("Creating multiple Internal LB Rules with same ports and using the same Load Balancing source IP " - "Address...") - int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule, state="Active", vm_array=[internal_vm]) + self.debug("Creating multiple Internal LB Rules with same ports and " + "using the same Load Balancing source IP Address...") + int_lb_rule = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule, state="Active", vm_array=[internal_vm]) with self.assertRaises(Exception): - self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress) - self.debug("Internal LB Rule creation failed as it conflicts with the existing rule") + self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm], + source_ip=int_lb_rule.sourceipaddress) + self.debug("Internal LB Rule creation failed as it conflicts with the " + "existing rule") # Validating InternalLbVm deployment and state - int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) - self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name) + self.debug('Removing VMs from the Internal LB Rule - %s' % + int_lb_rule.name) int_lb_rule.remove(self.api_client, vms=[internal_vm]) with self.assertRaises(Exception): self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm]) - self.debug("VMs successfully removed from the Internal LB Rule in CloudStack") + self.debug("VMs successfully removed from the Internal LB Rule in " + "CloudStack") # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) @@ -828,7 +1083,8 @@ class TestNuageInternalLb(nuageTestCase): # Validating InternalLbVm un-deployment with self.assertRaises(Exception): - self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule.sourceipaddress) self.debug("InternalLbVm successfully destroyed in CloudStack") # VSD Verification @@ -836,38 +1092,47 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_lb_device(int_lb_vm) self.debug("InternalLbVm successfully destroyed in VSD") - self.debug("Attaching a VM from a different tier to an Internal LB Rule created on a tier...") + self.debug("Attaching a VM from a different tier to an Internal LB " + "Rule created on a tier...") with self.assertRaises(Exception): self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm]) - self.debug("Internal LB Rule creation failed as the VM belongs to a different network") + self.debug("Internal LB Rule creation failed as 
the VM belongs to a " + "different network") @attr(tags=["advanced", "nuagevsp"], required_hardware="true") def test_05_nuage_internallb_traffic(self): - """Test Nuage VSP VPC Internal LB functionality by performing (wget) traffic tests within a VPC + """Test Nuage VSP VPC Internal LB functionality by performing (wget) + traffic tests within a VPC """ - # 1. Create an Internal LB Rule "internal_lbrule" with source IP Address specified on the Internal tier, check - # if the Internal LB Rule is successfully created. - # 2. Create an Internal LB Rule "internal_lbrule_http" with source IP Address (same as above) specified on the - # Internal tier, check if the Internal LB Rule is successfully created. - # 3. Attach a VM to the above created Internal LB Rules, check if the InternalLbVm is successfully deployed in - # the Internal tier. - # 4. Deploy two more VMs in the Internal tier, check if the VMs are successfully deployed. - # 5. Attach the newly deployed VMs to the above created Internal LB Rules, verify the validity of the above - # created Internal LB Rules over three Load Balanced VMs in the Internal tier. - # 6. Create the corresponding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible, - # check if the Network ACL rules are successfully added to the internal tier. - # 7. Validate the Internal LB functionality by performing (wget) traffic tests from a VM in the Public tier to - # the Internal load balanced guest VMs in the Internal tier, using Static NAT functionality to access (ssh) - # the VM on the Public tier. - # 8. Verify that the InternalLbVm gets destroyed when the last Internal LB rule is removed from the Internal - # tier. - # 9. Repeat the above steps for one more Internal tier as well, validate the Internal LB functionality. - # 10. Delete all the created objects (cleanup). + # 1. Create three different Internal LB Rules with a single source IP + # Address specified on the Internal tier, check if the Internal LB + # Rules are created successfully. + # 2. Attach a VM to the above created Internal LB Rules, check if the + # InternalLbVm is successfully deployed in the Internal tier. + # 3. Deploy two more VMs in the Internal tier, check if the VMs are + # successfully deployed. + # 4. Attach the newly deployed VMs to the above created Internal LB + # Rules, verify the validity of the above created Internal LB Rules + # over three Load Balanced VMs in the Internal tier. + # 5. Create the corresponding Network ACL rules to make the created + # Internal LB rules (SSH & HTTP) accessible, check if the Network + # ACL rules are successfully added to the internal tier. + # 6. Validate the Internal LB functionality by performing (wget) + # traffic tests from a VM in the Public tier to the Internal load + # balanced guest VMs in the Internal tier, using Static NAT + # functionality to access (ssh) the VM on the Public tier. + # 7. Verify that the InternalLbVm gets destroyed when the last Internal + # LB rule is removed from the Internal tier. + # 8. Repeat the above steps for one more Internal tier as well, + # validate the Internal LB functionality. + # 9. Delete all the created objects (cleanup). 
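# A minimal sketch (not part of this patch) of the check behind the (wget)
# traffic tests in steps 6-9 above: every load balanced VM serves a page
# carrying its own hostname, so fetching the LB source IP repeatedly from a
# client VM and collecting the responses reveals which backends were hit.
# The helper names and the ssh_client.execute() interface are illustrative
# assumptions, not the suite's API.

def collect_lb_responses(ssh_client, lb_source_ip, port, attempts=10):
    """Fetch the load balanced page several times from a client VM."""
    responses = []
    for _ in range(attempts):
        # wget -q -O - prints the fetched page (the backend's hostname)
        out = ssh_client.execute(
            "wget -q -O - http://%s:%s/" % (lb_source_ip, port))
        responses.append("".join(out).strip())
    return responses

def assert_all_backends_served(responses, vms):
    # Each VM behind the rule should answer at least once across attempts
    missing = [vm.name for vm in vms if vm.name not in responses]
    assert not missing, "backends never served by the LB: %s" % missing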
# Creating a VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating a VPC @@ -876,18 +1141,22 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in the VPC, and deploying VMs self.debug("Creating a VPC network with Internal LB service...") - internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + internal_tier_1 = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier_1, state="Implemented") vr = self.get_Router(internal_tier_1) self.check_Router_state(vr, state="Running") @@ -902,7 +1171,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_1) self.debug("Creating one more VPC network with Internal LB service...") - internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc) + internal_tier_2 = self.create_Network( + net_off_1, gateway='10.1.2.1', vpc=vpc) self.validate_Network(internal_tier_2, state="Implemented") vr = self.get_Router(internal_tier_2) self.check_Router_state(vr, state="Running") @@ -917,7 +1187,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_2) self.debug("Creating a VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc) + public_tier = self.create_Network( + net_off_2, gateway='10.1.3.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -932,25 +1203,42 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(public_vm) # Creating Internal LB Rules in the Internal tiers - self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier_1, vm_array=[internal_vm_1]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm_1]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier_1, - vm_array=[internal_vm_1], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_1.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm_1]) + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the " + "same Load Balancing source IP Address...") + int_lb_rule_1 = 
self.create_Internal_LB_Rule( + internal_tier_1, vm_array=[internal_vm_1]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm_1]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier_1, vm_array=[internal_vm_1], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", vm_array=[internal_vm_1]) + internal_lbrule_http = copy.deepcopy( + self.test_data["internal_lbrule_http"]) + internal_lbrule_http["sourceport"] = 8080 + internal_lbrule_http["instanceport"] = 8080 + int_lb_rule_3 = self.create_Internal_LB_Rule( + internal_tier_1, + vm_array=[internal_vm_1], + services=internal_lbrule_http, + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_3, state="Active", vm_array=[internal_vm_1]) # Validating InternalLbVm deployment and state - int_lb_vm_1 = self.get_InternalLbVm(internal_tier_1, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm_1 = self.get_InternalLbVm( + internal_tier_1, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) # Deploying more VMs in the Internal tier - self.debug("Deploying two more VMs in network - %s" % internal_tier_1.name) + self.debug("Deploying two more VMs in network - %s" % + internal_tier_1.name) internal_vm_1_1 = self.create_VM(internal_tier_1) internal_vm_1_2 = self.create_VM(internal_tier_1) @@ -959,49 +1247,82 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_1_2) # Adding newly deployed VMs to the created Internal LB rules - self.debug("Adding two more virtual machines to the created Internal LB rules...") - int_lb_rule_1.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", - vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) - int_lb_rule_2.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2]) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", - vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + self.debug("Adding two more virtual machines to the created Internal " + "LB rules...") + int_lb_rule_1.assign( + self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + int_lb_rule_2.assign( + self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + int_lb_rule_3.assign( + self.api_client, [internal_vm_1_1, internal_vm_1_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_3, state="Active", + vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2]) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) # Adding Network ACL rules in the Internal tier - self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") - ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], 
network=internal_tier_1) - http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_1) + self.debug("Adding Network ACL rules to make the created Internal LB " + "rules (HTTP) accessible...") + http_rule_1 = self.create_NetworkAclRule( + self.test_data["http_rule"], network=internal_tier_1) + http_rule = copy.deepcopy(self.test_data["http_rule"]) + http_rule["privateport"] = 8080 + http_rule["publicport"] = 8080 + http_rule["startport"] = 8080 + http_rule["endport"] = 8080 + http_rule_2 = self.create_NetworkAclRule( + http_rule, network=internal_tier_1) # VSD verification - self.verify_vsd_firewall_rule(ssh_rule) - self.verify_vsd_firewall_rule(http_rule) + self.verify_vsd_firewall_rule(http_rule_1) + self.verify_vsd_firewall_rule(http_rule_2) # Creating Internal LB Rules in the Internal tier - self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") - int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier_2, vm_array=[internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", vm_array=[internal_vm_2]) - int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier_2, - vm_array=[internal_vm_2], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_3.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", vm_array=[internal_vm_2]) + self.debug("Creating three Internal LB Rules (SSH & HTTP) using the " + "same Load Balancing source IP Address...") + int_lb_rule_4 = self.create_Internal_LB_Rule( + internal_tier_2, vm_array=[internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_4, state="Active", vm_array=[internal_vm_2]) + int_lb_rule_5 = self.create_Internal_LB_Rule( + internal_tier_2, + vm_array=[internal_vm_2], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_4.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_5, state="Active", vm_array=[internal_vm_2]) + int_lb_rule_6 = self.create_Internal_LB_Rule( + internal_tier_2, + vm_array=[internal_vm_2], + services=internal_lbrule_http, + source_ip=int_lb_rule_4.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_6, state="Active", vm_array=[internal_vm_2]) # Validating InternalLbVm deployment and state - int_lb_vm_2 = self.get_InternalLbVm(internal_tier_2, int_lb_rule_3.sourceipaddress) - self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running") + int_lb_vm_2 = self.get_InternalLbVm( + internal_tier_2, int_lb_rule_4.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_2) # Deploying more VMs in the Internal tier - self.debug("Deploying two more VMs in network - %s" % internal_tier_2.name) + self.debug("Deploying two more VMs in network - %s" % + internal_tier_2.name) internal_vm_2_1 = self.create_VM(internal_tier_2) internal_vm_2_2 = self.create_VM(internal_tier_2) @@ -1010,77 +1331,132 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_2_2) # Adding newly deployed VMs to the created Internal LB rules - self.debug("Adding two more virtual machines to the created Internal LB rules...") - int_lb_rule_3.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2]) - self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", - vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) - int_lb_rule_4.assign(self.api_client, [internal_vm_2_1, 
internal_vm_2_2]) - self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", - vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + self.debug("Adding two more virtual machines to the created Internal " + "LB rules...") + int_lb_rule_4.assign( + self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_4, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + int_lb_rule_5.assign( + self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_5, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + int_lb_rule_6.assign( + self.api_client, [internal_vm_2_1, internal_vm_2_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_6, state="Active", + vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2]) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_2) # Adding Network ACL rules in the Internal tier - self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") - ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier_2) - http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_2) + self.debug("Adding Network ACL rules to make the created Internal LB " + "rules (HTTP) accessible...") + http_rule_1 = self.create_NetworkAclRule( + self.test_data["http_rule"], network=internal_tier_2) + http_rule_2 = self.create_NetworkAclRule( + http_rule, network=internal_tier_2) # VSD verification - self.verify_vsd_firewall_rule(ssh_rule) - self.verify_vsd_firewall_rule(http_rule) + self.verify_vsd_firewall_rule(http_rule_1) + self.verify_vsd_firewall_rule(http_rule_2) + + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic( + internal_vm_1, internal_tier_1, vpc) + self.verify_vpc_vm_ingress_traffic( + internal_vm_1_1, internal_tier_1, vpc) + self.verify_vpc_vm_ingress_traffic( + internal_vm_1_2, internal_tier_1, vpc) + self.verify_vpc_vm_ingress_traffic( + internal_vm_2, internal_tier_2, vpc) + self.verify_vpc_vm_ingress_traffic( + internal_vm_2_1, internal_tier_2, vpc) + self.verify_vpc_vm_ingress_traffic( + internal_vm_2_2, internal_tier_2, vpc) # Creating Static NAT rule for the VM in the Public tier public_ip = self.acquire_PublicIPAddress(public_tier, vpc) self.validate_PublicIPAddress(public_ip, public_tier) self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) # Adding Network ACL rule in the Public tier - self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") - public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier) + self.debug("Adding Network ACL rule to make the created NAT rule " + 
"(SSH) accessible...") + public_ssh_rule = self.create_NetworkAclRule( + self.test_data["ingress_rule"], network=public_tier) # VSD verification self.verify_vsd_firewall_rule(public_ssh_rule) # Internal LB (wget) traffic tests ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file_1 = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file_1 = self.wget_from_vm_cmd( + ssh_client, + int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file_2 = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_3.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file_2 = self.wget_from_vm_cmd( + ssh_client, + int_lb_rule_1.sourceipaddress, + http_rule["publicport"]) + ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file_3 = self.wget_from_vm_cmd( + ssh_client, + int_lb_rule_4.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file_4 = self.wget_from_vm_cmd( + ssh_client, + int_lb_rule_4.sourceipaddress, + http_rule["publicport"]) # Verifying Internal LB (wget) traffic tests - self.verify_lb_wget_file(wget_file_1, [internal_vm_1, internal_vm_1_1, internal_vm_1_2]) - self.verify_lb_wget_file(wget_file_2, [internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + self.verify_lb_wget_file( + wget_file_1, [internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + self.verify_lb_wget_file( + wget_file_2, [internal_vm_1, internal_vm_1_1, internal_vm_1_2]) + self.verify_lb_wget_file( + wget_file_3, [internal_vm_2, internal_vm_2_1, internal_vm_2_2]) + self.verify_lb_wget_file( + wget_file_4, [internal_vm_2, internal_vm_2_1, internal_vm_2_2]) @attr(tags=["advanced", "nuagevsp"], required_hardware="true") def test_06_nuage_internallb_algorithms_traffic(self): - """Test Nuage VSP VPC Internal LB functionality with different LB algorithms by performing (wget) traffic tests - within a VPC + """Test Nuage VSP VPC Internal LB functionality with different LB + algorithms by performing (wget) traffic tests within a VPC """ - # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with different Internal LB algorithms: + # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" + # with different Internal LB algorithms: # 1. Round Robin # 2. Least connections # 3. Source - # Verify the above Internal LB algorithms by performing multiple (wget) traffic tests within a VPC. + # Verify the above Internal LB algorithms by performing multiple (wget) + # traffic tests within a VPC. # Delete all the created objects (cleanup). 
# Creating a VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating a VPC @@ -1089,18 +1465,22 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in the VPC, and deploying VMs self.debug("Creating a VPC network with Internal LB service...") - internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + internal_tier = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier, state="Implemented") vr = self.get_Router(internal_tier) self.check_Router_state(vr, state="Running") @@ -1115,7 +1495,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.debug("Creating a VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc) + public_tier = self.create_Network( + net_off_2, gateway='10.1.2.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -1129,26 +1510,33 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_router(vr) self.verify_vsd_vm(public_vm) - # Creating Internal LB Rules in the Internal tier with Round Robin Algorithm - self.debug("Creating two Internal LB Rules (SSH & HTTP) with Round Robin Algorithm...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_1.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + # Creating Internal LB Rules in the Internal tier with Round Robin + # Algorithm + self.debug("Creating two Internal LB Rules (SSH & HTTP) with Round " + "Robin Algorithm...") + int_lb_rule_1 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", 
vm_array=[internal_vm]) # Validating InternalLbVm deployment and state - int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm_1 = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) # Deploying more VMs in the Internal tier - self.debug("Deploying two more VMs in network - %s" % internal_tier.name) + self.debug("Deploying two more VMs in network - %s" % + internal_tier.name) internal_vm_1 = self.create_VM(internal_tier) internal_vm_2 = self.create_VM(internal_tier) @@ -1157,151 +1545,179 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_2) # Adding newly deployed VMs to the created Internal LB rules - self.debug("Adding two more virtual machines to the created Internal LB rules...") + self.debug("Adding two more virtual machines to the created Internal " + "LB rules...") int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_1) - # Creating Internal LB Rules in the Internal tier with Least connections Algorithm - self.debug("Creating two Internal LB Rules (SSH & HTTP) with Least connections Algorithm...") + # Creating Internal LB Rules in the Internal tier with Least + # connections Algorithm + self.debug("Creating two Internal LB Rules (SSH & HTTP) with Least " + "connections Algorithm...") self.test_data["internal_lbrule"]["algorithm"] = "leastconn" - int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm, internal_vm_1, internal_vm_2], - services=self.test_data["internal_lbrule"] - ) - self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_3 = self.create_Internal_LB_Rule( + internal_tier, + vm_array=[internal_vm, internal_vm_1, internal_vm_2], + services=self.test_data["internal_lbrule"]) + self.validate_Internal_LB_Rule( + int_lb_rule_3, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) self.test_data["internal_lbrule_http"]["algorithm"] = "leastconn" - int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm, internal_vm_1, internal_vm_2], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_3.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_4 = self.create_Internal_LB_Rule( + internal_tier, + vm_array=[internal_vm, internal_vm_1, 
internal_vm_2], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_3.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_4, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) # Validating InternalLbVm deployment and state - int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_3.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_3.sourceipaddress, state="Running") + int_lb_vm_2 = self.get_InternalLbVm( + internal_tier, int_lb_rule_3.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_3.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_2) # Creating Internal LB Rules in the Internal tier with Source Algorithm - self.debug("Creating two Internal LB Rules (SSH & HTTP) with Source Algorithm...") + self.debug("Creating two Internal LB Rules (SSH & HTTP) with Source " + "Algorithm...") self.test_data["internal_lbrule"]["algorithm"] = "source" - int_lb_rule_5 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm, internal_vm_1, internal_vm_2], - services=self.test_data["internal_lbrule"] - ) - self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_5 = self.create_Internal_LB_Rule( + internal_tier, + vm_array=[internal_vm, internal_vm_1, internal_vm_2], + services=self.test_data["internal_lbrule"]) + self.validate_Internal_LB_Rule( + int_lb_rule_5, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) self.test_data["internal_lbrule_http"]["algorithm"] = "source" - int_lb_rule_6 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm, internal_vm_1, internal_vm_2], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_5.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + int_lb_rule_6 = self.create_Internal_LB_Rule( + internal_tier, + vm_array=[internal_vm, internal_vm_1, internal_vm_2], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_5.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_6, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) # Validating InternalLbVm deployment and state - int_lb_vm_3 = self.get_InternalLbVm(internal_tier, int_lb_rule_5.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_5.sourceipaddress, state="Running") + int_lb_vm_3 = self.get_InternalLbVm( + internal_tier, int_lb_rule_5.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_5.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm_3) # Adding Network ACL rules in the Internal tier - self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") - ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier) - http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier) + self.debug("Adding Network ACL rules to make the created Internal LB " + "rules (HTTP) accessible...") + http_rule = self.create_NetworkAclRule( + self.test_data["http_rule"], network=internal_tier) # VSD verification - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying 
Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Creating Static NAT rule for the VM in the Public tier public_ip = self.acquire_PublicIPAddress(public_tier, vpc) self.validate_PublicIPAddress(public_ip, public_tier) self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) # Adding Network ACL rule in the Public tier - self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") - public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier) + self.debug("Adding Network ACL rule to make the created NAT rule " + "(SSH) accessible...") + public_ssh_rule = self.create_NetworkAclRule( + self.test_data["ingress_rule"], network=public_tier) # VSD verification self.verify_vsd_firewall_rule(public_ssh_rule) # Internal LB (wget) traffic tests with Round Robin Algorithm ssh_client = self.ssh_into_VM(public_vm, public_ip) - self.validate_internallb_algorithm_traffic(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"], - [internal_vm, internal_vm_1, internal_vm_2], - "roundrobin" - ) + self.validate_internallb_algorithm_traffic( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"], + [internal_vm, internal_vm_1, internal_vm_2], "roundrobin") # Internal LB (wget) traffic tests with Least connections Algorithm ssh_client = self.ssh_into_VM(public_vm, public_ip) - self.validate_internallb_algorithm_traffic(ssh_client, - int_lb_rule_3.sourceipaddress, - self.test_data["http_rule"]["publicport"], - [internal_vm, internal_vm_1, internal_vm_2], - "leastconn" - ) + self.validate_internallb_algorithm_traffic( + ssh_client, int_lb_rule_3.sourceipaddress, + self.test_data["http_rule"]["publicport"], + [internal_vm, internal_vm_1, internal_vm_2], "leastconn") # Internal LB (wget) traffic tests with Source Algorithm ssh_client = self.ssh_into_VM(public_vm, public_ip) - self.validate_internallb_algorithm_traffic(ssh_client, - int_lb_rule_5.sourceipaddress, - self.test_data["http_rule"]["publicport"], - [internal_vm, internal_vm_1, internal_vm_2], - "source" - ) + self.validate_internallb_algorithm_traffic( + ssh_client, int_lb_rule_5.sourceipaddress, + self.test_data["http_rule"]["publicport"], + [internal_vm, internal_vm_1, internal_vm_2], "source") @attr(tags=["advanced", "nuagevsp"], required_hardware="true") def test_07_nuage_internallb_vpc_network_restarts_traffic(self): - """Test Nuage VSP VPC Internal LB functionality with restarts of VPC network components by performing (wget) - traffic tests within a VPC + """Test Nuage VSP VPC Internal LB functionality with restarts of VPC + network components by performing (wget) traffic tests within a VPC """ - # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with restarts of VPC networks (tiers): - # 1. 
Restart tier with InternalLbVm (cleanup = false), verify that the InternalLbVm gets destroyed and deployed
-        #    again in the Internal tier.
-        # 2. Restart tier with InternalLbVm (cleanup = true), verify that the InternalLbVm gets destroyed and deployed
-        #    again in the Internal tier.
-        # 3. Restart tier without InternalLbVm (cleanup = false), verify that this restart has no effect on the
-        #    InternalLbVm functionality.
-        # 4. Restart tier without InternalLbVm (cleanup = true), verify that this restart has no effect on the
-        #    InternalLbVm functionality.
-        # 5. Stop all the VMs configured with InternalLbVm, verify that the InternalLbVm gets destroyed in the Internal
+        # Repeat the tests in the testcase "test_05_nuage_internallb_traffic"
+        # with restarts of VPC networks (tiers):
+        # 1. Restart tier with InternalLbVm (cleanup = false), verify that the
+        #    InternalLbVm gets destroyed and deployed again in the Internal
         #    tier.
-        # 6. Start all the VMs configured with InternalLbVm, verify that the InternalLbVm gets deployed again in the
-        #    Internal tier.
-        # 7. Restart VPC (cleanup = false), verify that the VPC VR gets rebooted and this restart has no effect on the
-        #    InternalLbVm functionality.
-        # 7. Restart VPC (cleanup = true), verify that the VPC VR gets rebooted and this restart has no effect on the
-        #    InternalLbVm functionality.
-        # Verify the above restarts of VPC networks (tiers) by performing (wget) traffic tests within a VPC.
+        # 2. Restart tier with InternalLbVm (cleanup = true), verify that the
+        #    InternalLbVm gets destroyed and deployed again in the Internal
+        #    tier.
+        # 3. Restart tier without InternalLbVm (cleanup = false), verify that
+        #    this restart has no effect on the InternalLbVm functionality.
+        # 4. Restart tier without InternalLbVm (cleanup = true), verify that
+        #    this restart has no effect on the InternalLbVm functionality.
+        # 5. Stop all the VMs configured with InternalLbVm, verify that the
+        #    InternalLbVm gets destroyed in the Internal tier.
+        # 6. Start all the VMs configured with InternalLbVm, verify that the
+        #    InternalLbVm gets deployed again in the Internal tier.
+        # 7. Restart VPC (cleanup = false), verify that the VPC VR gets
+        #    rebooted and this restart has no effect on the InternalLbVm
+        #    functionality.
+        # 8. Restart VPC (cleanup = true), verify that the VPC VR gets rebooted
+        #    and this restart has no effect on the InternalLbVm functionality.
+        # Verify the above restarts of VPC networks (tiers) by performing
+        # (wget) traffic tests within a VPC.
         # Delete all the created objects (cleanup).
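# A minimal sketch (not part of this patch) of the poll-and-retry pattern this
# test applies after restarts: keep re-trying the wget until the InternalLbVm
# is reachable again or the attempts run out, mirroring the
# "while tries < 25 ... time.sleep(60)" loop used further down. wait_for()
# and its arguments are hypothetical names, not the suite's API.
import time

def wait_for(condition, tries=25, interval=60):
    """Poll condition() until it returns a truthy value or tries run out."""
    for _ in range(tries):
        result = condition()
        if result:
            return result
        time.sleep(interval)
    raise AssertionError("condition still unmet after %d tries" % tries)

# Usage: wget_file = wait_for(
#     lambda: self.wget_from_vm_cmd(ssh_client, lb_ip, port))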
# Creating a VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating a VPC @@ -1310,18 +1726,22 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in the VPC, and deploying VMs self.debug("Creating a VPC network with Internal LB service...") - internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + internal_tier = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier, state="Implemented") vr = self.get_Router(internal_tier) self.check_Router_state(vr, state="Running") @@ -1336,7 +1756,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.debug("Creating a VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc) + public_tier = self.create_Network( + net_off_2, gateway='10.1.2.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -1351,25 +1772,31 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(public_vm) # Creating Internal LB Rules in the Internal tier - self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_1.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + self.debug("Creating two Internal LB Rules (SSH & HTTP) using the " + "same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", vm_array=[internal_vm]) # Validating InternalLbVm deployment and state - int_lb_vm = 
self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) # Deploying more VMs in the Internal tier - self.debug("Deploying two more VMs in network - %s" % internal_tier.name) + self.debug("Deploying two more VMs in network - %s" % + internal_tier.name) internal_vm_1 = self.create_VM(internal_tier) internal_vm_2 = self.create_VM(internal_tier) @@ -1378,54 +1805,70 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_2) # Adding newly deployed VMs to the created Internal LB rules - self.debug("Adding two more virtual machines to the created Internal LB rules...") + self.debug("Adding two more virtual machines to the created Internal " + "LB rules...") int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) # Adding Network ACL rules in the Internal tier - self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") - ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier) - http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier) + self.debug("Adding Network ACL rules to make the created Internal LB " + "rules (HTTP) accessible...") + http_rule = self.create_NetworkAclRule( + self.test_data["http_rule"], network=internal_tier) # VSD verification - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Creating Static NAT rule for the VM in the Public tier public_ip = self.acquire_PublicIPAddress(public_tier, vpc) self.validate_PublicIPAddress(public_ip, public_tier) self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + 
public_tier, public_vm, public_ip.ipaddress, vpc) # Adding Network ACL rule in the Public tier - self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") - public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier) + self.debug("Adding Network ACL rule to make the created NAT rule " + "(SSH) accessible...") + public_ssh_rule = self.create_NetworkAclRule( + self.test_data["ingress_rule"], network=public_tier) # VSD verification self.verify_vsd_firewall_rule(public_ssh_rule) # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restart Internal tier (cleanup = false) # InternalLbVm gets destroyed and deployed again in the Internal tier @@ -1443,38 +1886,35 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.verify_vsd_vm(internal_vm_1) self.verify_vsd_vm(internal_vm_2) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state # InternalLbVm gets destroyed and deployed again in the Internal tier - int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier: %s" % e) - self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic " - "test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier") - break + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restart Internal tier (cleanup = true) # InternalLbVm gets destroyed and deployed again in the Internal tier @@ -1492,39 +1932,35 @@ class 
TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.verify_vsd_vm(internal_vm_1) self.verify_vsd_vm(internal_vm_2) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state # InternalLbVm gets destroyed and deployed again in the Internal tier - int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier with cleanup: " - "%s" % e) - self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic " - "test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier with cleanup") - break + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restart Public tier (cleanup = false) # This restart has no effect on the InternalLbVm functionality @@ -1533,30 +1969,41 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Network(public_tier, state="Implemented") self.check_Router_state(vr, state="Running") self.check_VM_state(public_vm, state="Running") - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification self.verify_vsd_network(self.domain.id, public_tier, vpc) self.verify_vsd_router(vr) self.verify_vsd_vm(public_vm) - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) self.verify_vsd_firewall_rule(public_ssh_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, 
internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restart Public tier (cleanup = true) # This restart has no effect on the InternalLbVm functionality @@ -1565,33 +2012,45 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Network(public_tier, state="Implemented") self.check_Router_state(vr, state="Running") self.check_VM_state(public_vm, state="Running") - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification self.verify_vsd_network(self.domain.id, public_tier, vpc) self.verify_vsd_router(vr) self.verify_vsd_vm(public_vm) - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) self.verify_vsd_firewall_rule(public_ssh_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Stopping VMs in the Internal tier - # wget traffic test fails as all the VMs in the Internal tier are in stopped state + # wget traffic test fails as all the VMs in the Internal tier are in + # stopped state self.debug("Stopping all the VMs in the Internal tier...") internal_vm.stop(self.api_client) internal_vm_1.stop(self.api_client) @@ -1608,26 +2067,31 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm, stopped=True) self.verify_vsd_vm(internal_vm_1, stopped=True) self.verify_vsd_vm(internal_vm_2, stopped=True) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, 
int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + + # Verifying Internal LB (wget) traffic test with self.assertRaises(Exception): - self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - self.debug("Failed to wget file as all the VMs in the Internal tier are in stopped state") + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.debug("Failed to wget file as all the VMs in the Internal tier " + "are in stopped state") # Starting VMs in the Internal tier - # wget traffic test succeeds as all the VMs in the Internal tier are back in running state + # wget traffic test succeeds as all the VMs in the Internal tier are + # back in running state self.debug("Starting all the VMs in the Internal tier...") internal_vm.start(self.api_client) internal_vm_1.start(self.api_client) @@ -1644,38 +2108,41 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.verify_vsd_vm(internal_vm_1) self.verify_vsd_vm(internal_vm_2) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting all the VMs in the Internal tier" - ": %s" % e) - self.debug("Waiting for the InternalLbVm and all the VMs in the Internal tier to be fully resolved for " - "(wget) traffic test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting all the VMs in the Internal " - "tier") - break + while tries < 25: + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + if wget_file != "": + break + self.debug("Waiting for the InternalLbVm and all the VMs in the " + "Internal tier to be fully resolved for (wget) traffic " + "test...") + time.sleep(60) + tries += 1 # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restarting VPC (cleanup = false) # VPC VR gets destroyed and deployed again in the VPC @@ 
-1690,7 +2157,8 @@ class TestNuageInternalLb(nuageTestCase): self.check_VM_state(internal_vm, state="Running") self.check_VM_state(internal_vm_1, state="Running") self.check_VM_state(internal_vm_2, state="Running") - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification self.verify_vsd_network(self.domain.id, public_tier, vpc) @@ -1700,26 +2168,35 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.verify_vsd_vm(internal_vm_1) self.verify_vsd_vm(internal_vm_2) - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) self.verify_vsd_firewall_rule(public_ssh_rule) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Restarting VPC (cleanup = true) # VPC VR gets destroyed and deployed again in the VPC @@ -1734,7 +2211,8 @@ class TestNuageInternalLb(nuageTestCase): self.check_VM_state(internal_vm, state="Running") self.check_VM_state(internal_vm_1, state="Running") self.check_VM_state(internal_vm_2, state="Running") - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification self.verify_vsd_network(self.domain.id, public_tier, vpc) @@ -1744,48 +2222,62 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.verify_vsd_vm(internal_vm_1) self.verify_vsd_vm(internal_vm_2) - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + public_tier, public_vm, public_ip.ipaddress, vpc) self.verify_vsd_firewall_rule(public_ssh_rule) - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH 
into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) @attr(tags=["advanced", "nuagevsp"], required_hardware="true") def test_08_nuage_internallb_appliance_operations_traffic(self): - """Test Nuage VSP VPC Internal LB functionality with InternalLbVm appliance operations by performing (wget) - traffic tests within a VPC + """Test Nuage VSP VPC Internal LB functionality with InternalLbVm + appliance operations by performing (wget) traffic tests within a VPC """ - # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with InternalLbVm appliance operations: - # 1. Verify the InternalLbVm deployment by creating the Internal LB Rules when the VPC VR is in Stopped state, - # VPC VR has no effect on the InternalLbVm functionality. + # Repeat the tests in the testcase "test_05_nuage_internallb_traffic" + # with InternalLbVm appliance operations: + # 1. Verify the InternalLbVm deployment by creating the Internal LB + # Rules when the VPC VR is in Stopped state, VPC VR has no effect on + # the InternalLbVm functionality. # 2. Stop the InternalLbVm when the VPC VR is in Stopped State # 3. Start the InternalLbVm when the VPC VR is in Stopped state # 4. Stop the InternalLbVm when the VPC VR is in Running State # 5. Start the InternalLbVm when the VPC VR is in Running state # 6. Force stop the InternalLbVm when the VPC VR is in Running State # 7. Start the InternalLbVm when the VPC VR is in Running state - # Verify the above restarts of VPC networks by performing (wget) traffic tests within a VPC. + # Verify the above restarts of VPC networks by performing (wget) + # traffic tests within a VPC. # Delete all the created objects (cleanup). 
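    # [Editor's note] A minimal sketch, not part of the patch: the reworked
    # traffic tests in this file replace the old try/except wget retry loops
    # with either a single call or a bounded poll (25 tries, 60 seconds
    # apart). A generic helper along these lines would capture that pattern;
    # it assumes wget_from_vm_cmd() returns an empty string while the
    # InternalLbVm is still converging, and the helper name poll_wget is
    # hypothetical, not part of the test suite.
    def poll_wget(self, ssh_client, lb_ip, port, tries=25, delay=60):
        for attempt in range(tries):
            # Fetch the test file through the Internal LB source IP address
            wget_file = self.wget_from_vm_cmd(ssh_client, lb_ip, port)
            if wget_file != "":
                return wget_file
            self.debug("Attempt %d: InternalLbVm not yet resolved for "
                       "(wget) traffic test, retrying..." % (attempt + 1))
            time.sleep(delay)
        return ""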
# Creating a VPC offering - self.debug("Creating Nuage VSP VPC offering with Internal LB service...") - vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"]) + self.debug("Creating Nuage VSP VPC offering with Internal LB " + "service...") + vpc_off = self.create_VpcOffering( + self.test_data["nuagevsp"]["vpc_offering_lb"]) self.validate_VpcOffering(vpc_off, state="Enabled") # Creating a VPC @@ -1794,18 +2286,22 @@ class TestNuageInternalLb(nuageTestCase): self.validate_Vpc(vpc, state="Enabled") # Creating network offerings - self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...") + self.debug("Creating Nuage VSP VPC Network offering with Internal LB " + "service...") net_off_1 = self.create_NetworkOffering( self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"]) self.validate_NetworkOffering(net_off_1, state="Enabled") - self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...") - net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"]) + self.debug("Creating Nuage VSP VPC Network offering without Internal " + "LB service...") + net_off_2 = self.create_NetworkOffering( + self.test_data["nuagevsp"]["vpc_network_offering"]) self.validate_NetworkOffering(net_off_2, state="Enabled") # Creating VPC networks in the VPC, and deploying VMs self.debug("Creating a VPC network with Internal LB service...") - internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc) + internal_tier = self.create_Network( + net_off_1, gateway='10.1.1.1', vpc=vpc) self.validate_Network(internal_tier, state="Implemented") vr = self.get_Router(internal_tier) self.check_Router_state(vr, state="Running") @@ -1820,7 +2316,8 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm) self.debug("Creating a VPC network without Internal LB service...") - public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc) + public_tier = self.create_Network( + net_off_2, gateway='10.1.2.1', vpc=vpc) self.validate_Network(public_tier, state="Implemented") vr = self.get_Router(public_tier) self.check_Router_state(vr, state="Running") @@ -1847,25 +2344,31 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_network(self.domain.id, internal_tier, vpc) # Creating Internal LB Rules in the Internal tier - self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...") - int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm]) - int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, - vm_array=[internal_vm], - services=self.test_data["internal_lbrule_http"], - source_ip=int_lb_rule_1.sourceipaddress - ) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm]) + self.debug("Creating two Internal LB Rules (SSH & HTTP) using the " + "same Load Balancing source IP Address...") + int_lb_rule_1 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", vm_array=[internal_vm]) + int_lb_rule_2 = self.create_Internal_LB_Rule( + internal_tier, vm_array=[internal_vm], + services=self.test_data["internal_lbrule_http"], + source_ip=int_lb_rule_1.sourceipaddress) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", vm_array=[internal_vm]) # Validating InternalLbVm deployment and state - int_lb_vm 
= self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + int_lb_vm = self.get_InternalLbVm( + internal_tier, int_lb_rule_1.sourceipaddress) + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) # Deploying more VMs in the Internal tier - self.debug("Deploying two more VMs in network - %s" % internal_tier.name) + self.debug("Deploying two more VMs in network - %s" % + internal_tier.name) internal_vm_1 = self.create_VM(internal_tier) internal_vm_2 = self.create_VM(internal_tier) @@ -1874,99 +2377,125 @@ class TestNuageInternalLb(nuageTestCase): self.verify_vsd_vm(internal_vm_2) # Adding newly deployed VMs to the created Internal LB rules - self.debug("Adding two more virtual machines to the created Internal LB rules...") + self.debug("Adding two more virtual machines to the created Internal " + "LB rules...") int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_1, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2]) - self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", - vm_array=[internal_vm, internal_vm_1, internal_vm_2]) + self.validate_Internal_LB_Rule( + int_lb_rule_2, state="Active", + vm_array=[internal_vm, internal_vm_1, internal_vm_2]) # Validating InternalLbVm state - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) # Adding Network ACL rules in the Internal tier - self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...") - ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier) - http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier) + self.debug("Adding Network ACL rules to make the created Internal LB " + "rules (HTTP) accessible...") + http_rule = self.create_NetworkAclRule( + self.test_data["http_rule"], network=internal_tier) # VSD verification - self.verify_vsd_firewall_rule(ssh_rule) self.verify_vsd_firewall_rule(http_rule) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Creating Static NAT rule for the VM in the Public tier public_ip = self.acquire_PublicIPAddress(public_tier, vpc) self.validate_PublicIPAddress(public_ip, public_tier) self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier) - self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm) + self.validate_PublicIPAddress( + public_ip, public_tier, static_nat=True, vm=public_vm) # VSD verification - self.verify_vsd_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc) + self.verify_vsd_floating_ip( + 
public_tier, public_vm, public_ip.ipaddress, vpc) # Adding Network ACL rule in the Public tier - self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...") - public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier) + self.debug("Adding Network ACL rule to make the created NAT rule " + "(SSH) accessible...") + public_ssh_rule = self.create_NetworkAclRule( + self.test_data["ingress_rule"], network=public_tier) # VSD verification self.verify_vsd_firewall_rule(public_ssh_rule) # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # # Stopping the InternalLbVm when the VPC VR is in Stopped state self.stop_InternalLbVm(int_lb_vm) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") # VSD Verification self.verify_vsd_lb_device(int_lb_vm, stopped=True) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + + # Verifying Internal LB (wget) traffic test with self.assertRaises(Exception): - self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - self.debug("Failed to wget file as the InternalLbVm is in stopped state") + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.debug("Failed to wget file as the InternalLbVm is in stopped" + " state") # # Starting the InternalLbVm when the VPC VR is in Stopped state self.start_InternalLbVm(int_lb_vm) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - 
self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" - % e) - self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") - break + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # Starting the VPC VR # VPC VR has no effect on the InternalLbVm functionality @@ -1982,90 +2511,110 @@ class TestNuageInternalLb(nuageTestCase): # # Stopping the InternalLbVm when the VPC VR is in Running state self.stop_InternalLbVm(int_lb_vm) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") # VSD Verification self.verify_vsd_lb_device(int_lb_vm, stopped=True) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + + # Verifying Internal LB (wget) traffic test with self.assertRaises(Exception): - self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - self.debug("Failed to wget file as the InternalLbVm is in stopped state") + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.debug("Failed to wget file as the InternalLbVm is in stopped" + " state") # # Starting the InternalLbVm when the VPC VR is in Running state self.start_InternalLbVm(int_lb_vm) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" - % e) - self.debug("Waiting for the 
InternalLbVm to be fully resolved for (wget) traffic test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") - break + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) # # Force Stopping the InternalLbVm when the VPC VR is in Running state self.stop_InternalLbVm(int_lb_vm, force=True) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped") # VSD Verification self.verify_vsd_lb_device(int_lb_vm, stopped=True) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + self.test_data["http_rule"]["publicport"]) + + # Verifying Internal LB (wget) traffic test with self.assertRaises(Exception): - self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - self.debug("Failed to wget file as the InternalLbVm is in stopped state") + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.debug("Failed to wget file as the InternalLbVm is in stopped" + " state") # # Starting the InternalLbVm when the VPC VR is in Running state self.start_InternalLbVm(int_lb_vm) - self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running") + self.check_InternalLbVm_state( + internal_tier, int_lb_rule_1.sourceipaddress, state="Running") # VSD Verification self.verify_vsd_lb_device(int_lb_vm) + # Verifying Internal Load Balanced VMs ingress traffic + # (SSH into VM via Static NAT rule) + self.debug("Verifying Internal Load Balanced VMs ingress traffic " + "(SSH into VM via Static NAT rule)...") + self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc) + self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc) + # Internal LB (wget) traffic test ssh_client = self.ssh_into_VM(public_vm, public_ip) - tries = 0 - while tries < 10: - try: - wget_file = self.wget_from_vm_cmd(ssh_client, - int_lb_rule_1.sourceipaddress, - self.test_data["http_rule"]["publicport"] - ) - except Exception as e: - self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s" - % e) - self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...") - time.sleep(30) - tries += 1 - continue - self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance") - break + wget_file = self.wget_from_vm_cmd( + ssh_client, int_lb_rule_1.sourceipaddress, + 
self.test_data["http_rule"]["publicport"]) # Verifying Internal LB (wget) traffic test - self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2]) + self.verify_lb_wget_file( + wget_file, [internal_vm, internal_vm_1, internal_vm_2]) diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index 13b7d9f7a8f..6bf71f01b57 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -443,6 +443,22 @@ test_data = { "supportedservices": "Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL" }, + "vpc_offering_multi_lb": { + "name": "VPC offering with multiple Lb service providers", + "displaytext": "VPC offering with multiple Lb service providers", + "supportedservices": "Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL", + "serviceProviderList": { + "Vpn": 'VpcVirtualRouter', + "Dhcp": 'VpcVirtualRouter', + "Dns": 'VpcVirtualRouter', + "SourceNat": 'VpcVirtualRouter', + "Lb": ["InternalLbVm", "VpcVirtualRouter"], + "PortForwarding": 'VpcVirtualRouter', + "UserData": 'VpcVirtualRouter', + "StaticNat": 'VpcVirtualRouter', + "NetworkACL": 'VpcVirtualRouter' + } + }, "vpc": { "name": "TestVPC", "displaytext": "TestVPC", From ad5ec66b0d811a3c5ca28b11ed473844a4f203ee Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Mon, 13 Mar 2017 13:14:12 +0530 Subject: [PATCH 24/59] CLOUDSTACK-9831: Previous pod_id still remains in the vm_instance table after VM migration with migrateVirtualMachineWithVolume (cherry picked from commit 3564d30233184161df64e8aaee5ad96917cf8a1d) Signed-off-by: Rohit Yadav --- .../src/com/cloud/vm/VirtualMachineManagerImpl.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index dfc48d5ccd6..755fb11ee58 100644 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2239,8 +2239,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac work.setResourceId(destHostId); work = _workDao.persist(work); + // Put the vm in migrating state. vm.setLastHostId(srcHostId); + vm.setPodIdToDeployIn(destHost.getPodId()); moveVmToMigratingState(vm, destHostId, work); boolean migrated = false; @@ -2317,6 +2319,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac "Migrate Command failed. 
Please check logs.");
             try {
                 _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
+                vm.setPodIdToDeployIn(srcHost.getPodId());
                 stateTransitTo(vm, Event.OperationFailed, srcHostId);
             } catch (final AgentUnavailableException e) {
                 s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);

From 765ab549ca27ac495119cd1b6a7d8330e03c6eff Mon Sep 17 00:00:00 2001
From: Jayapal
Date: Thu, 9 Mar 2017 16:00:16 +0530
Subject: [PATCH 25/59] CLOUDSTACK-9208: Assertion Error in VM_POWER_STATE
 handler - fixed

(cherry picked from commit d7eae25322155810ba68aa6109978db287459736)
Signed-off-by: Rohit Yadav
---
 .../src/com/cloud/vm/VirtualMachineManagerImpl.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 755fb11ee58..0720f07b8b0 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -1273,7 +1273,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         final VirtualMachine vm = profile.getVirtualMachine();
         final StopCommand stop = new StopCommand(vm, getExecuteInSequence(vm.getHypervisorType()), checkBeforeCleanup);
         try {
-            final Answer answer = _agentMgr.send(vm.getHostId(), stop);
+            Answer answer = null;
+            if (vm.getHostId() != null) {
+                answer = _agentMgr.send(vm.getHostId(), stop);
+            }
             if (answer != null && answer instanceof StopAnswer) {
                 final StopAnswer stopAns = (StopAnswer)answer;
                 if (vm.getType() == VirtualMachine.Type.User) {

From e6855db78aee8cbff675647f9fb52edd4190f9d0 Mon Sep 17 00:00:00 2001
From: Anshul Gangwar
Date: Tue, 20 Dec 2016 15:17:08 +0530
Subject: [PATCH 26/59] CLOUDSTACK-9685: delete the snapshot on primary
 storage associated with a volume when that volume is deleted, as the
 snapshot will never be used again and would otherwise fill up primary
 storage

(cherry picked from commit 336df84f1787de962a67d0a34551f9027303040e)
Signed-off-by: Rohit Yadav
---
 .../storage/snapshot/XenserverSnapshotStrategy.java   | 4 ++--
 .../cloudstack/storage/volume/VolumeServiceImpl.java  | 8 ++++++++
 .../xenserver/resource/XenServerStorageProcessor.java | 3 +++
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
index 2686e403b27..06e05ec34d7 100644
--- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java
@@ -130,7 +130,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
         SnapshotDataStoreVO oldestSnapshotOnPrimary = snapshotStoreDao.findOldestSnapshotForVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
         VolumeVO volume = volumeDao.findById(snapshot.getVolumeId());
         if (oldestSnapshotOnPrimary != null) {
-            if (oldestSnapshotOnPrimary.getDataStoreId() == volume.getPoolId()) {
+            if (oldestSnapshotOnPrimary.getDataStoreId() == volume.getPoolId() && oldestSnapshotOnPrimary.getId() != parentSnapshotOnPrimaryStore.getId()) {
                 int _deltaSnapshotMax = NumbersUtil.parseInt(configDao.getValue("snapshot.delta.max"), SnapshotManager.DELTAMAX);
                 int deltaSnap = _deltaSnapshotMax;
@@ -152,7 +152,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase {
             } else {
                 fullBackup = false;
             }
-        } else {
+        } else if (oldestSnapshotOnPrimary.getId() != parentSnapshotOnPrimaryStore.getId()) {
             // if there is a snapshot entry for the previousPool (primary storage) of the migrated volume, delete it because CS created one more snapshot entry for the current pool
             snapshotStoreDao.remove(oldestSnapshotOnPrimary.getId());
         }

diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 05ec7d02706..8adf001b4ea 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -68,6 +68,8 @@ import org.apache.cloudstack.storage.command.CopyCmdAnswer;
 import org.apache.cloudstack.storage.command.DeleteCommand;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
 import org.apache.cloudstack.storage.image.store.TemplateObject;
@@ -141,6 +143,8 @@ public class VolumeServiceImpl implements VolumeService {
     @Inject
     VMTemplatePoolDao _tmpltPoolDao;
     @Inject
+    SnapshotDataStoreDao _snapshotStoreDao;
+    @Inject
     VolumeDao _volumeDao;
     @Inject
     EndPointSelector _epSelector;
@@ -359,6 +363,10 @@ public class VolumeServiceImpl implements VolumeService {
                     s_logger.info("Volume " + vo.getId() + " is not referred anywhere, remove it from volumes table");
                     volDao.remove(vo.getId());
                 }
+                SnapshotDataStoreVO snapStoreVo = _snapshotStoreDao.findByVolume(vo.getId(), DataStoreRole.Primary);
+                if (snapStoreVo != null) {
+                    _snapshotStoreDao.remove(snapStoreVo.getId());
+                }
             } else {
                 vo.processEvent(Event.OperationFailed);
                 apiResult.setResult(result.getResult());

diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
index 1144276f711..ebf6b1ed884 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/XenServerStorageProcessor.java
@@ -561,6 +561,9 @@ public class XenServerStorageProcessor implements StorageProcessor {
         String errorMsg = null;
         try {
             final VDI vdi = VDI.getByUuid(conn, volume.getPath());
+            for (VDI svdi : vdi.getSnapshots(conn)) {
+                deleteVDI(conn, svdi);
+            }
             deleteVDI(conn, vdi);
             return new Answer(null);
         } catch (final BadServerResponse e) {

From 645283abc442055c0ecf9c20b0c38c942f85c708 Mon Sep 17 00:00:00 2001
From: Nitin Kumar Maharana
Date: Tue, 6 Dec 2016 13:38:34 +0530
Subject: [PATCH 27/59] CLOUDSTACK-9623: Deploying virtual machine fails due
 to "Couldn't find vlanId" in Basic Zone

(cherry picked from commit 280f3be4e6c77ed50d9f3d09a40ee1a6d470f313)
Signed-off-by: Rohit Yadav
---
 server/src/com/cloud/network/IpAddressManagerImpl.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/src/com/cloud/network/IpAddressManagerImpl.java
b/server/src/com/cloud/network/IpAddressManagerImpl.java index 43a251ab0ec..62a5e876de0 100644 --- a/server/src/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/com/cloud/network/IpAddressManagerImpl.java @@ -391,6 +391,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage AssignIpAddressFromPodVlanSearch = _ipAddressDao.createSearchBuilder(); AssignIpAddressFromPodVlanSearch.and("dc", AssignIpAddressFromPodVlanSearch.entity().getDataCenterId(), Op.EQ); AssignIpAddressFromPodVlanSearch.and("allocated", AssignIpAddressFromPodVlanSearch.entity().getAllocatedTime(), Op.NULL); + AssignIpAddressFromPodVlanSearch.and("vlanId", AssignIpAddressFromPodVlanSearch.entity().getVlanId(), Op.IN); + SearchBuilder podVlanSearch = _vlanDao.createSearchBuilder(); podVlanSearch.and("type", podVlanSearch.entity().getVlanType(), Op.EQ); podVlanSearch.and("networkId", podVlanSearch.entity().getNetworkId(), Op.EQ); From ba607185850deb7f41794b5dea43d0e05bbcea7f Mon Sep 17 00:00:00 2001 From: Sanket Thite Date: Tue, 12 Jul 2016 15:20:47 +0530 Subject: [PATCH 28/59] VM snapshot is disabled if the VM Instance is off (cherry picked from commit bd80c8e1797ef4dc819a22f2629bccd94beb8741) Signed-off-by: Rohit Yadav --- ui/scripts/instances.js | 6 ++++++ ui/scripts/ui/dialog.js | 10 ++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 19db2570296..d812d0379b2 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -49,6 +49,12 @@ return true; } return false; + }, + isDisabled: function(args){ + if(args.context.instances[0].state == 'Stopped'){ + return true; + } + return false; } }, quiescevm: { diff --git a/ui/scripts/ui/dialog.js b/ui/scripts/ui/dialog.js index 5e28ba3a10d..2f3f84712d5 100644 --- a/ui/scripts/ui/dialog.js +++ b/ui/scripts/ui/dialog.js @@ -637,8 +637,14 @@ $input.attr('id', inputId); $name.find('label').attr('for', inputId); - if (field.isDisabled) - $input.attr("disabled", "disabled"); + if(field.isDisabled ){ + if(typeof(field.isDisabled) == 'boolean' && field.isDisabled == true){ + $input.attr("disabled","disabled"); + } + else if (typeof(field.isDisabled) == 'function' && field.isDisabled(args) == true){ + $input.attr("disabled","disabled"); + } + } // Tooltip if (field.docID) { From 05f94b8ef76f97f650e5c12f0b7b399ac651a245 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 31 Jan 2017 18:15:12 +0100 Subject: [PATCH 29/59] Bug fix for CLOUDSTACK-9762 Management Server UI (VM statistics page) CPU Utilized value is incorrect. 
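[Editor's note] The fix below turns an absolute CPU reading into a percentage: the raw quickStats usage figure vCenter reports for a VM is in MHz, so the patch divides it by the VM's maximum available CPU (summary.runtime.maxCpuUsage, also in MHz) and multiplies by 100 before storing it in the VmStatsEntry. A minimal sketch of that arithmetic, with invented MHz figures for illustration:

    // Hedged sketch of the normalization this patch introduces;
    // the 2500/8000 MHz values are made up, not taken from the patch.
    public class CpuUtilizationSketch {
        public static void main(String[] args) {
            double usedMhz = 2500.0;       // raw quickStats CPU usage of the VM
            double allocatedMhz = 8000.0;  // summary.runtime.maxCpuUsage
            double cpuUtilized = (usedMhz / allocatedMhz) * 100;
            System.out.println(cpuUtilized + "% utilized"); // 31.25% utilized
        }
    }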
(cherry picked from commit b676a8a7c3fd554d87afdac6328d9ac5d0265308) Signed-off-by: Rohit Yadav --- .../hypervisor/vmware/resource/VmwareResource.java | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 10f0fd32a8d..0ff50ac2fc4 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -5073,21 +5073,23 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa final String guestMemUseStr = "summary.quickStats.guestMemoryUsage"; final String memLimitStr = "resourceConfig.memoryAllocation.limit"; final String memMbStr = "config.hardware.memoryMB"; + final String allocatedCpuStr = "summary.runtime.maxCpuUsage"; ObjectContent[] ocs = - hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", numCpuStr, cpuUseStr ,guestMemUseStr ,memLimitStr ,memMbStr, instanceNameCustomField}); + hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", numCpuStr, cpuUseStr ,guestMemUseStr ,memLimitStr ,memMbStr,allocatedCpuStr ,instanceNameCustomField}); if (ocs != null && ocs.length > 0) { for (ObjectContent oc : ocs) { List objProps = oc.getPropSet(); if (objProps != null) { String name = null; String numberCPUs = null; - String maxCpuUsage = null; + double maxCpuUsage = 0; String memlimit = null; String memkb = null; String guestMemusage = null; String vmNameOnVcenter = null; String vmInternalCSName = null; + double allocatedCpu = 0; for (DynamicProperty objProp : objProps) { if (objProp.getName().equals("name")) { vmNameOnVcenter = objProp.getVal().toString(); @@ -5099,13 +5101,17 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa }else if (objProp.getName().equals(numCpuStr)) { numberCPUs = objProp.getVal().toString(); } else if (objProp.getName().equals(cpuUseStr)) { - maxCpuUsage = objProp.getVal().toString(); + maxCpuUsage = NumberUtils.toDouble(objProp.getVal().toString()); } else if (objProp.getName().equals(memLimitStr)) { memlimit = objProp.getVal().toString(); } else if (objProp.getName().equals(memMbStr)) { memkb = objProp.getVal().toString(); + } else if (objProp.getName().equals(allocatedCpuStr)){ + allocatedCpu = NumberUtils.toDouble(objProp.getVal().toString()); } } + + maxCpuUsage = (maxCpuUsage/allocatedCpu)*100; new VirtualMachineMO(hyperHost.getContext(), oc.getObj()); if (vmInternalCSName != null) { name = vmInternalCSName; @@ -5172,7 +5178,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } } - vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, NumberUtils.toDouble(maxCpuUsage), networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm")); + vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm")); } } } From a7cfdf66fd780ab48a6c048ccdabfb304c34854e Mon Sep 17 00:00:00 2001 From: Jayapal Date: Mon, 27 Feb 2017 10:20:33 +0530 Subject: [PATCH 30/59] CLOUDSTACK-9728: Fixed traffic sentinel HTTP 414 error response (cherry picked from commit 
70422e79072ff933a33a5f95df3a2c9f4bb0d203) Signed-off-by: Rohit Yadav --- .../resource/TrafficSentinelResource.java | 31 +++++++++++++++---- 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/core/src/com/cloud/network/resource/TrafficSentinelResource.java b/core/src/com/cloud/network/resource/TrafficSentinelResource.java index 610190ae7cc..e4193327e78 100644 --- a/core/src/com/cloud/network/resource/TrafficSentinelResource.java +++ b/core/src/com/cloud/network/resource/TrafficSentinelResource.java @@ -22,6 +22,7 @@ package com.cloud.network.resource; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; +import java.io.OutputStream; import java.net.MalformedURLException; import java.net.URL; import java.net.URLEncoder; @@ -53,6 +54,7 @@ import com.cloud.agent.api.StartupTrafficMonitorCommand; import com.cloud.host.Host; import com.cloud.resource.ServerResource; import com.cloud.utils.exception.ExecutionException; +import java.net.HttpURLConnection; public class TrafficSentinelResource implements ServerResource { @@ -205,14 +207,25 @@ public class TrafficSentinelResource implements ServerResource { _exclZones = cmd.getExcludeZones(); } + BufferedReader in = null; + OutputStream os = null; try { - //Query traffic Sentinel - trafficSentinel = - new URL(_url + "/inmsf/Query?script=" + URLEncoder.encode(getScript(cmd.getPublicIps(), cmd.getStart(), cmd.getEnd()), "UTF-8") + - "&authenticate=basic&resultFormat=txt"); + //Query traffic Sentinel using POST method. 3 parts to the connection call and subsequent writing. - BufferedReader in = new BufferedReader(new InputStreamReader(trafficSentinel.openStream())); + //Part 1 - Connect to the URL of the traffic sentinel's instance. + trafficSentinel = new URL(_url + "/inmsf/Query"); + String postData = "script="+URLEncoder.encode(getScript(cmd.getPublicIps(), cmd.getStart(), cmd.getEnd()), "UTF-8")+"&authenticate=basic&resultFormat=txt"; + HttpURLConnection con = (HttpURLConnection) trafficSentinel.openConnection(); + con.setRequestMethod("POST"); + con.setRequestProperty("Content-Length", String.valueOf(postData.length())); + con.setDoOutput(true); + //Part 2 - Write Data + os = con.getOutputStream(); + os.write(postData.getBytes("UTF-8")); + + //Part 3 - Read response of the request + in = new BufferedReader(new InputStreamReader(con.getInputStream())); String inputLine; while ((inputLine = in.readLine()) != null) { @@ -228,13 +241,19 @@ public class TrafficSentinelResource implements ServerResource { answer.put(publicIp, bytesSentAndReceived); } } - in.close(); } catch (MalformedURLException e1) { s_logger.info("Invalid Traffic Sentinel URL", e1); throw new ExecutionException(e1.getMessage()); } catch (IOException e) { s_logger.debug("Error in direct network usage accounting", e); throw new ExecutionException(e.getMessage()); + } finally { + if (os != null) { + os.close(); + } + if (in != null) { + in.close(); + } } } catch (Exception e) { s_logger.debug(e); From 41d1d40940005be683ab89902816e4c8e5d55287 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 17 Jan 2017 23:39:17 +0530 Subject: [PATCH 31/59] CLOUDSTACK-9748:VPN Users search functionality broken (cherry picked from commit 61823c24f658eb0fe3f1246e61d4a6b9f308cb00) Signed-off-by: Rohit Yadav --- .../com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index 
2a847147282..29ff5c4d733 100644 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -587,6 +587,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc public Pair, Integer> searchForVpnUsers(ListVpnUsersCmd cmd) { String username = cmd.getUsername(); Long id = cmd.getId(); + String keyword = cmd.getKeyword(); Account caller = CallContext.current().getCallingAccount(); List permittedAccounts = new ArrayList(); @@ -602,6 +603,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("username", sb.entity().getUsername(), SearchCriteria.Op.EQ); + sb.and("keyword", sb.entity().getUsername(), SearchCriteria.Op.LIKE); sb.and("state", sb.entity().getState(), Op.IN); SearchCriteria sc = sb.create(); @@ -610,6 +612,10 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc //list only active users sc.setParameters("state", State.Active, State.Add); + if(keyword != null){ + sc.setParameters("keyword", "%" + keyword + "%"); + } + if (id != null) { sc.setParameters("id", id); } From 4c37e111752fdf06037119cf56d34122cb15ed67 Mon Sep 17 00:00:00 2001 From: Jayapal Date: Wed, 8 Mar 2017 10:33:11 +0530 Subject: [PATCH 32/59] CLOUDSTACK-8931: Fail to deploy VM instance when use.system.public.ips=false (cherry picked from commit e1384c3b639541cad08bf11e46cfa5a8af171ee7) Signed-off-by: Rohit Yadav --- .../com/cloud/network/IpAddressManagerImpl.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/com/cloud/network/IpAddressManagerImpl.java b/server/src/com/cloud/network/IpAddressManagerImpl.java index 62a5e876de0..9c66f135d95 100644 --- a/server/src/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/com/cloud/network/IpAddressManagerImpl.java @@ -680,6 +680,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage boolean fetchFromDedicatedRange = false; List dedicatedVlanDbIds = new ArrayList(); List nonDedicatedVlanDbIds = new ArrayList(); + DataCenter zone = _entityMgr.findById(DataCenter.class, dcId); SearchCriteria sc = null; if (podId != null) { @@ -693,10 +694,14 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // If owner has dedicated Public IP ranges, fetch IP from the dedicated range // Otherwise fetch IP from the system pool - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId()); - for (AccountVlanMapVO map : maps) { - if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId())) - dedicatedVlanDbIds.add(map.getVlanDbId()); + Network network = _networksDao.findById(guestNetworkId); + //Checking if network is null in the case of system VM's. At the time of allocation of IP address to systemVm, no network is present. 
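+            // [Editor's note] The guard that follows consults the account's
+            // dedicated VLAN ranges unless the IP is being allocated for a
+            // shared guest network in an advanced zone; in that case
+            // allocation falls through to the domain-dedicated and system
+            // pools, which is what fixes the reported deployment failure
+            // when use.system.public.ips=false. System VMs reach this point
+            // before any guest network exists (network == null), so the
+            // null check keeps them on the original allocation path.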
+ if(network == null || !(network.getGuestType() == GuestType.Shared && zone.getNetworkType() == NetworkType.Advanced)) { + List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(owner.getId()); + for (AccountVlanMapVO map : maps) { + if (vlanDbIds == null || vlanDbIds.contains(map.getVlanDbId())) + dedicatedVlanDbIds.add(map.getVlanDbId()); + } } List domainMaps = _domainVlanMapDao.listDomainVlanMapsByDomain(owner.getDomainId()); for (DomainVlanMapVO map : domainMaps) { @@ -729,8 +734,6 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage sc.setParameters("dc", dcId); - DataCenter zone = _entityMgr.findById(DataCenter.class, dcId); - // for direct network take ip addresses only from the vlans belonging to the network if (vlanUse == VlanType.DirectAttached) { sc.setJoinParameters("vlan", "networkId", guestNetworkId); From 5ce122cc080c5ecde4a28842564f699c11363f17 Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Fri, 9 Dec 2016 16:52:46 +0530 Subject: [PATCH 33/59] CLOUDSTACK-9630: Cannot use listNics API as advertised added missing details for listNics API response. (cherry picked from commit e7326b10ef1f51b470df6b683e1e06d753fb02f7) Signed-off-by: Rohit Yadav --- .../src/com/cloud/api/ApiResponseHelper.java | 13 ++ test/integration/component/test_list_nics.py | 150 ++++++++++++++++++ 2 files changed, 163 insertions(+) create mode 100644 test/integration/component/test_list_nics.py diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index eaf48c7bb0a..54b9bf2c015 100644 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -3456,6 +3456,7 @@ public class ApiResponseHelper implements ResponseGenerator { NicResponse response = new NicResponse(); NetworkVO network = _entityMgr.findById(NetworkVO.class, result.getNetworkId()); VMInstanceVO vm = _entityMgr.findById(VMInstanceVO.class, result.getInstanceId()); + UserVmJoinVO userVm = _entityMgr.findById(UserVmJoinVO.class, result.getInstanceId()); response.setId(result.getUuid()); response.setNetworkid(network.getUuid()); @@ -3464,6 +3465,14 @@ public class ApiResponseHelper implements ResponseGenerator { response.setVmId(vm.getUuid()); } + if (userVm != null){ + if (userVm.getTrafficType() != null) { + response.setTrafficType(userVm.getTrafficType().toString()); + } + if (userVm.getGuestType() != null) { + response.setType(userVm.getGuestType().toString()); + } + } response.setIpaddress(result.getIPv4Address()); if (result.getSecondaryIp()) { @@ -3488,6 +3497,10 @@ public class ApiResponseHelper implements ResponseGenerator { response.setIp6Address(result.getIPv6Address()); } + if (result.getIPv6Cidr() != null) { + response.setIp6Cidr(result.getIPv6Cidr()); + } + response.setDeviceId(String.valueOf(result.getDeviceId())); response.setIsDefault(result.isDefaultNic()); diff --git a/test/integration/component/test_list_nics.py b/test/integration/component/test_list_nics.py new file mode 100644 index 00000000000..12bae1fa2ea --- /dev/null +++ b/test/integration/component/test_list_nics.py @@ -0,0 +1,150 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +#Test from the Marvin - Testing in Python wiki +from marvin.codes import FAILED + +#All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +#Import Integration Libraries + +#base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, VirtualMachine, ServiceOffering + +#utils - utility classes for common cleanup, external library wrappers etc +from marvin.lib.utils import cleanup_resources + +#common - commonly used methods for all tests are listed here +from marvin.lib.common import get_zone, get_domain, get_template + +from marvin.cloudstackAPI.listNics import listNicsCmd + + +from nose.plugins.attrib import attr + +class TestDeployVM(cloudstackTestCase): + """Test deploy a VM into a user account + """ + + def setUp(self): + self.testdata = self.testClient.getParsedTestDataConfig() + self.apiclient = self.testClient.getApiClient() + + # Get Zone, Domain and Default Built-in template + self.domain = get_domain(self.apiclient) + self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + self.testdata["mode"] = self.zone.networktype + self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"]) + + if self.template == FAILED: + assert False, "get_template() failed to return template with description %s" % self.testdata["ostype"] + + #create a user account + self.account = Account.create( + self.apiclient, + self.testdata["account"], + domainid=self.domain.id + ) + #create a service offering + self.service_offering = ServiceOffering.create( + self.apiclient, + self.testdata["service_offerings"]["small"] + ) + #build cleanup list + self.cleanup = [ + self.service_offering, + self.account + ] + + # Validate the following: + # 1. Virtual Machine is accessible via SSH + # 2. 
listVirtualMachines returns accurate information + + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.testdata["virtual_machine"], + accountid=self.account.name, + zoneid=self.zone.id, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + templateid=self.template.id + ) + + list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id) + + self.debug( + "Verify listVirtualMachines response for virtual machine: %s"\ + % self.virtual_machine.id + ) + + self.assertEqual( + isinstance(list_vms, list), + True, + "List VM response was not a valid list" + ) + self.assertNotEqual( + len(list_vms), + 0, + "List VM response was empty" + ) + + vm = list_vms[0] + self.assertEqual( + vm.id, + self.virtual_machine.id, + "Virtual Machine ids do not match" + ) + self.assertEqual( + vm.name, + self.virtual_machine.name, + "Virtual Machine names do not match" + ) + self.assertEqual( + vm.state, + "Running", + msg="VM is not in Running state" + ) + + @attr(tags = ['advanced', 'basic'], required_hardware="false") + def test_list_nics(self): + list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id) + vmid = self.virtual_machine.id + cmd = listNicsCmd() + cmd.virtualmachineid = vmid + list_nics = self.apiclient.listNics(cmd) + + nic = list_nics[0] + + self.assertIsNotNone( + nic.type, + "Nic Type is %s" % nic.type + ) + + self.assertIsNotNone( + nic.traffictype, + "Nic traffictype is %s" % nic.traffictype + ) + + + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + self.debug("Warning! Exception in tearDown: %s" % e) + return \ No newline at end of file From b696a00ea2b3c558b8993f7975269adca10256b3 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Tue, 5 Jan 2016 11:23:49 +0530 Subject: [PATCH 34/59] CLOUDSTACK-9725 Failed to update VPC Network during N/w offering Upgrade which doesn't have ACL service Enabled. Check whether the ACL service provider is configured when the network is associated with an ACL.
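The scenario, roughly: a VPC network carries a network ACL, and an upgrade is requested to an offering that does not provide the NetworkACL service. Passing the network's ACL id into the validator lets it reject that combination. A minimal sketch of the check this enables, with names condensed for illustration (an assumed simplification, not the exact validator code):

    Long aclId = network.getNetworkACLId();
    if (aclId != null && !_ntwkModel.areServicesSupportedByNetworkOffering(newNtwkOffId, Service.NetworkACL)) {
        throw new InvalidParameterValueException("Network uses ACL " + aclId
                + " but the new offering has no NetworkACL service provider");
    }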
(cherry picked from commit bbff9f15754c06dc8a7a74fdd34ab7968b052c3f) Signed-off-by: Rohit Yadav --- server/src/com/cloud/network/NetworkServiceImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index e77b40e1818..46ff3832d0b 100644 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -2008,7 +2008,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { //perform below validation if the network is vpc network if (network.getVpcId() != null && networkOfferingId != null) { Vpc vpc = _entityMgr.findById(Vpc.class, network.getVpcId()); - _vpcMgr.validateNtwkOffForNtwkInVpc(networkId, networkOfferingId, null, null, vpc, null, _accountMgr.getAccount(network.getAccountId()), null); + _vpcMgr.validateNtwkOffForNtwkInVpc(networkId, networkOfferingId, null, null, vpc, null, _accountMgr.getAccount(network.getAccountId()), network.getNetworkACLId()); } // don't allow to update network in Destroy state From f9e51653ae6b286a08dc5aecb0cfbf0191a96b47 Mon Sep 17 00:00:00 2001 From: Likitha Shetty Date: Thu, 10 Sep 2015 14:58:04 +0530 Subject: [PATCH 35/59] CLOUDSTACK-8829 : Consecutive cold migration fails (cherry picked from commit 88b0459c41c51e832547667ea537fb9ce9d9f4c1) Signed-off-by: Rohit Yadav --- .../cloud/vm/VirtualMachineManagerImpl.java | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 0720f07b8b0..f5bdf38740f 100644 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -1763,9 +1763,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); - final Long srchostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId(); - final HostVO srcHost = _hostDao.findById(srchostId); - final Long srcClusterId = srcHost.getClusterId(); if (destPool == null) { throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool"); @@ -1799,19 +1796,26 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // If VM was cold migrated between clusters belonging to two different VMware DCs, // unregister the VM from the source host and cleanup the associated VM files. if (vm.getHypervisorType().equals(HypervisorType.VMware)) { + Long srcClusterId = null; + Long srcHostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId(); + if (srcHostId != null) { + HostVO srcHost = _hostDao.findById(srcHostId); + srcClusterId = srcHost.getClusterId(); + } + final Long destClusterId = destPool.getClusterId(); if (srcClusterId != null && destClusterId != null && ! 
srcClusterId.equals(destClusterId)) { final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { s_logger.debug("Since VM's storage was successfully migrated across VMware Datacenters, unregistering VM: " + vm.getInstanceName() + - " from source host: " + srcHost.getId()); + " from source host: " + srcHostId); final UnregisterVMCommand uvc = new UnregisterVMCommand(vm.getInstanceName()); uvc.setCleanupVmFiles(true); try { - _agentMgr.send(srcHost.getId(), uvc); - } catch (final Exception e) { - throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHost.getId() + + _agentMgr.send(srcHostId, uvc); + } catch (final AgentUnavailableException | OperationTimedoutException e) { + throw new CloudRuntimeException("Failed to unregister VM: " + vm.getInstanceName() + " from source host: " + srcHostId + + " after successfully migrating VM's storage across VMware Datacenters"); } } From 9ddbd9eda7ceb0c01dd0b1af0785ba1bc3a43606 Mon Sep 17 00:00:00 2001 From: Anshul Gangwar Date: Wed, 10 Jun 2015 15:33:42 +0530 Subject: [PATCH 36/59] CLOUDSTACK-8833: Fixed generating a URL and then migrating the volume to another storage resulting in two entries in the UI and listVolumes not working for that volume. Update the volume id in the volume_store_ref table to the newly created volume for the migration (cherry picked from commit 42b89278e9386b8bc90e36fb600d7730e676ae72) Signed-off-by: Rohit Yadav --- .../datastore/db/VolumeDataStoreDao.java | 2 ++ .../image/db/VolumeDataStoreDaoImpl.java | 17 +++++++++++++++ .../storage/volume/VolumeServiceImpl.java | 1 + 3 files changed, 20 insertions(+) diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java index e65880d316c..fb9844116c1 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreDao.java @@ -51,4 +51,6 @@ public interface VolumeDataStoreDao extends GenericDao, List listUploadedVolumesByStoreId(long id); List listByVolumeState(Volume.State...
states); + + boolean updateVolumeId(long srcVolId, long destVolId); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java index d01ec411d06..8258042984d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java @@ -25,6 +25,7 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -350,4 +351,20 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase Date: Wed, 15 Mar 2017 15:59:06 +0530 Subject: [PATCH 37/59] CLOUDSTACK-9834: prepareTemplate API call doesn't work well with XenServer & Local SR (Db_exn.Uniqueness_constraint_violation) removed the host uuid from SR label so that any host which has access to the SR(all the hosts in the same pool) can reuse the same SR (cherry picked from commit 1aa6a72bc7deda32c8cd07e468841e3fc15bf6f8) Signed-off-by: Rohit Yadav --- .../xenserver/resource/Xenserver625StorageProcessor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index b70057dda9c..05dd56612d0 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -100,7 +100,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { PBD pbd = null; try { - final String srname = hypervisorResource.getHost().getUuid() + path.trim(); + final String srname = path.trim(); synchronized (srname.intern()) { final Set srs = SR.getByNameLabel(conn, srname); if (srs != null && !srs.isEmpty()) { From 42e60ebac6f067e003598447ef75d914b7916734 Mon Sep 17 00:00:00 2001 From: Nitin Kumar Maharana Date: Wed, 8 Mar 2017 13:22:30 +0530 Subject: [PATCH 38/59] CLOUDSTACK-9708: Router deployment failed due to two threads start VR simultaneously. 
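The root cause is a lazily initialized, shared SearchBuilder: two threads deploying routers could pass the unsynchronized null check at the same time, and one could use a builder the other had not finished configuring. A condensed sketch of the racy pattern removed below (simplified for illustration):

    // Thread-unsafe lazy init: both threads may observe UserVmSearch == null,
    // and one may call create() before the other has invoked done().
    if (UserVmSearch == null) {
        UserVmSearch = createSearchBuilder();
        // ... conditions and the nic join are configured here ...
        UserVmSearch.done();
    }

Constructing the SearchBuilder once, during DAO initialization, removes the race.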
(cherry picked from commit 9e20525e08d8dae7bcf3d568c5a1ccdb793eeb9d) Signed-off-by: Rohit Yadav --- .../src/com/cloud/vm/dao/UserVmDaoImpl.java | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java index b1ff7060ba1..92f7e4edfb0 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -192,6 +192,18 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use JoinBuilder.JoinType.INNER); AccountDataCenterVirtualSearch.done(); + SearchBuilder nicSearchByNetwork = _nicDao.createSearchBuilder(); + nicSearchByNetwork.and("networkId", nicSearchByNetwork.entity().getNetworkId(), SearchCriteria.Op.EQ); + nicSearchByNetwork.and("removed", nicSearchByNetwork.entity().getRemoved(), SearchCriteria.Op.NULL); + nicSearchByNetwork.and().op("ip4Address", nicSearchByNetwork.entity().getIPv4Address(), SearchCriteria.Op.NNULL); + nicSearchByNetwork.or("ip6Address", nicSearchByNetwork.entity().getIPv6Address(), SearchCriteria.Op.NNULL); + nicSearchByNetwork.cp(); + + UserVmSearch = createSearchBuilder(); + UserVmSearch.and("states", UserVmSearch.entity().getState(), SearchCriteria.Op.IN); + UserVmSearch.join("nicSearchByNetwork", nicSearchByNetwork, UserVmSearch.entity().getId(), nicSearchByNetwork.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + UserVmSearch.done(); + UserVmByIsoSearch = createSearchBuilder(); UserVmByIsoSearch.and("isoId", UserVmByIsoSearch.entity().getIsoId(), SearchCriteria.Op.EQ); UserVmByIsoSearch.done(); @@ -301,25 +313,11 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use @Override public List listByNetworkIdAndStates(long networkId, State... 
states) { - if (UserVmSearch == null) { - SearchBuilder nicSearch = _nicDao.createSearchBuilder(); - nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); - nicSearch.and("removed", nicSearch.entity().getRemoved(), SearchCriteria.Op.NULL); - nicSearch.and().op("ip4Address", nicSearch.entity().getIPv4Address(), SearchCriteria.Op.NNULL); - nicSearch.or("ip6Address", nicSearch.entity().getIPv6Address(), SearchCriteria.Op.NNULL); - nicSearch.cp(); - - UserVmSearch = createSearchBuilder(); - UserVmSearch.and("states", UserVmSearch.entity().getState(), SearchCriteria.Op.IN); - UserVmSearch.join("nicSearch", nicSearch, UserVmSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); - UserVmSearch.done(); - } - SearchCriteria sc = UserVmSearch.create(); if (states != null && states.length != 0) { sc.setParameters("states", (Object[])states); } - sc.setJoinParameters("nicSearch", "networkId", networkId); + sc.setJoinParameters("nicSearchByNetwork", "networkId", networkId); return listBy(sc); } From 63997b3b192e78fa283c25f905297461565de41f Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Tue, 6 Oct 2015 16:32:58 +0530 Subject: [PATCH 39/59] CLOUDSTACK-9666 Added basic configuration validation for the config drive global settings (cherry picked from commit 366175b9a5dd3994b2807fcc5aaeed7a22052cae) Signed-off-by: Rohit Yadav --- .../src/com/cloud/configuration/ConfigurationManagerImpl.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index c70d7a17967..5b8ba791040 100644 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -388,6 +388,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati configValuesForValidation.add("ovm3.heartbeat.timeout"); configValuesForValidation.add("incorrect.login.attempts.allowed"); configValuesForValidation.add("vm.password.length"); + configValuesForValidation.add("externaldhcp.vmip.retrieval.interval"); + configValuesForValidation.add("externaldhcp.vmip.max.retry"); + configValuesForValidation.add("externaldhcp.vmipFetch.threadPool.max"); configValuesForValidation.add("remote.access.vpn.psk.length"); } From 16b67b40fd26cff8241bcb399c505763d279601e Mon Sep 17 00:00:00 2001 From: Anshul Gangwar Date: Fri, 8 Jan 2016 13:28:11 +0530 Subject: [PATCH 40/59] CLOUDSTACK-9686: Fixed multiple entries for the builtin template in the template_store_ref table, due to which the builtin template was never downloaded completely. In the handleSysTemplateDownload method, create the template entry only if no entry exists; handleTemplateSync will take care of the other scenario (cherry picked from commit 929595c114f1214f064419a305cc115a3e136803) Signed-off-by: Rohit Yadav --- .../apache/cloudstack/storage/image/TemplateServiceImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 8dcc439f0a6..df277d016e8 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -267,7 +267,7 @@ public class TemplateServiceImpl implements TemplateService { for (VMTemplateVO template :
toBeDownloaded) { TemplateDataStoreVO tmpltHost = _vmTemplateStoreDao.findByStoreTemplate(store.getId(), template.getId()); - if (tmpltHost == null || tmpltHost.getState() != ObjectInDataStoreStateMachine.State.Ready) { + if (tmpltHost == null) { associateTemplateToZone(template.getId(), dcId); s_logger.info("Downloading builtin template " + template.getUniqueName() + " to data center: " + dcId); TemplateInfo tmplt = _templateFactory.getTemplate(template.getId(), DataStoreRole.Image); From 9effd472a10e10948d92dfbbf67a04ec82a739d1 Mon Sep 17 00:00:00 2001 From: nitt10prashant Date: Tue, 16 May 2017 17:12:25 +0530 Subject: [PATCH 41/59] Changing vlan to None since network offering being used has Specify Vlan set to False (cherry picked from commit 47747e71881954dab748924ff880beccc687dd1f) Signed-off-by: Rohit Yadav --- test/integration/component/test_acl_isolatednetwork.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/component/test_acl_isolatednetwork.py b/test/integration/component/test_acl_isolatednetwork.py index b1e2575a0c3..0cb583e3779 100644 --- a/test/integration/component/test_acl_isolatednetwork.py +++ b/test/integration/component/test_acl_isolatednetwork.py @@ -57,6 +57,7 @@ class TestIsolatedNetwork(cloudstackTestCase): #cls.acldata = Services().services cls.testdata = cls.testClient.getParsedTestDataConfig() cls.acldata = cls.testdata["acl"] + cls.acldata["network"]["vlan"] = None cls.domain_1 = None cls.domain_2 = None cls.cleanup = [] From 64d09c737ac5c39f8b6338999ecc1364d67ae050 Mon Sep 17 00:00:00 2001 From: Nitesh Sarda Date: Tue, 23 May 2017 17:34:20 +0530 Subject: [PATCH 42/59] CLOUDSTACK-9017 : VPC VR DHCP broken for multihomed guest VMs (cherry picked from commit 10b25adc460f89d4d98ee0c6090a4f785aa088a6) Signed-off-by: Rohit Yadav --- .../patches/debian/config/etc/vpcdnsmasq.conf | 2 ++ .../debian/config/opt/cloud/bin/cs_dhcp.py | 15 --------------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/systemvm/patches/debian/config/etc/vpcdnsmasq.conf b/systemvm/patches/debian/config/etc/vpcdnsmasq.conf index 6cfc433cecf..7c29e65d269 100644 --- a/systemvm/patches/debian/config/etc/vpcdnsmasq.conf +++ b/systemvm/patches/debian/config/etc/vpcdnsmasq.conf @@ -460,3 +460,5 @@ log-facility=/var/log/dnsmasq.log # Include a another lot of configuration options. #conf-file=/etc/dnsmasq.more.conf conf-dir=/etc/dnsmasq.d + +dhcp-optsfile=/etc/dhcpopts.txt diff --git a/systemvm/patches/debian/config/opt/cloud/bin/cs_dhcp.py b/systemvm/patches/debian/config/opt/cloud/bin/cs_dhcp.py index d9f30e5ab49..41a8d7eed8e 100755 --- a/systemvm/patches/debian/config/opt/cloud/bin/cs_dhcp.py +++ b/systemvm/patches/debian/config/opt/cloud/bin/cs_dhcp.py @@ -21,7 +21,6 @@ from netaddr import * def merge(dbag, data): - search(dbag, data['host_name']) # A duplicate ip address wil clobber the old value # This seems desirable .... 
if "add" in data and data['add'] is False and \ @@ -33,17 +32,3 @@ def merge(dbag, data): dbag[data['ipv4_adress']] = data return dbag - -def search(dbag, name): - """ - Dirty hack because CS does not deprovision hosts - """ - hosts = [] - for o in dbag: - if o == 'id': - continue - print "%s %s" % (dbag[o]['host_name'], name) - if dbag[o]['host_name'] == name: - hosts.append(o) - for o in hosts: - del(dbag[o]) From d03f499b05cd55251d49c873462b98074198d3d4 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Tue, 3 Jan 2017 17:55:08 +0530 Subject: [PATCH 43/59] CLOUDSTACK-9638 Problems caused when inputting double-byte numbers for custom compute offerings (cherry picked from commit 59312dd976a6fb5e4d5d3a5b701fc1cccac9a3d2) Signed-off-by: Rohit Yadav --- server/src/com/cloud/vm/UserVmManagerImpl.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 45f6ec25347..3edde1da0bf 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -3602,7 +3602,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _vmDao.persist(vm); for (String key : customParameters.keySet()) { - vm.setDetail(key, customParameters.get(key)); + //handle double byte strings. + vm.setDetail(key, Integer.toString(Integer.parseInt(customParameters.get(key)))); } _vmDao.saveDetails(vm); From e574953427cf0219cbb64ecb27751d3853a973af Mon Sep 17 00:00:00 2001 From: Nitesh Sarda Date: Fri, 3 Mar 2017 16:54:48 +0530 Subject: [PATCH 44/59] CLOUDSTACK-9814 : Unable to edit a Sub domain, which has the same name in different domains (cherry picked from commit 254771c01cb2f3fa4bfc668d36bf11ae133cf61c) Signed-off-by: Rohit Yadav --- .../com/cloud/server/ManagementService.java | 11 --- .../cloud/server/ManagementServerImpl.java | 73 ------------------- .../src/com/cloud/user/DomainManagerImpl.java | 1 + 3 files changed, 1 insertion(+), 84 deletions(-) diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index 7f3141bc418..449e1085239 100644 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -22,7 +22,6 @@ import java.util.Map; import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd; import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd; -import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; import org.apache.cloudstack.api.command.admin.guest.AddGuestOsCmd; import org.apache.cloudstack.api.command.admin.guest.AddGuestOsMappingCmd; import org.apache.cloudstack.api.command.admin.guest.ListGuestOsMappingCmd; @@ -63,7 +62,6 @@ import com.cloud.alert.Alert; import com.cloud.capacity.Capacity; import com.cloud.dc.Pod; import com.cloud.dc.Vlan; -import com.cloud.domain.Domain; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.ResourceUnavailableException; @@ -227,15 +225,6 @@ public interface ManagementService { VirtualMachine upgradeSystemVM(UpgradeSystemVMCmd cmd); - /** - * update an existing domain - * - * @param cmd - * - the command containing domainId and new domainName - * @return Domain object if the command succeeded - */ - Domain updateDomain(UpdateDomainCmd cmd); - /** * Searches for alerts * diff --git a/server/src/com/cloud/server/ManagementServerImpl.java 
b/server/src/com/cloud/server/ManagementServerImpl.java index 15abae020c1..302f37c8f40 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -2252,79 +2252,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return new Pair(null, -1); } - @Override - @DB - public DomainVO updateDomain(final UpdateDomainCmd cmd) { - final Long domainId = cmd.getId(); - final String domainName = cmd.getDomainName(); - final String networkDomain = cmd.getNetworkDomain(); - - // check if domain exists in the system - final DomainVO domain = _domainDao.findById(domainId); - if (domain == null) { - final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); - ex.addProxyObject(domainId.toString(), "domainId"); - throw ex; - } else if (domain.getParent() == null && domainName != null) { - // check if domain is ROOT domain - and deny to edit it with the new - // name - throw new InvalidParameterValueException("ROOT domain can not be edited with a new name"); - } - - final Account caller = getCaller(); - _accountMgr.checkAccess(caller, domain); - - // domain name is unique under the parent domain - if (domainName != null) { - final SearchCriteria sc = _domainDao.createSearchCriteria(); - sc.addAnd("name", SearchCriteria.Op.EQ, domainName); - sc.addAnd("parent", SearchCriteria.Op.EQ, domain.getParent()); - final List domains = _domainDao.search(sc, null); - - final boolean sameDomain = domains.size() == 1 && domains.get(0).getId() == domainId; - - if (!domains.isEmpty() && !sameDomain) { - final InvalidParameterValueException ex = new InvalidParameterValueException("Failed to update specified domain id with name '" + domainName - + "' since it already exists in the system"); - ex.addProxyObject(domain.getUuid(), "domainId"); - throw ex; - } - } - - // validate network domain - if (networkDomain != null && !networkDomain.isEmpty()) { - if (!NetUtils.verifyDomainName(networkDomain)) { - throw new InvalidParameterValueException( - "Invalid network domain. Total length shouldn't exceed 190 chars. 
Each domain label must be between 1 and 63 characters long, can contain ASCII letters 'a' through 'z', the digits '0' through '9', " - + "and the hyphen ('-'); can't start or end with \"-\""); - } - } - - Transaction.execute(new TransactionCallbackNoReturn() { - @Override - public void doInTransactionWithoutResult(final TransactionStatus status) { - if (domainName != null) { - final String updatedDomainPath = getUpdatedDomainPath(domain.getPath(), domainName); - updateDomainChildren(domain, updatedDomainPath); - domain.setName(domainName); - domain.setPath(updatedDomainPath); - } - - if (networkDomain != null) { - if (networkDomain.isEmpty()) { - domain.setNetworkDomain(null); - } else { - domain.setNetworkDomain(networkDomain); - } - } - _domainDao.update(domainId, domain); - } - }); - - return _domainDao.findById(domainId); - - } - private String getUpdatedDomainPath(final String oldPath, final String newName) { final String[] tokenizedPath = oldPath.split("/"); tokenizedPath[tokenizedPath.length - 1] = newName; diff --git a/server/src/com/cloud/user/DomainManagerImpl.java b/server/src/com/cloud/user/DomainManagerImpl.java index 6973ea1c725..8fb6c32db15 100644 --- a/server/src/com/cloud/user/DomainManagerImpl.java +++ b/server/src/com/cloud/user/DomainManagerImpl.java @@ -616,6 +616,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom if (domainName != null) { SearchCriteria sc = _domainDao.createSearchCriteria(); sc.addAnd("name", SearchCriteria.Op.EQ, domainName); + sc.addAnd("parent", SearchCriteria.Op.EQ, domain.getParent()); List domains = _domainDao.search(sc, null); boolean sameDomain = (domains.size() == 1 && domains.get(0).getId() == domainId); From 9334e091c0010cef79bb96bd8f2a3113ec5f8803 Mon Sep 17 00:00:00 2001 From: Nitesh Sarda Date: Thu, 1 Jun 2017 14:03:06 +0530 Subject: [PATCH 45/59] CLOUDSTACK-9937 : dedicateCluster API response does not return correct detail in response (cherry picked from commit 7364616aa4ac28f413b34306c1d497093dae0600) Signed-off-by: Rohit Yadav --- .../api/commands/DedicateClusterCmd.java | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java index 9ef642e8d31..edc85610efe 100644 --- a/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/api/commands/DedicateClusterCmd.java @@ -105,14 +105,12 @@ public class DedicateClusterCmd extends BaseAsyncCmd { List result = dedicatedService.dedicateCluster(getClusterId(), getDomainId(), getAccountName()); ListResponse response = new ListResponse(); List clusterResponseList = new ArrayList(); - if (result != null) { - for (DedicatedResources resource : result) { - DedicateClusterResponse clusterResponse = dedicatedService.createDedicateClusterResponse(resource); - clusterResponseList.add(clusterResponse); - } - response.setResponses(clusterResponseList); - response.setResponseName(getCommandName()); - this.setResponseObject(response); + + // List of result should always contain single element as only one cluster will be associated with each cluster ID. 
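+ // Hence a single DedicateClusterResponse is returned directly instead of being wrapped in a ListResponse.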
+ if (result != null && result.size() == 1) { + DedicateClusterResponse clusterResponse = dedicatedService.createDedicateClusterResponse(result.get(0)); + clusterResponse.setResponseName(getCommandName()); + this.setResponseObject(clusterResponse); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to dedicate cluster"); } From 1b26a486d171db944005dc48f70d6e759330e648 Mon Sep 17 00:00:00 2001 From: subhash yedugundla Date: Tue, 22 Sep 2015 11:56:40 +0530 Subject: [PATCH 46/59] BUG-ID: CLOUDSTACK-8921 Summary: CLOUDSTACK-8921 snapshot_store_ref table should store the actual size of the backup snapshot in secondary storage Calling SR scan to make sure the size is updated correctly (cherry picked from commit 4e4b67cd96f6b360f5810099fb6b902b5491e001) Signed-off-by: Rohit Yadav --- .../xenserver/resource/Xenserver625StorageProcessor.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index 05dd56612d0..e45832e6e5c 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -349,6 +349,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { hypervisorResource.waitForTask(conn, task, 1000, wait * 1000); hypervisorResource.checkForSuccess(conn, task); dvdi = Types.toVDI(task, conn); + ssSR.scan(conn); // copied = true; } finally { if (task != null) { @@ -462,6 +463,7 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { hypervisorResource.checkForSuccess(conn, task); final VDI backedVdi = Types.toVDI(task, conn); snapshotBackupUuid = backedVdi.getUuid(conn); + snapshotSr.scan(conn); physicalSize = backedVdi.getPhysicalUtilisation(conn); if (destStore instanceof SwiftTO) { @@ -541,6 +543,9 @@ public class Xenserver625StorageProcessor extends XenServerStorageProcessor { } else { newSnapshot.setParentSnapshotPath(prevBackupUuid); } + s_logger.info("New snapshot details: " + newSnapshot.toString()); + s_logger.info("New snapshot physical utilization: " + physicalSize); + return new CopyCmdAnswer(newSnapshot); } catch (final Types.XenAPIException e) { details = "BackupSnapshot Failed due to " + e.toString(); From af6c28b3cea89224113826cb55fed7ce27eab180 Mon Sep 17 00:00:00 2001 From: SudharmaJain Date: Sat, 26 Sep 2015 00:50:11 +0530 Subject: [PATCH 47/59] CLOUDSTACK-8910: The reserved_capacity field increases suddenly after a vmware host failure In case of a vmware host failure, all the VMs including stopped VMs migrate to the new host. For the stopped VMs the power host gets updated. This was triggering HandlePowerStateReport, which finally calls updatePowerState, updating update_time for the VM. This caused capacity to stay reserved for the stopped VMs.
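In short, a power report that leaves a stopped, host-less VM stopped and host-less is now treated as a no-op, so update_time is not bumped and the VM is no longer counted as reserved capacity. Condensed from the change below:

    // Guard added in VMInstanceDaoImpl.updateState(), condensed:
    if (oldState == State.Stopped && newState == State.Stopped && oldHostId == null && newHostId == null) {
        return true; // nothing changed; skip the row update that would touch update_time
    }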
(cherry picked from commit 9d268c8cd589f70d41aa737206e7cf4b31007702) Signed-off-by: Rohit Yadav --- .../com/cloud/vm/dao/VMInstanceDaoImpl.java | 11 +++ .../cloud/vm/dao/VMInstanceDaoImplTest.java | 70 +++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 engine/schema/test/com/cloud/vm/dao/VMInstanceDaoImplTest.java diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 1fe2b3f2d9c..df5e60e7a1a 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -456,6 +456,9 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem // state is same, don't need to update return true; } + if(ifStateUnchanged(oldState,newState, oldHostId, newHostId)) { + return true; + } // lock the target row at beginning to avoid lock-promotion caused deadlock lockRow(vm.getId(), true); @@ -503,6 +506,14 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem return result > 0; } + boolean ifStateUnchanged(State oldState, State newState, Long oldHostId, Long newHostId ) { + if (oldState == State.Stopped && newState == State.Stopped && newHostId == null && oldHostId == null) { + // No change , no need to update + return true; + } + return false; + } + @Override public List listByLastHostId(Long hostId) { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/test/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/test/com/cloud/vm/dao/VMInstanceDaoImplTest.java new file mode 100644 index 00000000000..767b41420b7 --- /dev/null +++ b/engine/schema/test/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -0,0 +1,70 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.vm.dao; + +import com.cloud.utils.Pair; +import com.cloud.vm.VirtualMachine; +import org.joda.time.DateTime; +import org.junit.Before; +import org.junit.Test; +import org.junit.Assert; +import org.mockito.Mock; + +import static com.cloud.vm.VirtualMachine.State.Running; +import static com.cloud.vm.VirtualMachine.State.Stopped; + +import static org.mockito.Mockito.when; +import com.cloud.vm.VMInstanceVO; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +/** + * Created by sudharma_jain on 3/2/17. 
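+ * + * Exercises updateState() with a Stopped to Stopped power-off report and verifies that + * ifStateUnchanged() returns true only when both the state and the host are unchanged.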
+ */ + +public class VMInstanceDaoImplTest { + + @Spy + VMInstanceDaoImpl vmInstanceDao = new VMInstanceDaoImpl(); + + @Mock + VMInstanceVO vm; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + Long hostId = null; + when(vm.getHostId()).thenReturn(hostId); + when(vm.getUpdated()).thenReturn(5L); + when(vm.getUpdateTime()).thenReturn(DateTime.now().toDate()); + } + + @Test + public void testUpdateState() throws Exception { + Long destHostId = null; + Pair opaqueMock = new Pair(new Long(1), destHostId); + vmInstanceDao.updateState(Stopped, VirtualMachine.Event.FollowAgentPowerOffReport, Stopped, vm , opaqueMock); + } + + @Test + public void testIfStateAndHostUnchanged() throws Exception { + Assert.assertEquals(vmInstanceDao.ifStateUnchanged(Stopped, Stopped, null, null), true); + Assert.assertEquals(vmInstanceDao.ifStateUnchanged(Stopped, Running, null, null), false); + } + +} From 1535afe158610aaf8642ece90a400c5be7462d9a Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Tue, 21 Mar 2017 16:19:44 +0530 Subject: [PATCH 48/59] CLOUDSTACK-9843 : Performance improvement of deployVirtualMachine, createFirewallRule, createPortForwardingRule removed 1 sec sleep in SSHHelper. (cherry picked from commit 28bb8ba1e4568fc891831772e2051c474e81d528) Signed-off-by: Rohit Yadav --- .../main/java/com/cloud/utils/ssh/SshHelper.java | 13 +------------ .../java/com/cloud/utils/ssh/SshHelperTest.java | 2 +- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java index a86a4850b13..88be5774225 100644 --- a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java +++ b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java @@ -39,12 +39,6 @@ public class SshHelper { private static final int DEFAULT_CONNECT_TIMEOUT = 180000; private static final int DEFAULT_KEX_TIMEOUT = 60000; - /** - * Waiting time to check if the SSH session was successfully opened. This value (of 1000 - * milliseconds) represents one (1) second. - */ - private static final long WAITING_OPEN_SSH_SESSION = 1000; - private static final Logger s_logger = Logger.getLogger(SshHelper.class); public static Pair sshExecute(String host, int port, String user, File pemKeyFile, String password, String command) throws Exception { @@ -236,14 +230,9 @@ public class SshHelper { } } - /** - * It gets a {@link Session} from the given {@link Connection}; then, it waits - * {@value #WAITING_OPEN_SSH_SESSION} milliseconds before returning the session, given a time to - * ensure that the connection is open before proceeding the execution. 
- */ + protected static Session openConnectionSession(Connection conn) throws IOException, InterruptedException { Session sess = conn.openSession(); - Thread.sleep(WAITING_OPEN_SSH_SESSION); return sess; } diff --git a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java index 355a51416bc..18e7171c925 100644 --- a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java +++ b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java @@ -146,6 +146,6 @@ public class SshHelperTest { Mockito.verify(conn).openSession(); PowerMockito.verifyStatic(); - Thread.sleep(Mockito.anyLong()); + } } From 4bacb649945908fae35482903d369b8981a3827a Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Fri, 18 Dec 2015 09:57:08 +0530 Subject: [PATCH 49/59] CLOUDSTACK-9842: Unable to map root volume usage to VM removed code which nullifies vm_instance_id Also modified QueryManagerImpl to ignore volume which does not have uuid. This is to avoid duplicate volume listing. (cherry picked from commit 3cced927c4b1d7e1d8f19bccef46ed8d82e31f41) Signed-off-by: Rohit Yadav --- engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java | 1 - server/src/com/cloud/api/query/QueryManagerImpl.java | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java index 6ed556eabec..f691e30c743 100644 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -645,7 +645,6 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol String uuid = srcVol.getUuid(); Long instanceId = srcVol.getInstanceId(); srcVol.setUuid(null); - srcVol.setInstanceId(null); destVol.setUuid(uuid); destVol.setInstanceId(instanceId); update(srcVolId, srcVol); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index f61d6736e92..1df8f1e6b11 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -1769,6 +1769,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("idIN", sb.entity().getId(), SearchCriteria.Op.IN); sb.and("volumeType", sb.entity().getVolumeType(), SearchCriteria.Op.LIKE); + sb.and("uuid", sb.entity().getUuid(), SearchCriteria.Op.NNULL); sb.and("instanceId", sb.entity().getVmId(), SearchCriteria.Op.EQ); sb.and("dataCenterId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); From 8240bc4aa228324e8113795f9fc7bf8f31ec0668 Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Fri, 23 Dec 2016 16:01:21 +0530 Subject: [PATCH 50/59] CLOUDSTACK-9701: When host is disabled/removed, capacity_type for local storage in op_host_capacity is still enabled (cherry picked from commit e06e3b7cd41787efc4e0f3cbf2d5a3040b4f15c9) Signed-off-by: Rohit Yadav --- .../src/com/cloud/resource/ResourceManagerImpl.java | 7 +++++++ .../src/com/cloud/storage/StorageManagerImpl.java | 13 +++++++++++++ 2 files changed, 20 insertions(+) diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index 2bb1596ed1c..b1994264c15 100644 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ 
-1181,6 +1181,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, final CapacityState capacityState = nextState == ResourceState.Enabled ? CapacityState.Enabled : CapacityState.Disabled; final short[] capacityTypes = {Capacity.CAPACITY_TYPE_CPU, Capacity.CAPACITY_TYPE_MEMORY}; _capacityDao.updateCapacityState(null, null, null, host.getId(), capacityState.toString(), capacityTypes); + + final StoragePoolVO storagePool = _storageMgr.findLocalStorageOnHost(host.getId()); + + if(storagePool != null){ + final short[] capacityTypesLocalStorage = {Capacity.CAPACITY_TYPE_LOCAL_STORAGE}; + _capacityDao.updateCapacityState(null, null, null, storagePool.getId(), capacityState.toString(), capacityTypesLocalStorage); + } } return _hostDao.updateResourceState(currentState, event, nextState, host); } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index c0afa32f16f..128087ac651 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -997,6 +997,19 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } } } + + if (storagePool.getScope() == ScopeType.HOST) { + List storagePoolHostVO = _storagePoolHostDao.listByPoolId(storagePool.getId()); + + if(storagePoolHostVO != null && !storagePoolHostVO.isEmpty()){ + HostVO host = _hostDao.findById(storagePoolHostVO.get(0).getHostId()); + + if(host != null){ + capacityState = (host.getResourceState() == ResourceState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled; + } + } + } + if (capacities.size() == 0) { CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, From 75c81d918a359e25be3928ef42feb36614467a88 Mon Sep 17 00:00:00 2001 From: Anshul Gangwar Date: Thu, 29 Jan 2015 14:50:26 -0800 Subject: [PATCH 51/59] CLOUDSTACK-9705: Unauthenticated API allows Admin password reset Now, updating the password via the UpdateUser API is not allowed via the integration port (cherry picked from commit d206336e1a89d45162c95228ce3486b31d476504) Signed-off-by: Rohit Yadav --- .../org/apache/cloudstack/api/Parameter.java | 2 ++ .../api/command/admin/user/UpdateUserCmd.java | 9 +++---- server/src/com/cloud/api/ApiServer.java | 24 ++++++++++++++++++- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/api/src/org/apache/cloudstack/api/Parameter.java b/api/src/org/apache/cloudstack/api/Parameter.java index fa6075dc970..e354fda02d5 100644 --- a/api/src/org/apache/cloudstack/api/Parameter.java +++ b/api/src/org/apache/cloudstack/api/Parameter.java @@ -51,4 +51,6 @@ public @interface Parameter { RoleType[] authorized() default {}; ApiArgValidator[] validations() default {}; + + boolean acceptedOnAdminPort() default true; } diff --git a/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java b/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java index f21e2640ba2..e6ac36719e3 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/user/UpdateUserCmd.java @@ -18,8 +18,6 @@ package org.apache.cloudstack.api.command.admin.user; import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -29,6
+27,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.RegionService; +import org.apache.log4j.Logger; import com.cloud.user.Account; import com.cloud.user.User; @@ -61,10 +60,12 @@ public class UpdateUserCmd extends BaseCmd { private String lastname; @Parameter(name = ApiConstants.PASSWORD, - type = CommandType.STRING, - description = "Clear text password (default hashed to SHA256SALT). If you wish to use any other hasing algorithm, you would need to write a custom authentication adapter") + type = CommandType.STRING, + description = "Clear text password (default hashed to SHA256SALT). If you wish to use any other hasing algorithm, you would need to write a custom authentication adapter. Can't be passed when command is executed via integration.api.port", + acceptedOnAdminPort = false) private String password; + @Parameter(name = ApiConstants.SECRET_KEY, type = CommandType.STRING, description = "The secret key for the user. Must be specified with userSecretKey") private String secretKey; diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 4c39f5093f4..a8eb64202a3 100644 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -44,6 +44,7 @@ import com.cloud.utils.ConstantTimeComparator; import com.cloud.utils.HttpUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.ReflectUtil; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; @@ -65,6 +66,7 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.BaseAsyncCreateCmd; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.ServerApiException; @@ -150,6 +152,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.Type; +import java.lang.reflect.Field; import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; @@ -430,8 +433,27 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer if (!(responseType.equals(HttpUtils.RESPONSE_TYPE_JSON) || responseType.equals(HttpUtils.RESPONSE_TYPE_XML))) { responseType = HttpUtils.RESPONSE_TYPE_XML; } - try { + //verify that parameter is legit for passing via admin port + String[] command = (String[]) parameterMap.get("command"); + if (command != null) { + Class cmdClass = getCmdClass(command[0]); + if (cmdClass != null) { + List fields = ReflectUtil.getAllFieldsForClass(cmdClass, BaseCmd.class); + for (Field field : fields) { + Parameter parameterAnnotation = field.getAnnotation(Parameter.class); + if ((parameterAnnotation == null) || !parameterAnnotation.expose()) { + continue; + } + Object paramObj = parameterMap.get(parameterAnnotation.name()); + if (paramObj != null) { + if (!parameterAnnotation.acceptedOnAdminPort()) { + throw new ServerApiException(ApiErrorCode.ACCOUNT_ERROR, "Parameter " + parameterAnnotation.name() + " can't be passed through the API integration port"); + } + } + } + } + } // always trust commands from API port, user context will always be 
UID_SYSTEM/ACCOUNT_ID_SYSTEM CallContext.register(accountMgr.getSystemUser(), accountMgr.getSystemAccount()); sb.insert(0, "(userId=" + User.UID_SYSTEM + " accountId=" + Account.ACCOUNT_ID_SYSTEM + " sessionId=" + null + ") "); From f8f71a5af6dced67471935ca4744662f9e7c32de Mon Sep 17 00:00:00 2001 From: Sudhansu Date: Wed, 21 Dec 2016 23:54:01 +0530 Subject: [PATCH 52/59] CLOUDSTACK-9694: Unable to limit the Public IPs in VPC Added missing clause to check for vpc_id (cherry picked from commit cdf2b0727a653f71c2ea10be1b46d2002f88a2e3) Signed-off-by: Rohit Yadav --- .../cloud/network/dao/IPAddressDaoImpl.java | 5 +- .../maint/test_ip_resource_count_for_vpc.py | 350 ++++++++++++++++++ 2 files changed, 353 insertions(+), 2 deletions(-) create mode 100644 test/integration/component/maint/test_ip_resource_count_for_vpc.py diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java index 7b5746d7799..43345b916fe 100644 --- a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java @@ -135,8 +135,9 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen AllocatedIpCountForAccount.select(null, Func.COUNT, AllocatedIpCountForAccount.entity().getAddress()); AllocatedIpCountForAccount.and("account", AllocatedIpCountForAccount.entity().getAllocatedToAccountId(), Op.EQ); AllocatedIpCountForAccount.and("allocated", AllocatedIpCountForAccount.entity().getAllocatedTime(), Op.NNULL); - AllocatedIpCountForAccount.and("network", AllocatedIpCountForAccount.entity().getAssociatedWithNetworkId(), Op.NNULL); - AllocatedIpCountForAccount.done(); + AllocatedIpCountForAccount.and().op("network", AllocatedIpCountForAccount.entity().getAssociatedWithNetworkId(), Op.NNULL); + AllocatedIpCountForAccount.or("vpc", AllocatedIpCountForAccount.entity().getVpcId(), Op.NNULL); + AllocatedIpCountForAccount.cp();AllocatedIpCountForAccount.done(); CountFreePublicIps = createSearchBuilder(Long.class); CountFreePublicIps.select(null, Func.COUNT, null); diff --git a/test/integration/component/maint/test_ip_resource_count_for_vpc.py b/test/integration/component/maint/test_ip_resource_count_for_vpc.py new file mode 100644 index 00000000000..83ebcd43a6b --- /dev/null +++ b/test/integration/component/maint/test_ip_resource_count_for_vpc.py @@ -0,0 +1,350 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" Component tests VM deployment in VPC network functionality +""" +#Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.lib.base import (VirtualMachine, + NetworkOffering, + VpcOffering, + VPC, + NetworkACL, + PrivateGateway, + StaticRoute, + Router, + Network, + Account, + ServiceOffering, + PublicIPAddress, + NATRule, + StaticNATRule, + Configurations) + +from marvin.lib.common import (get_domain, + get_zone, + get_template, + wait_for_cleanup, + get_free_vlan) + +from marvin.lib.utils import (cleanup_resources, validateList) +from marvin.codes import * +from marvin.cloudstackAPI import rebootRouter +from marvin.cloudstackAPI import updateResourceCount + +class Services: + """Test IP count inn VPC network + """ + + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + }, + "network_offering": { + "name": 'VPC Network offering', + "displaytext": 'VPC Network off', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "serviceProviderList": { + "Dhcp": 'VpcVirtualRouter', + "Dns": 'VpcVirtualRouter', + "SourceNat": 'VpcVirtualRouter', + "PortForwarding": 'VpcVirtualRouter', + "Lb": 'VpcVirtualRouter', + "UserData": 'VpcVirtualRouter', + "StaticNat": 'VpcVirtualRouter', + "NetworkACL": 'VpcVirtualRouter' + }, + }, + "network_offering_no_lb": { + "name": 'VPC Network offering', + "displaytext": 'VPC Network off', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "serviceProviderList": { + "Dhcp": 'VpcVirtualRouter', + "Dns": 'VpcVirtualRouter', + "SourceNat": 'VpcVirtualRouter', + "PortForwarding": 'VpcVirtualRouter', + "UserData": 'VpcVirtualRouter', + "StaticNat": 'VpcVirtualRouter', + "NetworkACL": 'VpcVirtualRouter' + }, + }, + "vpc_offering": { + "name": 'VPC off', + "displaytext": 'VPC off', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat', + }, + "vpc": { + "name": "TestVPC", + "displaytext": "TestVPC", + "cidr": '10.0.0.1/24' + }, + "network": { + "name": "Test Network", + "displaytext": "Test Network", + "netmask": '255.255.255.0', + "limit": 5, + # Max networks allowed as per hypervisor + # Xenserver -> 5, VMWare -> 9 + }, + + "virtual_machine": { + "displayname": "Test VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + # Hypervisor type should be same as + # hypervisor type of cluster + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + # Cent OS 5.3 (64 bit) + "timeout": 10, + "mode": 'advanced' + } + + +class TestIPResourceCountVPC(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.testClient = super(TestIPResourceCountVPC, cls).getClsTestClient() + cls.api_client = cls.testClient.getApiClient() + + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client) + cls.zone = 
get_zone(cls.api_client, cls.testClient.getZoneForTests()) + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = cls.template.id + + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.vpc_off = VpcOffering.create( + cls.api_client, + cls.services["vpc_offering"] + ) + cls.vpc_off.update(cls.api_client, state='Enabled') + cls._cleanup = [ + cls.service_offering, + cls.vpc_off + ] + return + + @classmethod + def tearDownClass(cls): + try: + #Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.account = Account.create( + self.apiclient, + self.services["account"], + admin=True, + domainid=self.domain.id + ) + self.cleanup = [self.account] + return + + def tearDown(self): + try: + #Clean up, terminate the created network offerings + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def validate_vpc_offering(self, vpc_offering): + """Validates the VPC offering""" + + self.debug("Check if the VPC offering is created successfully?") + vpc_offs = VpcOffering.list( + self.apiclient, + id=vpc_offering.id + ) + self.assertEqual( + isinstance(vpc_offs, list), + True, + "List VPC offerings should return a valid list" + ) + self.assertEqual( + vpc_offering.name, + vpc_offs[0].name, + "Name of the VPC offering should match with listVPCOff data" + ) + self.debug( + "VPC offering is created successfully - %s" % + vpc_offering.name) + return + + def validate_vpc_network(self, network, state=None): + """Validates the VPC network""" + + self.debug("Check if the VPC network is created successfully?") + vpc_networks = VPC.list( + self.apiclient, + id=network.id + ) + self.assertEqual( + isinstance(vpc_networks, list), + True, + "List VPC network should return a valid list" + ) + self.assertEqual( + network.name, + vpc_networks[0].name, + "Name of the VPC network should match with listVPC data" + ) + if state: + self.assertEqual( + vpc_networks[0].state, + state, + "VPC state should be '%s'" % state + ) + self.debug("VPC network validated - %s" % network.name) + return + + + def updateIPCount(self): + cmd=updateResourceCount.updateResourceCountCmd() + cmd.account=self.account.name + cmd.domainid=self.domain.id + + responce=self.apiclient.updateResourceCount(cmd) + + def acquire_publicip(self, network, vpc): + self.debug("Associating public IP for network: %s" % network.name) + public_ip = PublicIPAddress.create(self.apiclient, + accountid=self.account.name, + zoneid=self.zone.id, + domainid=self.account.domainid, + networkid=network.id, + vpcid=vpc.id + ) + self.debug("Associated {} with network {}".format(public_ip.ipaddress.ipaddress, network.id)) + return public_ip + + @attr(tags=["advanced", "intervlan"], required_hardware="false") + def test_01_ip_resouce_count_vpc_network(self): + """ Test IP count in VPC networks + """ + self.debug("Creating a VPC offering..") + vpc_off = VpcOffering.create( + self.apiclient, + self.services["vpc_offering"] + ) + + self.validate_vpc_offering(vpc_off) + + self.debug("Enabling the VPC offering created") + 
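+ # A newly created VPC offering starts out Disabled, so enable it before creating a VPC from it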
vpc_off.update(self.apiclient, state='Enabled') + + self.debug("creating a VPC network in the account: %s" % + self.account.name) + self.services["vpc"]["cidr"] = '10.1.1.1/16' + vpc = VPC.create( + self.apiclient, + self.services["vpc"], + vpcofferingid=vpc_off.id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid + ) + self.validate_vpc_network(vpc) + + + nw_off = NetworkOffering.create( + self.apiclient, + self.services["network_offering"], + conservemode=False + ) + # Enable Network offering + nw_off.update(self.apiclient, state='Enabled') + self._cleanup.append(nw_off) + + # Creating network using the network offering created + self.debug("Creating network with network offering: %s" % nw_off.id) + network_1 = Network.create( + self.apiclient, + self.services["network"], + accountid=self.account.name, + domainid=self.account.domainid, + networkofferingid=nw_off.id, + zoneid=self.zone.id, + gateway='10.1.1.1', + vpcid=vpc.id + ) + self.debug("Created network with ID: %s" % network_1.id) + + account_list = Account.list(self.apiclient, id=self.account.id) + totalip_1 = account_list[0].iptotal + self.debug("Total IP: %s" % totalip_1) + + public_ip_1 = self.acquire_publicip(network_1, vpc) + public_ip_2 = self.acquire_publicip(network_1, vpc) + public_ip_3 = self.acquire_publicip(network_1, vpc) + + account_list = Account.list(self.apiclient, id=self.account.id) + totalip = account_list[0].iptotal + + self.debug("Total IP: %s" % totalip) + + self.assertTrue(totalip - totalip_1 == 3,"publicip count is 3") + self.updateIPCount() + + account_list = Account.list(self.apiclient, id=self.account.id) + totalip = account_list[0].iptotal + self.assertTrue(totalip - totalip_1 == 3, "publicip count is 3") From 6977cb38411efc72ddf81c6d6bf41bbbca06b852 Mon Sep 17 00:00:00 2001 From: Sateesh Chodapuneedi Date: Tue, 20 Dec 2016 07:45:26 +0530 Subject: [PATCH 53/59] CLOUDSTACK-9684 Invalid zone id error while listing vmware zone Issue ===== While listing datacenters associated with a zone, only zone Id validation is required. There is no need to have additional checks like zone is a legacy zone or not. Fix === Removed unnecessary checks over zone ID and just checking if zone with specified ID exists or not. 
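Illustratively, the validation now splits into two levels (a sketch, not part
of the diff below, though the method names match it):

    // Listing the VMware DC of a zone requires only that the zone exists:
    doesZoneExist(zoneId);    // throws InvalidParameterValueException if not found

    // Associating a VMware DC still goes through the stricter check, which
    // additionally rejects legacy zones:
    validateZone(zoneId);     // doesZoneExist(zoneId) plus the isLegacyZone(zoneId) check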
Signed-off-by: Sateesh Chodapuneedi
(cherry picked from commit 0ef1c17541808641983e7c109db31e5cecc0ef44)
Signed-off-by: Rohit Yadav
---
 .../vmware/manager/VmwareManagerImpl.java     | 24 ++++++++++++-------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
index da83283afd3..7d54ca464ff 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java
@@ -189,7 +189,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw

     private String _rootDiskController = DiskControllerType.ide.toString();
-    private String _dataDiskController = DiskControllerType.osdefault.toString();
+    private final String _dataDiskController = DiskControllerType.osdefault.toString();

     private final Map _storageMounts = new HashMap();
@@ -1111,8 +1111,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
     @Override
     public boolean removeVmwareDatacenter(RemoveVmwareDcCmd cmd) throws ResourceInUseException {
         Long zoneId = cmd.getZoneId();
-        // Validate zone
-        validateZone(zoneId);
+        // Validate Id of zone
+        doesZoneExist(zoneId);
         // Zone validation to check if the zone already has resources.
         // Association of VMware DC to zone is not allowed if zone already has resources added.
         validateZoneWithResources(zoneId, "remove VMware datacenter to zone");
@@ -1180,10 +1180,7 @@ private void validateZone(Long zoneId) throws InvalidParameterValueException {
         // Check if zone with specified id exists
-        DataCenterVO zone = _dcDao.findById(zoneId);
-        if (zone == null) {
-            throw new InvalidParameterValueException("Can't find zone by the id specified.");
-        }
+        doesZoneExist(zoneId);
         // Check if zone is legacy zone
         if (isLegacyZone(zoneId)) {
             throw new InvalidParameterValueException("The specified zone is legacy zone. Adding VMware datacenter to legacy zone is not supported.");
@@ -1226,7 +1223,7 @@
         long vmwareDcId;
         // Validate if zone id parameter passed to API is valid
-        validateZone(zoneId);
+        doesZoneExist(zoneId);
         // Check if zone is associated with VMware DC
         vmwareDcZoneMap = _vmwareDcZoneMapDao.findByZoneId(zoneId);
@@ -1243,6 +1240,17 @@
         return vmwareDcList;
     }

+    private void doesZoneExist(Long zoneId) throws InvalidParameterValueException {
+        // Check if zone with specified id exists
+        DataCenterVO zone = _dcDao.findById(zoneId);
+        if (zone == null) {
+            throw new InvalidParameterValueException("Can't find zone by the id specified.");
+        }
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace("Zone with id:[" + zoneId + "] exists.");
+        }
+    }
+
     @Override
     public boolean hasNexusVSM(Long clusterId) {
         ClusterVSMMapVO vsmMapVo = null;

From 30bb5587d2bc1dd9a26cc1216023f390614f7b1e Mon Sep 17 00:00:00 2001
From: Priyank Parihar
Date: Thu, 5 May 2016 15:30:09 +0530
Subject: [PATCH 54/59] CLOUDSTACK-9610: Disabled Host Keeps Being up status after unmanaging cluster.
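Root cause, sketched from the diff that follows: while unmanaging a cluster,
only hosts returned by listAllUpAndEnabledHosts() were processed, so a host
whose resource state was Disabled was skipped and kept reporting Up. The new
listAllHosts() helper applies no status or resource-state predicate. An
illustrative fragment (generics written out here for readability; the diff
below renders them stripped):

    // Consider every Routing host in the cluster, regardless of its state.
    final QueryBuilder<HostVO> sc = QueryBuilder.create(HostVO.class);
    sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing);
    sc.and(sc.entity().getClusterId(), Op.EQ, cluster.getId());
    sc.and(sc.entity().getPodId(), Op.EQ, cluster.getPodId());
    sc.and(sc.entity().getDataCenterId(), Op.EQ, cluster.getDataCenterId());
    final List<HostVO> hosts = sc.list();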
(cherry picked from commit 4b165f1b8f0781299ba955c8425a32e15869a1d1) Signed-off-by: Rohit Yadav --- .../com/cloud/resource/ResourceManager.java | 2 ++ .../cloud/resource/ResourceManagerImpl.java | 18 +++++++++++++++++- .../resource/MockResourceManagerImpl.java | 5 +++++ 3 files changed, 24 insertions(+), 1 deletion(-) mode change 100644 => 100755 engine/components-api/src/com/cloud/resource/ResourceManager.java mode change 100644 => 100755 server/src/com/cloud/resource/ResourceManagerImpl.java mode change 100644 => 100755 server/test/com/cloud/resource/MockResourceManagerImpl.java diff --git a/engine/components-api/src/com/cloud/resource/ResourceManager.java b/engine/components-api/src/com/cloud/resource/ResourceManager.java old mode 100644 new mode 100755 index 849387e598a..7783fa139a9 --- a/engine/components-api/src/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/com/cloud/resource/ResourceManager.java @@ -97,6 +97,8 @@ public interface ResourceManager extends ResourceService { public List listAllUpAndEnabledHosts(Host.Type type, Long clusterId, Long podId, long dcId); + public List listAllHosts(final Host.Type type, final Long clusterId, final Long podId, final long dcId); + public List listAllHostsInCluster(long clusterId); public List listHostsInClusterByStatus(long clusterId, Status status); diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java old mode 100644 new mode 100755 index b1994264c15..899ad213c74 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -1086,7 +1086,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, try { cluster.setManagedState(Managed.ManagedState.PrepareUnmanaged); _clusterDao.update(cluster.getId(), cluster); - List hosts = listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); + List hosts = listAllHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); for (final HostVO host : hosts) { if (host.getType().equals(Host.Type.Routing) && !host.getStatus().equals(Status.Down) && !host.getStatus().equals(Status.Disconnected) && !host.getStatus().equals(Status.Up) && !host.getStatus().equals(Status.Alert)) { @@ -2494,6 +2494,22 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return sc.list(); } + @Override + public List listAllHosts(final Type type, final Long clusterId, final Long podId, final long dcId) { + final QueryBuilder sc = QueryBuilder.create(HostVO.class); + if (type != null) { + sc.and(sc.entity().getType(), Op.EQ, type); + } + if (clusterId != null) { + sc.and(sc.entity().getClusterId(), Op.EQ, clusterId); + } + if (podId != null) { + sc.and(sc.entity().getPodId(), Op.EQ, podId); + } + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + return sc.list(); + } + @Override public List listAllUpAndEnabledNonHAHosts(final Type type, final Long clusterId, final Long podId, final long dcId) { final String haTag = _haMgr.getHaTag(); diff --git a/server/test/com/cloud/resource/MockResourceManagerImpl.java b/server/test/com/cloud/resource/MockResourceManagerImpl.java old mode 100644 new mode 100755 index 6e6d66fe18f..e293f2d5400 --- a/server/test/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/test/com/cloud/resource/MockResourceManagerImpl.java @@ -359,6 +359,11 @@ public class MockResourceManagerImpl extends ManagerBase 
implements ResourceMana
         return null;
     }

+    @Override
+    public List listAllHosts(final Type type, final Long clusterId, final Long podId, final long dcId) {
+        return null;
+    }
+
     /* (non-Javadoc)
      * @see com.cloud.resource.ResourceManager#listAllHostsInCluster(long)
      */

From 77fb2c92cedc88919907610d56164e16cb9a87ce Mon Sep 17 00:00:00 2001
From: Subhash Yedugundla
Date: Tue, 4 Nov 2014 14:15:22 +0530
Subject: [PATCH 55/59] CLOUDSTACK-9592 Empty responses from site to site connection status are not handled properly

(cherry picked from commit ddf8fd514af3ee6fd2a7524a2bc7ea84fc22ea2d)
Signed-off-by: Rohit Yadav
---
 .../api/CheckS2SVpnConnectionsAnswer.java     |  10 ++
 .../VirtualNetworkApplianceManagerImpl.java   |  28 ++--
 ...irtualNetworkApplianceManagerImplTest.java | 129 ++++++++++++++++--
 3 files changed, 142 insertions(+), 25 deletions(-)

diff --git a/core/src/com/cloud/agent/api/CheckS2SVpnConnectionsAnswer.java b/core/src/com/cloud/agent/api/CheckS2SVpnConnectionsAnswer.java
index 50dfaf202f9..b299c602dde 100644
--- a/core/src/com/cloud/agent/api/CheckS2SVpnConnectionsAnswer.java
+++ b/core/src/com/cloud/agent/api/CheckS2SVpnConnectionsAnswer.java
@@ -76,4 +76,14 @@ public class CheckS2SVpnConnectionsAnswer extends Answer {
         }
         return null;
     }
+
+    public boolean isIPPresent(String ip) {
+        if (this.getResult()) {
+            Boolean status = ipToConnected.get(ip);
+            if (status != null) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index e877f023702..903d31dec3c 100644
--- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@ -898,18 +898,22 @@ Configurable, StateListener
 routers = new ArrayList();
+        routers.add(router);
+
+        Site2SiteVpnConnectionVO conn = new Site2SiteVpnConnectionVO(1L, 1L, 1L, 1L, false);
+        Site2SiteVpnConnectionVO conn1 = new Site2SiteVpnConnectionVO(1L, 1L, 1L, 1L, false);
+        conn.setState(Site2SiteVpnConnection.State.Disconnected);
+        conn1.setState(Site2SiteVpnConnection.State.Disconnected);
+        List<Site2SiteVpnConnectionVO> conns = new ArrayList<>();
+        conns.add(conn);
+        conns.add(conn1);
+
+        Site2SiteCustomerGatewayVO gw = new Site2SiteCustomerGatewayVO("Testing gateway", 1L, 1L, "192.168.50.15", "Guest List", "ipsecPsk", "ikePolicy", "espPolicy", 1L, 1L, true, true);
+        HostVO hostVo = new HostVO(1L, "Testing host", Host.Type.Routing, "192.168.50.15", "privateNetmask", "privateMacAddress", "publicIpAddress", "publicNetmask", "publicMacAddress", "storageIpAddress", "storageNetmask", "storageMacAddress", "deuxStorageIpAddress", "duxStorageNetmask", "deuxStorageMacAddress", "guid", Status.Up, "version", "iqn", new Date(), 1L, 1L, 1L, 1L, "parent", 20L, Storage.StoragePoolType.Gluster);
+        hostVo.setManagementServerId(ManagementServerNode.getManagementServerId());
+
+        ArrayList<String> ipList = new ArrayList<>();
+        ipList.add("192.168.50.15");
+
+        _s2sConnCommand = new CheckS2SVpnConnectionsCommand(ipList);
+
+        when(_s2sVpnMgr.getConnectionsForRouter(router)).thenReturn(conns);
+        when(_s2sVpnConnectionDao.persist(conn)).thenReturn(null);
+        when(_s2sCustomerGatewayDao.findById(conn.getCustomerGatewayId())).thenReturn(gw);
+        when(_hostDao.findById(router.getHostId())).thenReturn(hostVo);
+        when(_routerControlHelper.getRouterControlIp(router.getId())).thenReturn("192.168.50.15");
+        doReturn(_s2sVpnAnswer).when(_agentMgr).easySend(anyLong(),
any(CheckS2SVpnConnectionsCommand.class)); + when(_s2sVpnAnswer.getResult()).thenReturn(true); + when(_s2sVpnConnectionDao.acquireInLockTable(conn.getId())).thenReturn(conn); + when(_s2sVpnAnswer.isIPPresent("192.168.50.15")).thenReturn(true); + when(_s2sVpnAnswer.isConnected("192.168.50.15")).thenReturn(true); + doNothing().when(_alertMgr).sendAlert(any(AlertManager.AlertType.class), anyLong(), anyLong(), anyString(), anyString()); + + virtualNetworkApplianceManagerImpl.updateSite2SiteVpnConnectionState(routers); + + for(Site2SiteVpnConnection connection : conns){ + assertEquals(Site2SiteVpnConnection.State.Connected, connection.getState()); + } + } + + + } From 96671543dc96309bc141a236305d8459d3d4c43e Mon Sep 17 00:00:00 2001 From: Sudharma Jain Date: Wed, 26 Oct 2016 18:08:58 +0530 Subject: [PATCH 56/59] CLOUDSTACK-9567 Difference in the api call outputs for CAPACITY_TYPE_CPU = 1 (cherry picked from commit 732be53044d0a682bd124a291febc6e11cfde77b) Signed-off-by: Rohit Yadav --- .../cloud/capacity/dao/CapacityDaoImpl.java | 8 +- test/integration/component/test_list_pod.py | 110 ++++++++++++++++++ 2 files changed, 114 insertions(+), 4 deletions(-) create mode 100644 test/integration/component/test_list_pod.py diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java index 95d57d0eb47..f4e78051046 100644 --- a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -139,10 +139,10 @@ public class CapacityDaoImpl extends GenericDaoBase implements private static final String LIST_CAPACITY_GROUP_BY_CAPACITY_PART1= "SELECT sum(capacity.used_capacity), sum(capacity.reserved_capacity)," - + " (case capacity_type when 1 then (sum(total_capacity) * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL (10,4))) " - + "when '0' then (sum(total_capacity) * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL(10,4)))else sum(total_capacity) end)," - + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then (sum(total_capacity) * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL(10,4))) " - + "when '0' then (sum(total_capacity) * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL(10,4))) else sum(total_capacity) end)) percent," + + " (case capacity_type when 1 then sum(total_capacity * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL (10,4))) " + + "when '0' then sum(total_capacity * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL(10,4)))else sum(total_capacity) end)," + + "((sum(capacity.used_capacity) + sum(capacity.reserved_capacity)) / ( case capacity_type when 1 then sum(total_capacity * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'cpuOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) 
AS DECIMAL(10,4))) " + + "when '0' then sum(total_capacity * CAST((select value from `cloud`.`cluster_details` where cluster_details.name= 'memoryOvercommitRatio' AND cluster_details.cluster_id=capacity.cluster_id) AS DECIMAL(10,4))) else sum(total_capacity) end)) percent," + "capacity.capacity_type, capacity.data_center_id, pod_id, cluster_id FROM `cloud`.`op_host_capacity` capacity WHERE total_capacity > 0 AND data_center_id is not null AND capacity_state='Enabled' "; private static final String LIST_CAPACITY_GROUP_BY_CAPACITY_PART2 = " GROUP BY capacity_type"; diff --git a/test/integration/component/test_list_pod.py b/test/integration/component/test_list_pod.py new file mode 100644 index 00000000000..07ab2c589af --- /dev/null +++ b/test/integration/component/test_list_pod.py @@ -0,0 +1,110 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" test for listPods +""" + +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.lib.utils import (cleanup_resources) +from marvin.lib.base import (Pod, Cluster, Capacities) +from marvin.cloudstackAPI import (updateConfiguration) + + +class TestListPod(cloudstackTestCase): + @classmethod + def setUpClass(cls): + super(TestListPod, cls) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + + # build cleanup list + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + self.debug("Warning! 
Exception in tearDown: %s" % e) + + @attr(tags=["advanced", "basic"], required_hardware="false") + def test_list_pod_with_overcommit(self): + """Test List Pod Api with cluster CPU and Memory OverProvisioning + """ + + podlist = Pod.list(self.apiclient) + + for pod in podlist: + clusterlist = Cluster.list(self.apiclient, podid=pod.id) + if len(clusterlist) > 1: + + updateCpuOvercommitCmd = updateConfiguration.updateConfigurationCmd() + updateCpuOvercommitCmd.clusterid = clusterlist[0].id + updateCpuOvercommitCmd.name="cpu.overprovisioning.factor" + + if clusterlist[0].cpuovercommitratio == clusterlist[1].cpuovercommitratio and clusterlist[0].cpuovercommitratio == "1.0": + cpuovercommit = "1.0" + updateCpuOvercommitCmd.value="2.0" + self.apiclient.updateConfiguration(updateCpuOvercommitCmd) + + elif clusterlist[0].cpuovercommitratio != clusterlist[1].cpuovercommitratio: + cpuovercommit = clusterlist[0].cpuovercommitratio + + else: + cpuovercommit = clusterlist[0].cpuovercommitratio + updateCpuOvercommitCmd.value="1.0" + self.apiclient.updateConfiguration(updateCpuOvercommitCmd) + + updateMemoryOvercommitCmd = updateConfiguration.updateConfigurationCmd() + updateMemoryOvercommitCmd.clusterid = clusterlist[0].id + updateMemoryOvercommitCmd.name="mem.overprovisioning.factor" + + if clusterlist[0].memoryovercommitratio == clusterlist[1].memoryovercommitratio and clusterlist[0].memoryovercommitratio == "1.0": + memoryovercommit = "1.0" + updateMemoryOvercommitCmd.value="2.0" + self.apiclient.updateConfiguration(updateMemoryOvercommitCmd) + + elif clusterlist[0].memoryovercommitratio != clusterlist[1].memoryovercommitratio: + memoryovercommit = clusterlist[0].memoryovercommitratio + + else: + memoryovercommit = clusterlist[0].memoryovercommitratio + updateMemoryOvercommitCmd.value="1.0" + self.apiclient.updateConfiguration(updateMemoryOvercommitCmd) + + podWithCap = Pod.list(self.apiclient, id=pod.id, showcapacities=True) + cpucapacity = Capacities.list(self.apiclient, podid=pod.id, type=1) + memorycapacity = Capacities.list(self.apiclient, podid=pod.id, type=0) + + updateCpuOvercommitCmd.value = cpuovercommit + updateMemoryOvercommitCmd.value = memoryovercommit + + self.apiclient.updateConfiguration(updateCpuOvercommitCmd) + self.apiclient.updateConfiguration(updateMemoryOvercommitCmd) + + self.assertEqual( + [cap for cap in podWithCap[0].capacity if cap.type == 1][0].capacitytotal, + cpucapacity[0].capacitytotal, + "listPods api returns wrong CPU capacity " + ) + + self.assertEqual( + [cap for cap in podWithCap[0].capacity if cap.type == 0][0].capacitytotal, + memorycapacity[0].capacitytotal, + "listPods api returns wrong memory capacity" + ) From aaae977c532bdde9e30e29a23dccf50936b8ab69 Mon Sep 17 00:00:00 2001 From: Subhash yedugundla Date: Tue, 4 Nov 2014 14:15:22 +0530 Subject: [PATCH 57/59] CLOUDSTACK-9555 when a template is deleted and then copied over again , it is still marked as Removed in template_zone_ref table (cherry picked from commit c3bc69c724440786f890116b516b03fc38c672cb) Signed-off-by: Rohit Yadav --- test/integration/smoke/test_templates.py | 198 +++++++++++++++++++++++ 1 file changed, 198 insertions(+) diff --git a/test/integration/smoke/test_templates.py b/test/integration/smoke/test_templates.py index 145282937ac..175f44e2d3c 100644 --- a/test/integration/smoke/test_templates.py +++ b/test/integration/smoke/test_templates.py @@ -19,6 +19,7 @@ #Import Local Modules from marvin.codes import FAILED from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from 
marvin.cloudstackAPI import listZones from marvin.lib.utils import random_gen, cleanup_resources from marvin.lib.base import (Account, ServiceOffering, @@ -794,3 +795,200 @@ class TestTemplates(cloudstackTestCase): "ListTemplates should not list any system templates" ) return + +class TestCopyDeleteTemplate(cloudstackTestCase): + + def setUp(self): + + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + + if self.unsupportedHypervisor: + self.skipTest("Skipping test because unsupported hypervisor\ + %s" % self.hypervisor) + return + + def tearDown(self): + try: + #Clean up, terminate the created templates + cleanup_resources(self.apiclient, self.cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @classmethod + def setUpClass(cls): + testClient = super(TestCopyDeleteTemplate, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls._cleanup = [] + cls.services = testClient.getParsedTestDataConfig() + cls.unsupportedHypervisor = False + cls.hypervisor = testClient.getHypervisorInfo() + if cls.hypervisor.lower() in ['lxc']: + # Template creation from root volume is not supported in LXC + cls.unsupportedHypervisor = True + return + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + try: + cls.disk_offering = DiskOffering.create( + cls.apiclient, + cls.services["disk_offering"] + ) + cls._cleanup.append(cls.disk_offering) + template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"] + ) + if template == FAILED: + assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] + + cls.services["template"]["ostypeid"] = template.ostypeid + cls.services["template_2"]["ostypeid"] = template.ostypeid + cls.services["ostypeid"] = template.ostypeid + + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["volume"]["diskoffering"] = cls.disk_offering.id + cls.services["volume"]["zoneid"] = cls.zone.id + cls.services["sourcezoneid"] = cls.zone.id + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id + ) + cls._cleanup.append(cls.account) + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offerings"]["tiny"] + ) + cls._cleanup.append(cls.service_offering) + #create virtual machine + cls.virtual_machine = VirtualMachine.create( + cls.apiclient, + cls.services["virtual_machine"], + templateid=template.id, + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + mode=cls.services["mode"] + ) + #Stop virtual machine + cls.virtual_machine.stop(cls.apiclient) + + list_volume = Volume.list( + cls.apiclient, + virtualmachineid=cls.virtual_machine.id, + type='ROOT', + listall=True + ) + + cls.volume = list_volume[0] + except Exception as e: + cls.tearDownClass() + raise unittest.SkipTest("Exception in setUpClass: %s" % e) + return + + @classmethod + def tearDownClass(cls): + try: + cls.apiclient = super(TestCopyDeleteTemplate, cls).getClsTestClient().getApiClient() + #Cleanup resources used + cleanup_resources(cls.apiclient, cls._cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + return + + + + @attr(tags=["advanced", "advancedns"], required_hardware="false") + def 
test_09_copy_delete_template(self):
+        cmd = listZones.listZonesCmd()
+        zones = self.apiclient.listZones(cmd)
+        if not isinstance(zones, list):
+            raise Exception("Failed to find zones.")
+        if len(zones) < 2:
+            self.skipTest("Skipping test as there are less than two zones.")
+            return
+
+        self.sourceZone = zones[0]
+        self.destZone = zones[1]
+
+        template = Template.create(
+            self.apiclient,
+            self.services["template"],
+            self.volume.id,
+            account=self.account.name,
+            domainid=self.account.domainid
+        )
+        self.cleanup.append(template)
+
+        self.debug("Created template with ID: %s" % template.id)
+
+        list_template_response = Template.list(
+            self.apiclient,
+            templatefilter=self.services["templatefilter"],
+            id=template.id
+        )
+
+        self.assertEqual(
+            isinstance(list_template_response, list),
+            True,
+            "Check list response returns a valid list"
+        )
+        # Verify template response to check whether template added successfully
+        self.assertNotEqual(
+            len(list_template_response),
+            0,
+            "Check template available in List Templates"
+        )
+        # Copy template from zone1 to zone2
+        copytemplate = Template.copy(
+            self.apiclient,
+            id=template.id,
+            sourcezoneid=self.sourceZone.id,
+            destzoneid=self.destZone.id
+        )
+
+        list_template_response = Template.list(
+            self.apiclient,
+            templatefilter=self.services["template"]["templatefilter"],
+            id=template.id,
+            zoneid=self.destZone.id
+        )
+        self.assertNotEqual(
+            list_template_response,
+            None,
+            "Check template available in List Templates of destination zone"
+        )
+
+        self.deltemplate = list_template_response[0]
+
+        self.debug("Deleting template: %s" % self.deltemplate)
+        # Delete the template in the destination zone
+        self.deltemplate.delete(self.apiclient)
+        self.debug("Delete template: %s successful" % self.deltemplate)
+
+        # Copy the same template over again after deleting it
+        copytemplate = Template.copy(
+            self.apiclient,
+            id=template.id,
+            sourcezoneid=self.sourceZone.id,
+            destzoneid=self.destZone.id
+        )
+
+        removed = self.dbclient.execute(
+            "select removed from template_zone_ref where zone_id='%s' and template_id='%s';" %
+            (self.destZone.id, template.id))
+
+        self.assertEqual(
+            removed[0][0],
+            None,
+            "Removed state is not correct."
+ ) + return From 26bcb9afd81077ab41e363e69546b251bb5bd6be Mon Sep 17 00:00:00 2001 From: Nitesh Sarda Date: Wed, 31 May 2017 18:19:08 +0530 Subject: [PATCH 58/59] CLOUDSTACK-9935 : Search in VPN Customer Gateway not working (cherry picked from commit fdadc7b7607e9f3b78d0577a09986fab346c1788) Signed-off-by: Rohit Yadav --- .../com/cloud/network/vpn/Site2SiteVpnManagerImpl.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index 37465262e16..2e722753f63 100644 --- a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -586,6 +586,7 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn boolean listAll = cmd.listAll(); long startIndex = cmd.getStartIndex(); long pageSizeVal = cmd.getPageSizeVal(); + String keyword = cmd.getKeyword(); Account caller = CallContext.current().getCallingAccount(); List permittedAccounts = new ArrayList(); @@ -602,12 +603,17 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn _accountMgr.buildACLSearchBuilder(sb, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); SearchCriteria sc = sb.create(); _accountMgr.buildACLSearchCriteria(sc, domainId, isRecursive, permittedAccounts, listProjectResourcesCriteria); if (id != null) { - sc.addAnd("id", SearchCriteria.Op.EQ, id); + sc.setParameters("id", id); + } + if(keyword != null && !keyword.isEmpty()) + { + sc.setParameters("name", "%" + keyword + "%"); } Pair, Integer> result = _customerGatewayDao.searchAndCount(sc, searchFilter); From b70879cbac563ff63295f8b4510e21bf96eece55 Mon Sep 17 00:00:00 2001 From: subhash yedugundla Date: Tue, 31 May 2016 17:15:04 +0530 Subject: [PATCH 59/59] CLOUDSTACK-9560 Root volume of deleted VM left unremoved (cherry picked from commit 5568ee93ad08f998822dc0428e511dc8e8609014) Signed-off-by: Rohit Yadav --- .../com/cloud/storage/StorageManagerImpl.java | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 128087ac651..e12e91ae330 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -27,6 +27,7 @@ import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -2253,27 +2254,41 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // Cleanup expired volume URLs List volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls(); + HashSet expiredVolumeIds = new HashSet(); + HashSet activeVolumeIds = new HashSet(); for(VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList){ + long volumeId = volumeOnImageStore.getVolumeId(); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), volumeOnImageStore.getExtractUrlCreated()); if(downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval){ // URL hasnt expired yet + activeVolumeIds.add(volumeId); continue; } - - s_logger.debug("Removing download url " + 
volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId()); + expiredVolumeIds.add(volumeId); + s_logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); // Remove it from image store ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); secStore.deleteExtractUrl(volumeOnImageStore.getInstallPath(), volumeOnImageStore.getExtractUrl(), Upload.Type.VOLUME); - // Now expunge it from DB since this entry was created only for download purpose + // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); }catch(Throwable th){ s_logger.warn("Caught exception while deleting download url " +volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); } } + for(Long volumeId : expiredVolumeIds) + { + if(activeVolumeIds.contains(volumeId)) { + continue; + } + Volume volume = _volumeDao.findById(volumeId); + if (volume != null && volume.getState() == Volume.State.Expunged) { + _volumeDao.remove(volumeId); + } + } // Cleanup expired template URLs List templatesOnImageStoreList = _templateStoreDao.listTemplateDownloadUrls();