From 7319debc87e5b64c45eaa57679e2154d02378abf Mon Sep 17 00:00:00 2001
From: Abhishek Kumar
Date: Thu, 1 Jun 2023 15:29:06 +0530
Subject: [PATCH] cks: k8s cluster on vpc tier (#7479)

* cks: cluster on vpc tier

Signed-off-by: Abhishek Kumar

* fix

Signed-off-by: Abhishek Kumar

* wip

Signed-off-by: Abhishek Kumar

* fix

Signed-off-by: Abhishek Kumar

* refactor

Signed-off-by: Abhishek Kumar

* changes

Signed-off-by: Abhishek Kumar

* fix test

Signed-off-by: Abhishek Kumar

* fix test

Signed-off-by: Abhishek Kumar

* fix test

Signed-off-by: Abhishek Kumar

* fix

Signed-off-by: Abhishek Kumar

* python fix

Signed-off-by: Abhishek Kumar

* fix trailing space

Signed-off-by: Abhishek Kumar

---------

Signed-off-by: Abhishek Kumar
---
 .../cloud/network/vpc/NetworkACLService.java  |   4 +-
 .../cluster/KubernetesClusterManagerImpl.java |  87 +++---
 .../KubernetesClusterActionWorker.java        | 170 ++++++++---
 .../KubernetesClusterDestroyWorker.java       |  71 ++++-
 ...esClusterResourceModifierActionWorker.java | 285 +++++++++++++++---
 .../KubernetesClusterScaleWorker.java         |  75 +++--
 .../KubernetesClusterStartWorker.java         | 140 +++------
 .../KubernetesClusterManagerImplTest.java     | 106 ++++++-
 .../KubernetesClusterActionWorkerTest.java    | 133 ++++++++
 .../network/vpc/NetworkACLServiceImpl.java    |  18 +-
 .../vpc/NetworkACLServiceImplTest.java        |  33 +-
 .../smoke/test_kubernetes_clusters.py         | 140 ++++++++-
 12 files changed, 948 insertions(+), 314 deletions(-)
 create mode 100644 plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorkerTest.java

diff --git a/api/src/main/java/com/cloud/network/vpc/NetworkACLService.java b/api/src/main/java/com/cloud/network/vpc/NetworkACLService.java
index 7c4e8b45333..40aee1f08f1 100644
--- a/api/src/main/java/com/cloud/network/vpc/NetworkACLService.java
+++ b/api/src/main/java/com/cloud/network/vpc/NetworkACLService.java
@@ -96,4 +96,6 @@ public interface NetworkACLService {
      * Updates a network item ACL to a new position. This method allows users to inform between which ACLs the given ACL will be placed. Therefore, the 'number' field will be filled out by the system in the best way possible to place the ACL accordingly.
*/ NetworkACLItem moveNetworkAclRuleToNewPosition(MoveNetworkAclItemCmd moveNetworkAclItemCmd); -} \ No newline at end of file + + NetworkACLItem moveRuleToTheTopInACLList(NetworkACLItem ruleBeingMoved); +} diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index e3662161fba..0c07268b82f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -124,6 +124,7 @@ import com.cloud.network.security.SecurityGroupManager; import com.cloud.network.security.SecurityGroupService; import com.cloud.network.security.SecurityGroupVO; import com.cloud.network.security.SecurityRule; +import com.cloud.network.vpc.NetworkACL; import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -352,48 +353,50 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne return template; } - private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) { - if (Network.GuestType.Isolated.equals(network.getGuestType())) { - if (Network.State.Allocated.equals(network.getState())) { // Allocated networks won't have IP and rules - return true; + protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose purpose, Network network, int clusterTotalNodeCount) { + List rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose); + for (FirewallRuleVO rule : rules) { + Integer startPort = rule.getSourcePortStart(); + Integer endPort = rule.getSourcePortEnd(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format("Validating rule with purpose: %s for network: %s with ports: %d-%d", purpose.toString(), network.getUuid(), startPort, endPort)); } - IpAddress sourceNatIp = getSourceNatIp(network); - if (sourceNatIp == null) { - throw new InvalidParameterValueException(String.format("Network ID: %s does not have a source NAT IP associated with it. 
To provision a Kubernetes Cluster, source NAT IP is required", network.getUuid())); + if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { + throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for API access", network.getUuid(), purpose.toString().toLowerCase())); } - List rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.Firewall); - for (FirewallRuleVO rule : rules) { - Integer startPort = rule.getSourcePortStart(); - Integer endPort = rule.getSourcePortEnd(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Network rule : " + startPort + " " + endPort); - } - if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for API access", network.getUuid())); - } - if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting firewall rules to provision Kubernetes cluster for node VM SSH access", network.getUuid())); - } - } - rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(sourceNatIp.getId(), FirewallRule.Purpose.PortForwarding); - for (FirewallRuleVO rule : rules) { - Integer startPort = rule.getSourcePortStart(); - Integer endPort = rule.getSourcePortEnd(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Network rule : " + startPort + " " + endPort); - } - if (startPort <= KubernetesClusterActionWorker.CLUSTER_API_PORT && KubernetesClusterActionWorker.CLUSTER_API_PORT <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for API access", network.getUuid())); - } - if (startPort <= KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT && KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterTotalNodeCount <= endPort) { - throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting port forwarding rules to provision Kubernetes cluster for node VM SSH access", network.getUuid())); - } + int expectedSshStart = KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT; + int expectedSshEnd = expectedSshStart + clusterTotalNodeCount - 1; + if (Math.max(expectedSshStart, startPort) <= Math.min(expectedSshEnd, endPort)) { + throw new InvalidParameterValueException(String.format("Network ID: %s has conflicting %s rules to provision Kubernetes cluster for node VM SSH access", network.getUuid(), purpose.toString().toLowerCase())); } } - return true; } - private boolean validateNetwork(Network network, int clusterTotalNodeCount) { + private void validateIsolatedNetwork(Network network, int clusterTotalNodeCount) { + if (!Network.GuestType.Isolated.equals(network.getGuestType())) { + return; + } + if (Network.State.Allocated.equals(network.getState())) { // Allocated networks won't have IP and rules + return; + } + IpAddress sourceNatIp = getSourceNatIp(network); + if (sourceNatIp == null) { + throw new InvalidParameterValueException(String.format("Network ID: %s does not have a source NAT IP associated with 
it. To provision a Kubernetes Cluster, source NAT IP is required", network.getUuid())); + } + validateIsolatedNetworkIpRules(sourceNatIp.getId(), FirewallRule.Purpose.Firewall, network, clusterTotalNodeCount); + validateIsolatedNetworkIpRules(sourceNatIp.getId(), FirewallRule.Purpose.PortForwarding, network, clusterTotalNodeCount); + } + + protected void validateVpcTier(Network network) { + if (Network.State.Allocated.equals(network.getState())) { // Allocated networks won't have IP and rules + return; + } + if (network.getNetworkACLId() == NetworkACL.DEFAULT_DENY) { + throw new InvalidParameterValueException(String.format("Network ID: %s can not be used for Kubernetes cluster as it uses default deny ACL", network.getUuid())); + } + } + + private void validateNetwork(Network network, int clusterTotalNodeCount) { NetworkOffering networkOffering = networkOfferingDao.findById(network.getNetworkOfferingId()); if (networkOffering.isSystemOnly()) { throw new InvalidParameterValueException(String.format("Network ID: %s is for system use only", network.getUuid())); @@ -401,7 +404,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.UserData)) { throw new InvalidParameterValueException(String.format("Network ID: %s does not support userdata that is required for Kubernetes cluster", network.getUuid())); } - if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)) { + Long vpcId = network.getVpcId(); + if (vpcId == null && !networkModel.areServicesSupportedInNetwork(network.getId(), Service.Firewall)) { throw new InvalidParameterValueException(String.format("Network ID: %s does not support firewall that is required for Kubernetes cluster", network.getUuid())); } if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.PortForwarding)) { @@ -410,8 +414,11 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne if (!networkModel.areServicesSupportedInNetwork(network.getId(), Service.Dhcp)) { throw new InvalidParameterValueException(String.format("Network ID: %s does not support DHCP that is required for Kubernetes cluster", network.getUuid())); } + if (network.getVpcId() != null) { + validateVpcTier(network); + return; + } validateIsolatedNetwork(network, clusterTotalNodeCount); - return true; } private boolean validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) { @@ -736,9 +743,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne network = networkDao.findById(networkId); if (Network.GuestType.Isolated.equals(network.getGuestType())) { if (kubernetesClusterDao.listByNetworkId(network.getId()).isEmpty()) { - if (!validateNetwork(network, controlNodesCount + nodesCount)) { - throw new InvalidParameterValueException(String.format("Network ID: %s is not suitable for Kubernetes cluster", network.getUuid())); - } + validateNetwork(network, controlNodesCount + nodesCount); networkModel.checkNetworkPermissions(owner, network); } else { throw new InvalidParameterValueException(String.format("Network ID: %s is already under use by another Kubernetes cluster", network.getUuid())); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java 
index 2114ce40237..0417161c3f5 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -17,9 +17,37 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.config.ApiServiceConfiguration; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; @@ -36,7 +64,10 @@ import com.cloud.network.IpAddressManager; import com.cloud.network.Network; import com.cloud.network.Network.GuestType; import com.cloud.network.NetworkModel; +import com.cloud.network.NetworkService; +import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.NetworkDao; +import com.cloud.network.vpc.VpcService; import com.cloud.projects.ProjectService; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage; @@ -64,37 +95,15 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.ca.CAManager; -import org.apache.cloudstack.config.ApiServiceConfiguration; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; - -import javax.inject.Inject; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; public class KubernetesClusterActionWorker { public static final String CLUSTER_NODE_VM_USER = "cloud"; public static final int CLUSTER_API_PORT = 6443; + public static final int DEFAULT_SSH_PORT = 22; public static final int CLUSTER_NODES_DEFAULT_START_SSH_PORT = 2222; - public static final int CLUSTER_NODES_DEFAULT_SSH_PORT_SG = 22; + public static 
final int CLUSTER_NODES_DEFAULT_SSH_PORT_SG = DEFAULT_SSH_PORT; public static final String CKS_CLUSTER_SECURITY_GROUP_NAME = "CKSSecurityGroup"; @@ -113,12 +122,16 @@ public class KubernetesClusterActionWorker { @Inject protected IpAddressManager ipAddressManager; @Inject + protected IPAddressDao ipAddressDao; + @Inject protected NetworkOrchestrationService networkMgr; @Inject protected NetworkDao networkDao; @Inject protected NetworkModel networkModel; @Inject + protected NetworkService networkService; + @Inject protected ServiceOfferingDao serviceOfferingDao; @Inject protected SSHKeyPairDao sshKeyPairDao; @@ -140,6 +153,8 @@ public class KubernetesClusterActionWorker { protected LaunchPermissionDao launchPermissionDao; @Inject public ProjectService projectService; + @Inject + public VpcService vpcService; protected KubernetesClusterDao kubernetesClusterDao; protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao; @@ -329,7 +344,79 @@ public class KubernetesClusterActionWorker { return ip; } - protected Pair getKubernetesClusterServerIpSshPort(UserVm controlVm) { + protected IpAddress getNetworkSourceNatIp(Network network) { + List addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true); + if (CollectionUtils.isNotEmpty(addresses)) { + return addresses.get(0); + } + LOGGER.warn(String.format("No public IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + return null; + } + + protected IpAddress getVpcTierKubernetesPublicIp(Network network) { + KubernetesClusterDetailsVO detailsVO = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.PUBLIC_IP_ID); + if (detailsVO == null || StringUtils.isEmpty(detailsVO.getValue())) { + return null; + } + IpAddress address = ipAddressDao.findByUuid(detailsVO.getValue()); + if (address == null || network.getVpcId() != address.getVpcId()) { + LOGGER.warn(String.format("Public IP with ID: %s linked to the Kubernetes cluster: %s is not usable", detailsVO.getValue(), kubernetesCluster.getName())); + return null; + } + return address; + } + + protected IpAddress acquireVpcTierKubernetesPublicIp(Network network) throws + InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException { + IpAddress ip = networkService.allocateIP(owner, kubernetesCluster.getZoneId(), network.getId(), null, null); + if (ip == null) { + return null; + } + ip = vpcService.associateIPToVpc(ip.getId(), network.getVpcId()); + ip = ipAddressManager.associateIPToGuestNetwork(ip.getId(), network.getId(), false); + kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), ApiConstants.PUBLIC_IP_ID, ip.getUuid(), false); + return ip; + } + + protected Pair getKubernetesClusterServerIpSshPortForIsolatedNetwork(Network network) { + String ip = null; + IpAddress address = getNetworkSourceNatIp(network); + if (address != null) { + ip = address.getAddress().addr(); + } + return new Pair<>(ip, CLUSTER_NODES_DEFAULT_START_SSH_PORT); + } + + protected Pair getKubernetesClusterServerIpSshPortForSharedNetwork(UserVm controlVm) { + int port = DEFAULT_SSH_PORT; + controlVm = fetchControlVmIfMissing(controlVm); + if (controlVm == null) { + LOGGER.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName())); + return new Pair<>(null, port); + } + return new Pair<>(controlVm.getPrivateIpAddress(), port); + } + + protected Pair getKubernetesClusterServerIpSshPortForVpcTier(Network network, + boolean 
acquireNewPublicIpForVpcTierIfNeeded) throws
+            InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException {
+        int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT;
+        IpAddress address = getVpcTierKubernetesPublicIp(network);
+        if (address != null) {
+            return new Pair<>(address.getAddress().addr(), port);
+        }
+        if (acquireNewPublicIpForVpcTierIfNeeded) {
+            address = acquireVpcTierKubernetesPublicIp(network);
+            if (address != null) {
+                return new Pair<>(address.getAddress().addr(), port);
+            }
+        }
+        LOGGER.warn(String.format("No public IP found for the VPC tier: %s, Kubernetes cluster : %s", network, kubernetesCluster.getName()));
+        return new Pair<>(null, port);
+    }
+
+    protected Pair<String, Integer> getKubernetesClusterServerIpSshPort(UserVm controlVm, boolean acquireNewPublicIpForVpcTierIfNeeded) throws
+            InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException {
         int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT;
         KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS);
         if (detail != null && StringUtils.isNotEmpty(detail.getValue())) {
@@ -340,32 +427,27 @@
             LOGGER.warn(String.format("Network for Kubernetes cluster : %s cannot be found", kubernetesCluster.getName()));
             return new Pair<>(null, port);
         }
+        if (network.getVpcId() != null) {
+            return getKubernetesClusterServerIpSshPortForVpcTier(network, acquireNewPublicIpForVpcTierIfNeeded);
+        }
         if (Network.GuestType.Isolated.equals(network.getGuestType())) {
-            List<IpAddress> addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true);
-            if (CollectionUtils.isEmpty(addresses)) {
-                LOGGER.warn(String.format("No public IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName()));
-                return new Pair<>(null, port);
-            }
-            for (IpAddress address : addresses) {
-                if (address.isSourceNat()) {
-                    return new Pair<>(address.getAddress().addr(), port);
-                }
-            }
-            LOGGER.warn(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName()));
-            return new Pair<>(null, port);
+            return getKubernetesClusterServerIpSshPortForIsolatedNetwork(network);
         } else if (Network.GuestType.Shared.equals(network.getGuestType())) {
-            port = 22;
-            controlVm = fetchControlVmIfMissing(controlVm);
-            if (controlVm == null) {
-                LOGGER.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName()));
-                return new Pair<>(null, port);
-            }
-            return new Pair<>(controlVm.getPrivateIpAddress(), port);
+            return getKubernetesClusterServerIpSshPortForSharedNetwork(controlVm);
         }
         LOGGER.warn(String.format("Unable to retrieve server IP address for Kubernetes cluster : %s", kubernetesCluster.getName()));
         return new Pair<>(null, port);
     }
 
+    protected Pair<String, Integer> getKubernetesClusterServerIpSshPort(UserVm controlVm) {
+        try {
+            return getKubernetesClusterServerIpSshPort(controlVm, false);
+        } catch (InsufficientAddressCapacityException | ResourceAllocationException | ResourceUnavailableException e) {
+            LOGGER.debug("This exception should not have occurred", e);
+        }
+        return new Pair<>(null, CLUSTER_NODES_DEFAULT_START_SSH_PORT);
+    }
+
     protected void attachIsoKubernetesVMs(List<UserVm> clusterVMs, final KubernetesSupportedVersion kubernetesSupportedVersion) throws CloudRuntimeException {
         KubernetesSupportedVersion version =
kubernetesSupportedVersion;
         if (kubernetesSupportedVersion == null) {
diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java
index a3e372778eb..29da3ffb59d 100644
--- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java
+++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterDestroyWorker.java
@@ -19,16 +19,19 @@ package com.cloud.kubernetes.cluster.actionworkers;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import javax.inject.Inject;
 
 import org.apache.cloudstack.annotation.AnnotationService;
 import org.apache.cloudstack.annotation.dao.AnnotationDao;
+import org.apache.cloudstack.api.ApiConstants;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Level;
 
 import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientAddressCapacityException;
 import com.cloud.exception.ManagementServerException;
 import com.cloud.exception.PermissionDeniedException;
 import com.cloud.exception.ResourceUnavailableException;
@@ -134,25 +137,15 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod
         }
     }
 
-    private void deleteKubernetesClusterNetworkRules() throws ManagementServerException {
-        NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId());
-        if (network == null) {
-            return;
-        }
-        List<Long> removedVmIds = new ArrayList<>();
-        if (!CollectionUtils.isEmpty(clusterVMs)) {
-            for (KubernetesClusterVmMapVO clusterVM : clusterVMs) {
-                removedVmIds.add(clusterVM.getVmId());
-            }
-        }
-        IpAddress publicIp = getSourceNatIp(network);
+    protected void deleteKubernetesClusterIsolatedNetworkRules(Network network, List<Long> removedVmIds) throws ManagementServerException {
+        IpAddress publicIp = getNetworkSourceNatIp(network);
         if (publicIp == null) {
             throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s", network.getName()));
         }
         try {
-            removeLoadBalancingRule(publicIp, network, owner, CLUSTER_API_PORT);
+            removeLoadBalancingRule(publicIp, network, owner);
         } catch (ResourceUnavailableException e) {
-            throw new ManagementServerException(String.format("Failed to KubernetesCluster load balancing rule for network : %s", network.getName()));
+            throw new ManagementServerException(String.format("Failed to remove Kubernetes cluster load balancing rule for network : %s", network.getName()), e);
         }
         FirewallRule firewallRule = removeApiFirewallRule(publicIp);
         if (firewallRule == null) {
@@ -162,6 +155,19 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod
         if (firewallRule == null) {
             logMessage(Level.WARN, "Firewall rule for SSH access can't be removed", null);
         }
+        try {
+            removePortForwardingRules(publicIp, network, owner, removedVmIds);
+        } catch (ResourceUnavailableException e) {
+            throw new ManagementServerException(String.format("Failed to remove Kubernetes cluster port forwarding rules for network : %s", network.getName()), e);
+        }
+    }
+
+    protected void deleteKubernetesClusterVpcTierRules(Network network, List<Long> removedVmIds) throws ManagementServerException {
+        IpAddress publicIp =
getVpcTierKubernetesPublicIp(network); + if (publicIp == null) { + return; + } + removeVpcTierAclRules(network); try { removePortForwardingRules(publicIp, network, owner, removedVmIds); } catch (ResourceUnavailableException e) { @@ -169,6 +175,22 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod } } + private void deleteKubernetesClusterNetworkRules() throws ManagementServerException { + NetworkVO network = networkDao.findById(kubernetesCluster.getNetworkId()); + if (network == null) { + return; + } + List removedVmIds = new ArrayList<>(); + if (!CollectionUtils.isEmpty(clusterVMs)) { + removedVmIds = clusterVMs.stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + } + if (network.getVpcId() != null) { + deleteKubernetesClusterVpcTierRules(network, removedVmIds); + return; + } + deleteKubernetesClusterIsolatedNetworkRules(network, removedVmIds); + } + private void validateClusterVMsDestroyed() { if(clusterVMs!=null && !clusterVMs.isEmpty()) { // Wait for few seconds to get all VMs really expunged final int maxRetries = 3; @@ -200,6 +222,19 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod } } + private void releaseVpcTierPublicIpIfNeeded() throws InsufficientAddressCapacityException { + NetworkVO networkVO = networkDao.findById(kubernetesCluster.getNetworkId()); + if (networkVO == null || networkVO.getVpcId() == null) { + return; + } + IpAddress address = getVpcTierKubernetesPublicIp(networkVO); + if (address == null) { + return; + } + networkService.releaseIpAddress(address.getId()); + kubernetesClusterDetailsDao.removeDetail(kubernetesCluster.getId(), ApiConstants.PUBLIC_IP_ID); + } + public boolean destroy() throws CloudRuntimeException { init(); validateClusterSate(); @@ -258,6 +293,14 @@ public class KubernetesClusterDestroyWorker extends KubernetesClusterResourceMod updateKubernetesClusterEntryForGC(); throw new CloudRuntimeException(msg, e); } + try { + releaseVpcTierPublicIpIfNeeded(); + } catch (InsufficientAddressCapacityException e) { + String msg = String.format("Failed to release public IP for VPC tier used by Kubernetes cluster : %s", kubernetesCluster.getName()); + LOGGER.warn(msg, e); + updateKubernetesClusterEntryForGC(); + throw new CloudRuntimeException(msg, e); + } } } else { String msg = String.format("Failed to destroy one or more VMs as part of Kubernetes cluster : %s cleanup", kubernetesCluster.getName()); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index bc06d16e8f9..726aadff4d0 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -27,12 +27,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import javax.inject.Inject; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.command.user.firewall.CreateFirewallRuleCmd; +import 
org.apache.cloudstack.api.command.user.network.CreateNetworkACLCmd; import org.apache.cloudstack.api.command.user.vm.StartVMCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.commons.codec.binary.Base64; @@ -47,11 +49,14 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeployDestination; +import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -71,9 +76,15 @@ import com.cloud.network.firewall.FirewallService; import com.cloud.network.lb.LoadBalancingRulesService; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.rules.LoadBalancer; import com.cloud.network.rules.PortForwardingRuleVO; import com.cloud.network.rules.RulesService; import com.cloud.network.rules.dao.PortForwardingRulesDao; +import com.cloud.network.vpc.NetworkACL; +import com.cloud.network.vpc.NetworkACLItem; +import com.cloud.network.vpc.NetworkACLItemDao; +import com.cloud.network.vpc.NetworkACLItemVO; +import com.cloud.network.vpc.NetworkACLService; import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; import com.cloud.storage.Volume; @@ -115,6 +126,10 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu @Inject protected FirewallService firewallService; @Inject + protected NetworkACLService networkACLService; + @Inject + protected NetworkACLItemDao networkACLItemDao; + @Inject protected LoadBalancingRulesService lbService; @Inject protected RulesService rulesService; @@ -403,19 +418,6 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu return nodeVm; } - protected IpAddress getSourceNatIp(Network network) { - List addresses = networkModel.listPublicIpsAssignedToGuestNtwk(network.getId(), true); - if (CollectionUtils.isEmpty(addresses)) { - return null; - } - for (IpAddress address : addresses) { - if (address.isSourceNat()) { - return address; - } - } - return null; - } - protected void provisionFirewallRules(final IpAddress publicIp, final Account account, int startPort, int endPort) throws NoSuchFieldException, IllegalAccessException, ResourceUnavailableException, NetworkRuleConflictException { List sourceCidrList = new ArrayList(); @@ -448,52 +450,51 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu firewallService.applyIngressFwRules(publicIp.getId(), account); } + protected void provisionPublicIpPortForwardingRule(IpAddress publicIp, Network network, Account account, + final long vmId, final int sourcePort, final int destPort) throws NetworkRuleConflictException, ResourceUnavailableException { + final long publicIpId = publicIp.getId(); + final long networkId = network.getId(); + final long accountId = account.getId(); + final long domainId = account.getDomainId(); + Nic vmNic = networkModel.getNicInNetwork(vmId, networkId); + final Ip vmIp = new Ip(vmNic.getIPv4Address()); + 
PortForwardingRuleVO pfRule = Transaction.execute((TransactionCallbackWithException) status -> { + PortForwardingRuleVO newRule = + new PortForwardingRuleVO(null, publicIpId, + sourcePort, sourcePort, + vmIp, + destPort, destPort, + "tcp", networkId, accountId, domainId, vmId); + newRule.setDisplay(true); + newRule.setState(FirewallRule.State.Add); + newRule = portForwardingRulesDao.persist(newRule); + return newRule; + }); + rulesService.applyPortForwardingRules(publicIp.getId(), account); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned SSH port forwarding rule: %s from port %d to %d on %s to the VM IP : %s in Kubernetes cluster : %s", pfRule.getUuid(), sourcePort, destPort, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); + } + } + /** * To provision SSH port forwarding rules for the given Kubernetes cluster * for its given virtual machines + * * @param publicIp * @param network * @param account - * @param List clusterVMIds (when empty then method must be called while - * down-scaling of the KubernetesCluster therefore no new rules - * to be added) - * @param firewallRuleSourcePortStart + * @param clusterVMIds (when empty then method must be called while + * down-scaling of the KubernetesCluster therefore no new rules + * to be added) * @throws ResourceUnavailableException * @throws NetworkRuleConflictException */ protected void provisionSshPortForwardingRules(IpAddress publicIp, Network network, Account account, - List clusterVMIds, int firewallRuleSourcePortStart) throws ResourceUnavailableException, + List clusterVMIds) throws ResourceUnavailableException, NetworkRuleConflictException { if (!CollectionUtils.isEmpty(clusterVMIds)) { - final long publicIpId = publicIp.getId(); - final long networkId = network.getId(); - final long accountId = account.getId(); - final long domainId = account.getDomainId(); for (int i = 0; i < clusterVMIds.size(); ++i) { - long vmId = clusterVMIds.get(i); - Nic vmNic = networkModel.getNicInNetwork(vmId, networkId); - final Ip vmIp = new Ip(vmNic.getIPv4Address()); - final long vmIdFinal = vmId; - final int srcPortFinal = firewallRuleSourcePortStart + i; - PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException() { - @Override - public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { - PortForwardingRuleVO newRule = - new PortForwardingRuleVO(null, publicIpId, - srcPortFinal, srcPortFinal, - vmIp, - 22, 22, - "tcp", networkId, accountId, domainId, vmIdFinal); - newRule.setDisplay(true); - newRule.setState(FirewallRule.State.Add); - newRule = portForwardingRulesDao.persist(newRule); - return newRule; - } - }); - rulesService.applyPortForwardingRules(publicIp.getId(), account); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned SSH port forwarding rule from port %d to 22 on %s to the VM IP : %s in Kubernetes cluster : %s", srcPortFinal, publicIp.getAddress().addr(), vmIp.toString(), kubernetesCluster.getName())); - } + provisionPublicIpPortForwardingRule(publicIp, network, account, clusterVMIds.get(i), CLUSTER_NODES_DEFAULT_START_SSH_PORT + i, DEFAULT_SSH_PORT); } } } @@ -552,19 +553,205 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu } protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network, - final Account account, final int port) throws ResourceUnavailableException { + final Account account) throws ResourceUnavailableException { List 
rules = loadBalancerDao.listByIpAddress(publicIp.getId()); for (LoadBalancerVO rule : rules) { if (rule.getNetworkId() == network.getId() && rule.getAccountId() == account.getId() && - rule.getSourcePortStart() == port && - rule.getSourcePortEnd() == port) { + rule.getSourcePortStart() == CLUSTER_API_PORT && + rule.getSourcePortEnd() == CLUSTER_API_PORT) { lbService.deleteLoadBalancerRule(rule.getId(), true); break; } } } + protected void provisionVpcTierAllowPortACLRule(final Network network, int startPort, int endPorts) throws NoSuchFieldException, + IllegalAccessException, ResourceUnavailableException { + List aclItems = networkACLItemDao.listByACL(network.getNetworkACLId()); + aclItems = aclItems.stream().filter(x -> !NetworkACLItem.State.Revoke.equals(x.getState())).collect(Collectors.toList()); + CreateNetworkACLCmd rule = new CreateNetworkACLCmd(); + rule = ComponentContext.inject(rule); + Map fieldValues = Map.of( + "protocol", "TCP", + "publicStartPort", startPort, + "publicEndPort", endPorts, + "trafficType", NetworkACLItem.TrafficType.Ingress.toString(), + "networkId", network.getId(), + "aclId", network.getNetworkACLId(), + "action", NetworkACLItem.Action.Allow.toString() + ); + for (Map.Entry entry : fieldValues.entrySet()) { + Field field = rule.getClass().getDeclaredField(entry.getKey()); + field.setAccessible(true); + field.set(rule, entry.getValue()); + } + NetworkACLItem aclRule = networkACLService.createNetworkACLItem(rule); + networkACLService.moveRuleToTheTopInACLList(aclRule); + networkACLService.applyNetworkACL(aclRule.getAclId()); + } + + protected void removeVpcTierAllowPortACLRule(final Network network, int startPort, int endPort) throws NoSuchFieldException, + IllegalAccessException, ResourceUnavailableException { + List aclItems = networkACLItemDao.listByACL(network.getNetworkACLId()); + aclItems = aclItems.stream().filter(x -> (x.getProtocol() != null && + x.getProtocol().equals("TCP") && + x.getSourcePortStart() != null && + x.getSourcePortStart().equals(startPort) && + x.getSourcePortEnd() != null && + x.getSourcePortEnd().equals(endPort) && + x.getAction().equals(NetworkACLItem.Action.Allow))) + .collect(Collectors.toList()); + + for (NetworkACLItemVO aclItem : aclItems) { + networkACLService.revokeNetworkACLItem(aclItem.getId()); + } + } + + protected void provisionLoadBalancerRule(final IpAddress publicIp, final Network network, + final Account account, final List clusterVMIds, final int port) throws NetworkRuleConflictException, + InsufficientAddressCapacityException { + LoadBalancer lb = lbService.createPublicLoadBalancerRule(null, "api-lb", "LB rule for API access", + port, port, port, port, + publicIp.getId(), NetUtils.TCP_PROTO, "roundrobin", network.getId(), + account.getId(), false, NetUtils.TCP_PROTO, true); + + Map> vmIdIpMap = new HashMap<>(); + for (int i = 0; i < kubernetesCluster.getControlNodeCount(); ++i) { + List ips = new ArrayList<>(); + Nic controlVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); + ips.add(controlVmNic.getIPv4Address()); + vmIdIpMap.put(clusterVMIds.get(i), ips); + } + lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap, false); + } + + protected void createFirewallRules(IpAddress publicIp, List clusterVMIds, boolean apiRule) throws ManagementServerException { + // Firewall rule for SSH access on each node VM + try { + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; + provisionFirewallRules(publicIp, owner, 
CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + if (!apiRule) { + return; + } + // Firewall rule for API access for control node VMs + try { + provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", + CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + /** + * Setup network rules for Kubernetes cluster + * Open up firewall port CLUSTER_API_PORT, secure port on which Kubernetes + * API server is running. Also create load balancing rule to forward public + * IP traffic to control VMs' private IP. + * Open up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n + * for SSH access. Also create port-forwarding rule to forward public IP traffic to all + * @param network + * @param clusterVMIds + * @throws ManagementServerException + */ + protected void setupKubernetesClusterIsolatedNetworkRules(IpAddress publicIp, Network network, List clusterVMIds, boolean apiRule) throws ManagementServerException { + createFirewallRules(publicIp, clusterVMIds, apiRule); + + // Port forwarding rule for SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + + if (!apiRule) { + return; + } + // Load balancer rule for API access for control node VMs + try { + provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); + } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { + throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + protected void createVpcTierAclRules(Network network) throws ManagementServerException { + if (network.getNetworkACLId() == NetworkACL.DEFAULT_ALLOW) { + return; + } + // ACL rule for API access for control node VMs + try { + provisionVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", + CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | 
PermissionDeniedException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + try { + provisionVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Provisioned ACL rule to open up port %d on %s for Kubernetes cluster %s", + DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | InvalidParameterValueException | PermissionDeniedException e) { + throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + protected void removeVpcTierAclRules(Network network) throws ManagementServerException { + if (network.getNetworkACLId() == NetworkACL.DEFAULT_ALLOW) { + return; + } + // ACL rule for API access for control node VMs + try { + removeVpcTierAllowPortACLRule(network, CLUSTER_API_PORT, CLUSTER_API_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", + CLUSTER_API_PORT, publicIpAddress, kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to remove network ACL rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + // ACL rule for SSH access for all node VMs + try { + removeVpcTierAllowPortACLRule(network, DEFAULT_SSH_PORT, DEFAULT_SSH_PORT); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(String.format("Removed network ACL rule to open up port %d on %s for Kubernetes cluster %s", + DEFAULT_SSH_PORT, publicIpAddress, kubernetesCluster.getName())); + } + } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to remove network ACL rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + protected void setupKubernetesClusterVpcTierRules(IpAddress publicIp, Network network, List clusterVMIds) throws ManagementServerException { + // Create ACL rules + createVpcTierAclRules(network); + // Add port forwarding for API access + try { + provisionPublicIpPortForwardingRule(publicIp, network, owner, clusterVMIds.get(0), CLUSTER_API_PORT, CLUSTER_API_PORT); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate API port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + // Add port forwarding rule for SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + protected String getKubernetesClusterNodeNamePrefix() { String prefix = kubernetesCluster.getName(); if (!NetUtils.verifyDomainNameLabel(prefix, true)) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index b6c9f52640e..e3708673f9d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -115,6 +115,44 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif logTransitStateToFailedIfNeededAndThrow(logLevel, message, null); } + private void scaleKubernetesClusterIsolatedNetworkRules(final List clusterVMIds) throws ManagementServerException { + IpAddress publicIp = getNetworkSourceNatIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + } + + // Remove existing SSH firewall rules + FirewallRule firewallRule = removeSshFirewallRule(publicIp); + if (firewallRule == null) { + throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); + } + int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); + try { + removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd); + } catch (ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + setupKubernetesClusterIsolatedNetworkRules(publicIp, network, clusterVMIds, false); + } + + private void scaleKubernetesClusterVpcTierRules(final List clusterVMIds) throws ManagementServerException { + IpAddress publicIp = getVpcTierKubernetesPublicIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No public IP addresses found for VPC tier : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + } + try { + removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1); + } catch (ResourceUnavailableException e) { + throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + // Add port forwarding rule for SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + /** * Scale network rules for an existing Kubernetes cluster while scaling it * Open up firewall for SSH access from port NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n. 
@@ -130,40 +168,11 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif } return; } - IpAddress publicIp = getSourceNatIp(network); - if (publicIp == null) { - throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); - } - - // Remove existing SSH firewall rules - FirewallRule firewallRule = removeSshFirewallRule(publicIp); - if (firewallRule == null) { - throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); - } - int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); - int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; - // Provision new SSH firewall rules - try { - provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster %s", - CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); - } - } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { - throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - - try { - removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd); - } catch (ResourceUnavailableException e) { - throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - - try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); - } catch (ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + if (network.getVpcId() != null) { + scaleKubernetesClusterVpcTierRules(clusterVMIds); + return; } + scaleKubernetesClusterIsolatedNetworkRules(clusterVMIds); } private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 39e45d1f9ed..84ad9bdc0a6 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -17,6 +17,27 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.IOException; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.InternalIdentity; +import 
org.apache.cloudstack.framework.ca.Certificate; +import org.apache.cloudstack.utils.security.CertUtils; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Level; + import com.cloud.dc.DataCenter; import com.cloud.dc.Vlan; import com.cloud.dc.VlanVO; @@ -25,7 +46,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ManagementServerException; -import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; @@ -40,7 +61,6 @@ import com.cloud.kubernetes.version.KubernetesVersionManagerImpl; import com.cloud.network.IpAddress; import com.cloud.network.Network; import com.cloud.network.addr.PublicIp; -import com.cloud.network.rules.LoadBalancer; import com.cloud.offering.ServiceOffering; import com.cloud.storage.LaunchPermissionVO; import com.cloud.user.Account; @@ -49,33 +69,11 @@ import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; -import com.cloud.utils.net.NetUtils; -import com.cloud.vm.Nic; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmDetailConstants; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.InternalIdentity; -import org.apache.cloudstack.framework.ca.Certificate; -import org.apache.cloudstack.utils.security.CertUtils; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Level; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker { @@ -378,91 +376,28 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif return network; } - private void provisionLoadBalancerRule(final IpAddress publicIp, final Network network, - final Account account, final List clusterVMIds, final int port) throws NetworkRuleConflictException, - InsufficientAddressCapacityException { - LoadBalancer lb = lbService.createPublicLoadBalancerRule(null, "api-lb", "LB rule for API access", - port, port, port, port, - publicIp.getId(), NetUtils.TCP_PROTO, "roundrobin", network.getId(), - account.getId(), false, NetUtils.TCP_PROTO, true); - - Map> vmIdIpMap = new HashMap<>(); - for (int i = 0; i < kubernetesCluster.getControlNodeCount(); ++i) { - List ips = new ArrayList<>(); - Nic controlVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); - ips.add(controlVmNic.getIPv4Address()); - vmIdIpMap.put(clusterVMIds.get(i), ips); - } - lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap, false); - } - - /** - * Setup network rules for Kubernetes 
cluster - * Open up firewall port CLUSTER_API_PORT, secure port on which Kubernetes - * API server is running. Also create load balancing rule to forward public - * IP traffic to control VMs' private IP. - * Open up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n - * for SSH access. Also create port-forwarding rule to forward public IP traffic to all - * @param network - * @param clusterVMs - * @throws ManagementServerException - */ - private void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { + protected void setupKubernetesClusterNetworkRules(Network network, List clusterVMs) throws ManagementServerException { if (!Network.GuestType.Isolated.equals(network.getGuestType())) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); } return; } - List clusterVMIds = new ArrayList<>(); - for (UserVm vm : clusterVMs) { - clusterVMIds.add(vm.getId()); + List clusterVMIds = clusterVMs.stream().map(UserVm::getId).collect(Collectors.toList()); + if (network.getVpcId() != null) { + IpAddress publicIp = getVpcTierKubernetesPublicIp(network); + if (publicIp == null) { + throw new ManagementServerException(String.format("No public IP addresses found for VPC tier : %s, Kubernetes cluster : %s", network.getName(), kubernetesCluster.getName())); + } + setupKubernetesClusterVpcTierRules(publicIp, network, clusterVMIds); + return; } - IpAddress publicIp = getSourceNatIp(network); + IpAddress publicIp = getNetworkSourceNatIp(network); if (publicIp == null) { throw new ManagementServerException(String.format("No source NAT IP addresses found for network : %s, Kubernetes cluster : %s", - network.getName(), kubernetesCluster.getName())); - } - - createFirewallRules(publicIp, clusterVMIds); - - // Load balancer rule fo API access for control node VMs - try { - provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); - } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { - throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - - // Port forwarding rule fo SSH access on each node VM - try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); - } catch (ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - } - - private void createFirewallRules(IpAddress publicIp, List clusterVMIds) throws ManagementServerException { - // Firewall rule fo API access for control node VMs - try { - provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", - CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); - } - } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", 
kubernetesCluster.getName()), e); - } - - // Firewall rule fo SSH access on each node VM - try { - int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; - provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); - if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); - } - } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + network.getName(), kubernetesCluster.getName())); } + setupKubernetesClusterIsolatedNetworkRules(publicIp, network, clusterVMIds, true); } private void startKubernetesClusterVMs() { @@ -546,7 +481,12 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif } catch (ManagementServerException e) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as its network cannot be started", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } - Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + Pair publicIpSshPort = new Pair<>(null, null); + try { + publicIpSshPort = getKubernetesClusterServerIpSshPort(null, true); + } catch (InsufficientAddressCapacityException | ResourceAllocationException | ResourceUnavailableException e) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as failed to acquire public IP" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); + } publicIpAddress = publicIpSshPort.first(); if (StringUtils.isEmpty(publicIpAddress) && (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getControlNodeCount() > 1)) { // Shared network, single-control node cluster won't have an IP yet diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java index 8475c9ebaf6..52e555c824a 100644 --- a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImplTest.java @@ -16,6 +16,8 @@ // under the License. 
package com.cloud.kubernetes.cluster; + +import java.util.ArrayList; import java.util.List; import org.junit.Test; @@ -31,12 +33,21 @@ import com.cloud.api.query.vo.TemplateJoinVO; import com.cloud.dc.DataCenter; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; +import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterActionWorker; +import com.cloud.network.Network; +import com.cloud.network.dao.FirewallRulesDao; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.FirewallRuleVO; +import com.cloud.network.vpc.NetworkACL; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; @RunWith(MockitoJUnitRunner.class) public class KubernetesClusterManagerImplTest { + @Mock + FirewallRulesDao firewallRulesDao; + @Mock VMTemplateDao templateDao; @@ -45,11 +56,84 @@ public class KubernetesClusterManagerImplTest { @Spy @InjectMocks - KubernetesClusterManagerImpl clusterManager; + KubernetesClusterManagerImpl kubernetesClusterManager; + + @Test + public void testValidateVpcTierAllocated() { + Network network = Mockito.mock(Network.class); + Mockito.when(network.getState()).thenReturn(Network.State.Allocated); + kubernetesClusterManager.validateVpcTier(network); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidateVpcTierDefaultDenyRule() { + Network network = Mockito.mock(Network.class); + Mockito.when(network.getState()).thenReturn(Network.State.Implemented); + Mockito.when(network.getNetworkACLId()).thenReturn(NetworkACL.DEFAULT_DENY); + kubernetesClusterManager.validateVpcTier(network); + } + + @Test + public void testValidateVpcTierValid() { + Network network = Mockito.mock(Network.class); + Mockito.when(network.getState()).thenReturn(Network.State.Implemented); + Mockito.when(network.getNetworkACLId()).thenReturn(NetworkACL.DEFAULT_ALLOW); + kubernetesClusterManager.validateVpcTier(network); + } + + @Test + public void validateIsolatedNetworkIpRulesNoRules() { + long ipId = 1L; + FirewallRule.Purpose purpose = FirewallRule.Purpose.Firewall; + Network network = Mockito.mock(Network.class); + Mockito.when(firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose)).thenReturn(new ArrayList<>()); + kubernetesClusterManager.validateIsolatedNetworkIpRules(ipId, FirewallRule.Purpose.Firewall, network, 3); + } + + private FirewallRuleVO createRule(int startPort, int endPort) { + FirewallRuleVO rule = new FirewallRuleVO(null, null, startPort, endPort, "tcp", 1, 1, 1, FirewallRule.Purpose.Firewall, List.of("0.0.0.0/0"), null, null, null, FirewallRule.TrafficType.Ingress); + return rule; + } + + @Test + public void validateIsolatedNetworkIpRulesNoConflictingRules() { + long ipId = 1L; + FirewallRule.Purpose purpose = FirewallRule.Purpose.Firewall; + Network network = Mockito.mock(Network.class); + Mockito.when(firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose)).thenReturn(List.of(createRule(80, 80), createRule(443, 443))); + kubernetesClusterManager.validateIsolatedNetworkIpRules(ipId, FirewallRule.Purpose.Firewall, network, 3); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateIsolatedNetworkIpRulesApiConflictingRules() { + long ipId = 1L; + FirewallRule.Purpose purpose = FirewallRule.Purpose.Firewall; + Network network = Mockito.mock(Network.class); + Mockito.when(firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose)).thenReturn(List.of(createRule(6440, 6445), createRule(443, 443))); 
+ kubernetesClusterManager.validateIsolatedNetworkIpRules(ipId, FirewallRule.Purpose.Firewall, network, 3); + } + + @Test(expected = InvalidParameterValueException.class) + public void validateIsolatedNetworkIpRulesSshConflictingRules() { + long ipId = 1L; + FirewallRule.Purpose purpose = FirewallRule.Purpose.Firewall; + Network network = Mockito.mock(Network.class); + Mockito.when(firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose)).thenReturn(List.of(createRule(2200, KubernetesClusterActionWorker.CLUSTER_NODES_DEFAULT_START_SSH_PORT), createRule(443, 443))); + kubernetesClusterManager.validateIsolatedNetworkIpRules(ipId, FirewallRule.Purpose.Firewall, network, 3); + } + + @Test + public void validateIsolatedNetworkIpRulesNearConflictingRules() { + long ipId = 1L; + FirewallRule.Purpose purpose = FirewallRule.Purpose.Firewall; + Network network = Mockito.mock(Network.class); + Mockito.when(firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose)).thenReturn(List.of(createRule(2220, 2221), createRule(2225, 2227), createRule(6440, 6442), createRule(6444, 6446))); + kubernetesClusterManager.validateIsolatedNetworkIpRules(ipId, FirewallRule.Purpose.Firewall, network, 3); + } @Test public void testValidateKubernetesClusterScaleSizeNullNewSizeNoError() { - clusterManager.validateKubernetesClusterScaleSize(Mockito.mock(KubernetesClusterVO.class), null, 100, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(Mockito.mock(KubernetesClusterVO.class), null, 100, Mockito.mock(DataCenter.class)); } @Test @@ -57,7 +141,7 @@ public class KubernetesClusterManagerImplTest { Long size = 2L; KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class); Mockito.when(clusterVO.getNodeCount()).thenReturn(size); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, size, 100, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, size, 100, Mockito.mock(DataCenter.class)); } @Test(expected = PermissionDeniedException.class) @@ -66,7 +150,7 @@ public class KubernetesClusterManagerImplTest { KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class); Mockito.when(clusterVO.getNodeCount()).thenReturn(size); Mockito.when(clusterVO.getState()).thenReturn(KubernetesCluster.State.Stopped); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 3L, 100, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 3L, 100, Mockito.mock(DataCenter.class)); } @Test(expected = InvalidParameterValueException.class) @@ -75,7 +159,7 @@ public class KubernetesClusterManagerImplTest { KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class); Mockito.when(clusterVO.getState()).thenReturn(KubernetesCluster.State.Running); Mockito.when(clusterVO.getNodeCount()).thenReturn(size); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 0L, 100, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 0L, 100, Mockito.mock(DataCenter.class)); } @Test(expected = InvalidParameterValueException.class) @@ -83,7 +167,7 @@ public class KubernetesClusterManagerImplTest { KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class); Mockito.when(clusterVO.getState()).thenReturn(KubernetesCluster.State.Running); Mockito.when(clusterVO.getControlNodeCount()).thenReturn(1L); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 4, 
Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 4, Mockito.mock(DataCenter.class)); } @Test @@ -92,7 +176,7 @@ public class KubernetesClusterManagerImplTest { Mockito.when(clusterVO.getState()).thenReturn(KubernetesCluster.State.Running); Mockito.when(clusterVO.getControlNodeCount()).thenReturn(1L); Mockito.when(clusterVO.getNodeCount()).thenReturn(4L); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 2L, 10, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 2L, 10, Mockito.mock(DataCenter.class)); } @Test(expected = InvalidParameterValueException.class) @@ -102,7 +186,7 @@ public class KubernetesClusterManagerImplTest { Mockito.when(clusterVO.getControlNodeCount()).thenReturn(1L); Mockito.when(clusterVO.getNodeCount()).thenReturn(2L); Mockito.when(templateDao.findById(Mockito.anyLong())).thenReturn(null); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); } @Test(expected = InvalidParameterValueException.class) @@ -113,7 +197,7 @@ public class KubernetesClusterManagerImplTest { Mockito.when(clusterVO.getNodeCount()).thenReturn(2L); Mockito.when(templateDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMTemplateVO.class)); Mockito.when(templateJoinDao.newTemplateView(Mockito.any(VMTemplateVO.class), Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(null); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); } @Test @@ -124,6 +208,6 @@ public class KubernetesClusterManagerImplTest { Mockito.when(clusterVO.getNodeCount()).thenReturn(2L); Mockito.when(templateDao.findById(Mockito.anyLong())).thenReturn(Mockito.mock(VMTemplateVO.class)); Mockito.when(templateJoinDao.newTemplateView(Mockito.any(VMTemplateVO.class), Mockito.anyLong(), Mockito.anyBoolean())).thenReturn(List.of(Mockito.mock(TemplateJoinVO.class))); - clusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); + kubernetesClusterManager.validateKubernetesClusterScaleSize(clusterVO, 4L, 10, Mockito.mock(DataCenter.class)); } -} \ No newline at end of file +} diff --git a/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorkerTest.java b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorkerTest.java new file mode 100644 index 00000000000..1eb55808e09 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/test/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorkerTest.java @@ -0,0 +1,133 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.kubernetes.cluster.actionworkers; + +import java.util.UUID; + +import org.apache.cloudstack.api.ApiConstants; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.kubernetes.cluster.KubernetesCluster; +import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; +import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao; +import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao; +import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao; +import com.cloud.network.IpAddress; +import com.cloud.network.Network; +import com.cloud.network.dao.IPAddressDao; +import com.cloud.network.dao.IPAddressVO; + +@RunWith(MockitoJUnitRunner.class) +public class KubernetesClusterActionWorkerTest { + + @Mock + KubernetesClusterDao kubernetesClusterDao; + + @Mock + KubernetesClusterVmMapDao kubernetesClusterVmMapDao; + + @Mock + KubernetesClusterDetailsDao kubernetesClusterDetailsDao; + + @Mock + KubernetesSupportedVersionDao kubernetesSupportedVersionDao; + + @Mock + KubernetesClusterManagerImpl kubernetesClusterManager; + + @Mock + IPAddressDao ipAddressDao; + + KubernetesClusterActionWorker actionWorker = null; + + final static Long DEFAULT_ID = 1L; + + @Before + public void setUp() throws Exception { + kubernetesClusterManager.kubernetesClusterDao = kubernetesClusterDao; + kubernetesClusterManager.kubernetesSupportedVersionDao = kubernetesSupportedVersionDao; + kubernetesClusterManager.kubernetesClusterDetailsDao = kubernetesClusterDetailsDao; + kubernetesClusterManager.kubernetesClusterVmMapDao = kubernetesClusterVmMapDao; + KubernetesCluster kubernetesCluster = Mockito.mock(KubernetesCluster.class); + Mockito.when(kubernetesCluster.getId()).thenReturn(DEFAULT_ID); + actionWorker = new KubernetesClusterActionWorker(kubernetesCluster, kubernetesClusterManager); + actionWorker.ipAddressDao = ipAddressDao; + } + + @Test + public void testGetVpcTierKubernetesPublicIpNullDetail() { + IpAddress result = actionWorker.getVpcTierKubernetesPublicIp(Mockito.mock(Network.class)); + Assert.assertNull(result); + } + + private String mockClusterPublicIpDetail(boolean isNull) { + String uuid = isNull ? 
null : UUID.randomUUID().toString(); + KubernetesClusterDetailsVO detailsVO = new KubernetesClusterDetailsVO(DEFAULT_ID, ApiConstants.PUBLIC_IP_ID, uuid, false); + Mockito.when(kubernetesClusterDetailsDao.findDetail(DEFAULT_ID, ApiConstants.PUBLIC_IP_ID)).thenReturn(detailsVO); + return uuid; + } + + @Test + public void testGetVpcTierKubernetesPublicIpNullDetailValue() { + mockClusterPublicIpDetail(true); + IpAddress result = actionWorker.getVpcTierKubernetesPublicIp(Mockito.mock(Network.class)); + Assert.assertNull(result); + } + + private Network mockNetworkForGetVpcTierKubernetesPublicIpTest() { + Network network = Mockito.mock(Network.class); + Mockito.when(network.getVpcId()).thenReturn(DEFAULT_ID); + return network; + } + + @Test + public void testGetVpcTierKubernetesPublicIpNullVpc() { + String uuid = mockClusterPublicIpDetail(false); + IPAddressVO address = Mockito.mock(IPAddressVO.class); + Mockito.when(ipAddressDao.findByUuid(uuid)).thenReturn(address); + IpAddress result = actionWorker.getVpcTierKubernetesPublicIp(mockNetworkForGetVpcTierKubernetesPublicIpTest()); + Assert.assertNull(result); + } + + @Test + public void testGetVpcTierKubernetesPublicIpDifferentVpc() { + String uuid = mockClusterPublicIpDetail(false); + IPAddressVO address = Mockito.mock(IPAddressVO.class); + Mockito.when(address.getVpcId()).thenReturn(2L); + Mockito.when(ipAddressDao.findByUuid(uuid)).thenReturn(address); + IpAddress result = actionWorker.getVpcTierKubernetesPublicIp(mockNetworkForGetVpcTierKubernetesPublicIpTest()); + Assert.assertNull(result); + } + + @Test + public void testGetVpcTierKubernetesPublicIpValid() { + String uuid = mockClusterPublicIpDetail(false); + IPAddressVO address = Mockito.mock(IPAddressVO.class); + Mockito.when(address.getVpcId()).thenReturn(DEFAULT_ID); + Mockito.when(ipAddressDao.findByUuid(uuid)).thenReturn(address); + IpAddress result = actionWorker.getVpcTierKubernetesPublicIp(mockNetworkForGetVpcTierKubernetesPublicIpTest()); + Assert.assertNotNull(result); + } +} diff --git a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java index 45a39db96a4..ed37eb5b375 100644 --- a/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -977,6 +977,15 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ } } + @Override + public NetworkACLItem moveRuleToTheTopInACLList(NetworkACLItem ruleBeingMoved) { + List allRules = getAllAclRulesSortedByNumber(ruleBeingMoved.getAclId()); + if (allRules.size() == 1) { + return ruleBeingMoved; + } + return moveRuleToTheTop(ruleBeingMoved, allRules); + } + /** * Validates the consistency of the ACL; the validation process is the following. *
    @@ -1028,7 +1037,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ /** * Moves an ACL to the space between to other rules. If there is already enough room to accommodate the ACL rule being moved, we simply get the 'number' field from the previous ACL rule and add one, and then define this new value as the 'number' value for the ACL rule being moved. - * Otherwise, we will need to make room. This process is executed via {@link #updateAclRuleToNewPositionAndExecuteShiftIfNecessary(NetworkACLItemVO, int, List, int)}, which will create the space between ACL rules if necessary. This involves shifting ACL rules to accommodate the rule being moved. + * Otherwise, we will need to make room. This process is executed via {@link #updateAclRuleToNewPositionAndExecuteShiftIfNecessary(NetworkACLItem, int, List, int)}, which will create the space between ACL rules if necessary. This involves shifting ACL rules to accommodate the rule being moved. */ protected NetworkACLItem moveRuleBetweenAclRules(NetworkACLItemVO ruleBeingMoved, List allAclRules, NetworkACLItemVO previousRule, NetworkACLItemVO nextRule) { if (previousRule.getNumber() + 1 != nextRule.getNumber()) { @@ -1070,7 +1079,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ * Move the rule to the top of the ACL rule list. This means that the ACL rule being moved will receive the position '1'. * Also, if necessary other ACL rules will have their 'number' field updated to create room for the new top rule. */ - protected NetworkACLItem moveRuleToTheTop(NetworkACLItemVO ruleBeingMoved, List allAclRules) { + protected NetworkACLItem moveRuleToTheTop(NetworkACLItem ruleBeingMoved, List allAclRules) { return updateAclRuleToNewPositionAndExecuteShiftIfNecessary(ruleBeingMoved, 1, allAclRules, 0); } @@ -1092,9 +1101,8 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ *
  • ACL C - number 4 *
*/ - protected NetworkACLItem updateAclRuleToNewPositionAndExecuteShiftIfNecessary(NetworkACLItemVO ruleBeingMoved, int newNumberFieldValue, List allAclRules, + protected NetworkACLItem updateAclRuleToNewPositionAndExecuteShiftIfNecessary(NetworkACLItem ruleBeingMoved, int newNumberFieldValue, List allAclRules, int indexToStartProcessing) { - ruleBeingMoved.setNumber(newNumberFieldValue); for (int i = indexToStartProcessing; i < allAclRules.size(); i++) { NetworkACLItemVO networkACLItemVO = allAclRules.get(i); if (networkACLItemVO.getId() == ruleBeingMoved.getId()) { @@ -1151,4 +1159,4 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ Account caller = CallContext.current().getCallingAccount(); _accountMgr.checkAccess(caller, null, true, vpc); } -} \ No newline at end of file +} diff --git a/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java b/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java index ecbf983f65b..f5ef1ee98b1 100644 --- a/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/vpc/NetworkACLServiceImplTest.java @@ -26,7 +26,9 @@ import static org.mockito.Mockito.times; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ServerApiException; @@ -1264,6 +1266,8 @@ public class NetworkACLServiceImplTest { @Test public void updateAclRuleToNewPositionAndExecuteShiftIfNecessaryTest() { Mockito.when(previousAclRuleMock.getNumber()).thenReturn(10); + Mockito.when(previousAclRuleMock.getId()).thenReturn(50l); + Mockito.when(nextAclRuleMock.getNumber()).thenReturn(11); Mockito.when(nextAclRuleMock.getId()).thenReturn(50l); @@ -1290,18 +1294,33 @@ public class NetworkACLServiceImplTest { allAclRules.add(aclRuleBeingMovedMock); Mockito.doNothing().when(networkAclItemDaoMock).updateNumberFieldNetworkItem(Mockito.anyLong(), Mockito.anyInt()); - Mockito.doReturn(null).when(networkAclItemDaoMock).findById(Mockito.anyLong()); - networkAclServiceImpl.updateAclRuleToNewPositionAndExecuteShiftIfNecessary(aclRuleBeingMovedMock, 11, allAclRules, 1); + Map updatedItems = new HashMap<>(); + Mockito.doAnswer((Answer) invocation -> { + Long id = (Long)invocation.getArguments()[0]; + int position = (int)invocation.getArguments()[1]; + NetworkACLItemVO item = new NetworkACLItemVO(); + item.setNumber(position); + updatedItems.put(id, item); + return null; + }).when(networkAclItemDaoMock).updateNumberFieldNetworkItem(Mockito.anyLong(), Mockito.anyInt()); + Mockito.doAnswer((Answer) invocation -> { + Long id = (Long)invocation.getArguments()[0]; + return updatedItems.get(id); + }).when(networkAclItemDaoMock).findById(Mockito.anyLong()); - Mockito.verify(aclRuleBeingMovedMock).setNumber(11); - Mockito.verify(nextAclRuleMock).setNumber(12); + NetworkACLItem result = networkAclServiceImpl.updateAclRuleToNewPositionAndExecuteShiftIfNecessary(aclRuleBeingMovedMock, 11, allAclRules, 1); + + Assert.assertNotNull(result); + Assert.assertEquals(11, result.getNumber()); + Assert.assertEquals(11, updatedItems.get(aclRuleBeingMovedMock.getId()).getNumber()); + Assert.assertEquals(12, updatedItems.get(nextAclRuleMock.getId()).getNumber()); Mockito.verify(networkAclItemDaoMock).updateNumberFieldNetworkItem(1l, 11); Mockito.verify(networkAclItemDaoMock).updateNumberFieldNetworkItem(50l, 12); - 
Assert.assertEquals(13, networkACLItemVO12.getNumber()); - Assert.assertEquals(14, networkACLItemVO13.getNumber()); - Assert.assertEquals(15, networkACLItemVO14.getNumber()); + Assert.assertEquals(networkACLItemVO12.getNumber() + 1, updatedItems.get(networkACLItemVO12.getId()).getNumber()); + Assert.assertEquals(networkACLItemVO13.getNumber() + 1, updatedItems.get(networkACLItemVO13.getId()).getNumber()); + Assert.assertEquals(networkACLItemVO14.getNumber() + 1, updatedItems.get(networkACLItemVO14.getId()).getNumber()); } @Test diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 59896cfe480..c227e7fbfa7 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -14,7 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -""" Tests for Kubernetes supported version """ +""" Tests for Kubernetes cluster """ #Import Local Modules from marvin.cloudstackTestCase import cloudstackTestCase @@ -37,11 +37,16 @@ from marvin.cloudstackAPI import (listInfrastructure, from marvin.cloudstackException import CloudstackAPIException from marvin.codes import PASS, FAILED from marvin.lib.base import (Template, + NetworkOffering, Network, ServiceOffering, Account, StoragePool, - Configurations) + Configurations, + VpcOffering, + VPC, + NetworkACLList, + NetworkACL) from marvin.lib.utils import (cleanup_resources, validateList, random_gen) @@ -57,6 +62,11 @@ import time, io, yaml _multiprocess_shared_ = True k8s_cluster = None +VPC_DATA = { + "cidr": "10.1.0.0/22", + "tier1_gateway": "10.1.1.1", + "tier_netmask": "255.255.255.0" +} class TestKubernetesCluster(cloudstackTestCase): @@ -75,6 +85,7 @@ class TestKubernetesCluster(cloudstackTestCase): cls.setup_failed = False cls._cleanup = [] cls.kubernetes_version_ids = [] + cls.vpcAllowAllAclDetailsMap = {} if cls.hypervisorNotSupported == False: cls.endpoint_url = Configurations.list(cls.apiclient, name="endpoint.url")[0].value @@ -342,11 +353,7 @@ class TestKubernetesCluster(cloudstackTestCase): return def tearDown(self): - try: - cleanup_resources(self.apiclient, self.cleanup) - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return + super(TestKubernetesCluster, self).tearDown() @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") @@ -449,7 +456,7 @@ class TestKubernetesCluster(cloudstackTestCase): self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) try: k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2) - self.verifyKubernetesClusterAutocale(k8s_cluster, 1, 2) + self.verifyKubernetesClusterAutoscale(k8s_cluster, 1, 2) up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id) self.assertTrue(up, "Autoscaler pod failed to run") @@ -576,6 +583,33 @@ class TestKubernetesCluster(cloudstackTestCase): self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id) return + @attr(tags=["advanced", "smoke"], required_hardware="true") + @skipTestIf("hypervisorNotSupported") + def test_10_vpc_tier_kubernetes_cluster(self): + """Test to deploy a Kubernetes cluster on VPC + + # Validate the following: + # 1. Deploy a Kubernetes cluster on a VPC tier + # 2. 
Destroy it + """ + if self.setup_failed == True: + self.fail("Setup incomplete") + global k8s_cluster + if k8s_cluster != None and k8s_cluster.id != None: + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.createVpcOffering() + self.createVpcTierOffering() + self.deployVpc() + self.deployNetworkTier() + self.default_network = self.vpc_tier + k8s_cluster = self.getValidKubernetesCluster(1, 1) + + self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id) + self.deleteKubernetesClusterAndVerify(k8s_cluster.id) + self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id) + k8s_cluster = None + return + def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name @@ -783,7 +817,7 @@ class TestKubernetesCluster(cloudstackTestCase): self.verifyKubernetesClusterState(cluster_response, 'Running') self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) - def verifyKubernetesClusterAutocale(self, cluster_response, minsize, maxsize): + def verifyKubernetesClusterAutoscale(self, cluster_response, minsize, maxsize): """Check if Kubernetes cluster state and node sizes are valid after upgrade""" self.verifyKubernetesClusterState(cluster_response, 'Running') @@ -816,3 +850,91 @@ class TestKubernetesCluster(cloudstackTestCase): 'Stopped', "KubernetesCluster not stopped in DB, {}".format(db_cluster_state) ) + + def createVpcOffering(self): + off_service = self.services["vpc_offering"] + self.vpc_offering = VpcOffering.create( + self.apiclient, + off_service + ) + self.cleanup.append(self.vpc_offering) + self.vpc_offering.update(self.apiclient, state='Enabled') + + def createVpcTierOffering(self): + off_service = self.services["nw_offering_isolated_vpc"] + self.vpc_tier_offering = NetworkOffering.create( + self.apiclient, + off_service, + conservemode=False + ) + self.cleanup.append(self.vpc_tier_offering) + self.vpc_tier_offering.update(self.apiclient, state='Enabled') + + def deployAllowEgressDenyIngressVpcInternal(self, cidr): + service = self.services["vpc"] + service["cidr"] = cidr + vpc = VPC.create( + self.apiclient, + service, + vpcofferingid=self.vpc_offering.id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid + ) + self.cleanup.append(vpc) + acl = NetworkACLList.create( + self.apiclient, + services={}, + name="allowegressdenyingress", + description="allowegressdenyingress", + vpcid=vpc.id + ) + rule ={ + "protocol": "all", + "traffictype": "egress", + } + NetworkACL.create(self.apiclient, + services=rule, + aclid=acl.id + ) + rule["traffictype"] = "ingress" + rule["action"] = "deny" + NetworkACL.create(self.apiclient, + services=rule, + aclid=acl.id + ) + self.vpcAllowAllAclDetailsMap[vpc.id] = acl.id + return vpc + + def deployVpc(self): + self.vpc = self.deployAllowEgressDenyIngressVpcInternal(VPC_DATA["cidr"]) + + def deployNetworkTierInternal(self, network_offering_id, vpc_id, tier_gateway, tier_netmask, acl_id=None, tier_name=None): + if not acl_id and vpc_id in self.vpcAllowAllAclDetailsMap: + acl_id = self.vpcAllowAllAclDetailsMap[vpc_id] + service = self.services["ntwk"] + if tier_name: + service["name"] = tier_name + service["displaytext"] = "vpc-%s" % tier_name + network = Network.create( + self.apiclient, + service, + self.account.name, + self.account.domainid, + networkofferingid=network_offering_id, + vpcid=vpc_id, + zoneid=self.zone.id, 
+ gateway=tier_gateway, + netmask=tier_netmask, + aclid=acl_id + ) + self.cleanup.append(network) + return network + + def deployNetworkTier(self): + self.vpc_tier = self.deployNetworkTierInternal( + self.vpc_tier_offering.id, + self.vpc.id, + VPC_DATA["tier1_gateway"], + VPC_DATA["tier_netmask"] + )
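Note: below is a minimal, illustrative sketch (not part of this patch) of the port-overlap rule that the validateIsolatedNetworkIpRules unit tests added above appear to encode. It assumes CLUSTER_API_PORT is 6443 and CLUSTER_NODES_DEFAULT_START_SSH_PORT is 2222, which is what the conflicting ranges (6440-6445 and 2200-2222) and the non-conflicting ranges (2225-2227 and 6444-6446) for a 3-node cluster imply; the class and method names are hypothetical and only show the shape of the check.

// Hedged sketch, not the code under test: a firewall/ACL rule on the cluster's public IP
// conflicts if it covers the Kubernetes API port or overlaps the SSH port-forwarding
// range reserved for the cluster nodes (one port per node, starting at the SSH start port).
public class ClusterPortConflictSketch {
    static final int API_PORT = 6443;       // assumed value of CLUSTER_API_PORT
    static final int SSH_START_PORT = 2222; // assumed value of CLUSTER_NODES_DEFAULT_START_SSH_PORT

    static boolean conflicts(int ruleStartPort, int ruleEndPort, int clusterTotalNodeCount) {
        int sshEndPort = SSH_START_PORT + clusterTotalNodeCount - 1;
        boolean apiConflict = ruleStartPort <= API_PORT && API_PORT <= ruleEndPort;
        boolean sshConflict = ruleStartPort <= sshEndPort && SSH_START_PORT <= ruleEndPort;
        return apiConflict || sshConflict;
    }

    public static void main(String[] args) {
        // Mirrors the unit test cases for a 3-node cluster (SSH range 2222-2224, API port 6443):
        System.out.println(conflicts(6440, 6445, 3)); // true  - range covers the API port
        System.out.println(conflicts(2200, 2222, 3)); // true  - range overlaps the SSH range
        System.out.println(conflicts(2225, 2227, 3)); // false - just above the SSH range
        System.out.println(conflicts(6444, 6446, 3)); // false - just above the API port
    }
}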