CKS Enhancements - Phase 1

* Ability to specify different compute or service offerings for the different types of CKS cluster nodes – worker, control (master) or etcd

* Ability to use CKS ready custom templates for CKS cluster nodes

---------

Co-authored-by: Pearl Dsilva <pearl1594@gmail.com>
This commit is contained in:
Nicolas Vazquez 2024-03-07 09:37:22 -03:00 committed by nvazquez
parent 21af134087
commit 4982b1b059
No known key found for this signature in database
GPG Key ID: 656E1BCC8CB54F84
52 changed files with 2598 additions and 169 deletions

View File

@ -19,8 +19,17 @@ package com.cloud.kubernetes.cluster;
import com.cloud.utils.component.Adapter;
import org.apache.cloudstack.acl.ControlledEntity;
import java.util.Map;
/**
 * Helper contract for CKS (CloudStack Kubernetes Service) cluster operations.
 * Implementations resolve cluster entities for ACL checks and translate the
 * user-supplied node-type mappings from the API layer into internal IDs.
 */
public interface KubernetesClusterHelper extends Adapter {
/**
 * Role a VM plays inside a CKS cluster.
 * NOTE(review): DEFAULT appears to mean "no specific node type / applies to
 * all nodes" — confirm against the implementations before relying on it.
 */
enum KubernetesClusterNodeType {
CONTROL, WORKER, ETCD, DEFAULT
}
/** Returns the cluster entity with the given UUID (as a ControlledEntity for ACL checks). */
ControlledEntity findByUuid(String uuid);
/** Returns the cluster entity that the VM with the given internal ID belongs to. */
ControlledEntity findByVmId(long vmId);
/** Returns true when the given name matches a {@link KubernetesClusterNodeType} constant (case-insensitive). */
boolean isValidNodeType(String nodeType);
/**
 * Converts the API-layer map of (node type, service offering UUID) entries into
 * a map of node type name to service offering internal ID.
 */
Map<String, Long> getServiceOfferingNodeTypeMap(Map<String, Map<String, String>> serviceOfferingNodeTypeMap);
/**
 * Converts the API-layer map of (node type, template UUID) entries into
 * a map of node type name to template internal ID.
 */
Map<String, Long> getTemplateNodeTypeMap(Map<String, Map<String, String>> templateNodeTypeMap);
}

View File

@ -144,6 +144,8 @@ public interface VirtualMachineTemplate extends ControlledEntity, Identity, Inte
boolean isDeployAsIs();
boolean isForCks();
Long getUserDataId();
UserData.UserDataOverridePolicy getUserDataOverridePolicy();

View File

@ -88,6 +88,9 @@ public interface VmDetailConstants {
String DEPLOY_AS_IS_CONFIGURATION = "configurationId";
String KEY_PAIR_NAMES = "keypairnames";
String CKS_CONTROL_NODE_LOGIN_USER = "controlNodeLoginUser";
String CKS_NODE_TYPE = "node";
String OFFERING = "offering";
String TEMPLATE = "template";
// VMware to KVM VM migrations specific
String VMWARE_TO_KVM_PREFIX = "vmware-to-kvm";

View File

@ -496,6 +496,12 @@ public class ApiConstants {
public static final String VLAN = "vlan";
public static final String VLAN_RANGE = "vlanrange";
public static final String WORKER_SERVICE_OFFERING_ID = "workerofferingid";
public static final String WORKER_SERVICE_OFFERING_NAME = "workerofferingname";
public static final String CONTROL_SERVICE_OFFERING_ID = "controlofferingid";
public static final String CONTROL_SERVICE_OFFERING_NAME = "controlofferingname";
public static final String ETCD_SERVICE_OFFERING_ID = "etcdofferingid";
public static final String ETCD_SERVICE_OFFERING_NAME = "etcdofferingname";
public static final String REMOVE_VLAN = "removevlan";
public static final String VLAN_ID = "vlanid";
public static final String ISOLATED_PVLAN = "isolatedpvlan";
@ -837,6 +843,7 @@ public class ApiConstants {
public static final String SPLIT_CONNECTIONS = "splitconnections";
public static final String FOR_VPC = "forvpc";
public static final String FOR_NSX = "fornsx";
public static final String FOR_CKS = "forcks";
public static final String NSX_SUPPORT_LB = "nsxsupportlb";
public static final String NSX_SUPPORTS_INTERNAL_LB = "nsxsupportsinternallb";
public static final String FOR_TUNGSTEN = "fortungsten";
@ -1043,6 +1050,7 @@ public class ApiConstants {
public static final String MASTER_NODES = "masternodes";
public static final String NODE_IDS = "nodeids";
public static final String CONTROL_NODES = "controlnodes";
public static final String ETCD_NODES = "etcdnodes";
public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion";
public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid";
public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize";
@ -1051,6 +1059,8 @@ public class ApiConstants {
public static final String AUTOSCALING_ENABLED = "autoscalingenabled";
public static final String MIN_SIZE = "minsize";
public static final String MAX_SIZE = "maxsize";
public static final String NODE_TYPE_OFFERING_MAP = "nodeofferings";
public static final String NODE_TYPE_TEMPLATE_MAP = "nodetemplates";
public static final String BOOT_TYPE = "boottype";
public static final String BOOT_MODE = "bootmode";

View File

@ -93,6 +93,11 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd {
description = "(VMware only) true if VM deployments should preserve all the configurations defined for this template", since = "4.15.1")
private Boolean deployAsIs;
@Parameter(name=ApiConstants.FOR_CKS,
type = CommandType.BOOLEAN,
description = "if true, the templates would be available for deploying CKS clusters", since = "4.20.0")
protected Boolean forCks;
public String getDisplayText() {
return StringUtils.isBlank(displayText) ? getName() : displayText;
}
@ -162,6 +167,10 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd {
Boolean.TRUE.equals(deployAsIs);
}
public boolean isForCks() {
return Boolean.TRUE.equals(forCks);
}
@Override
public void execute() throws ServerApiException {
validateRequest();

View File

@ -104,6 +104,11 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User
since = "4.19.0")
private Boolean isVnf;
@Parameter(name = ApiConstants.FOR_CKS, type = CommandType.BOOLEAN,
description = "list templates that can be used to deploy CKS clusters",
since = "4.20.0")
private Boolean forCks;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -191,6 +196,8 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User
return isVnf;
}
public Boolean getForCks() { return forCks; }
@Override
public String getCommandName() {
return s_name;

View File

@ -167,6 +167,11 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd {
description = "(VMware only) true if VM deployments should preserve all the configurations defined for this template", since = "4.15.1")
protected Boolean deployAsIs;
@Parameter(name=ApiConstants.FOR_CKS,
type = CommandType.BOOLEAN,
description = "if true, the templates would be available for deploying CKS clusters", since = "4.20.0")
protected Boolean forCks;
@Parameter(name = ApiConstants.TEMPLATE_TYPE, type = CommandType.STRING,
description = "the type of the template. Valid options are: USER/VNF (for all users) and SYSTEM/ROUTING/BUILTIN (for admins only).",
since = "4.19.0")
@ -289,6 +294,10 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd {
Boolean.TRUE.equals(deployAsIs);
}
public boolean isForCks() {
return Boolean.TRUE.equals(forCks);
}
public String getTemplateType() {
return templateType;
}

View File

@ -200,6 +200,11 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
since = "4.15")
private Boolean deployAsIs;
@SerializedName(ApiConstants.FOR_CKS)
@Param(description = "If true it indicates that the template can be used for CKS cluster deployments",
since = "4.20")
private Boolean forCks;
@SerializedName(ApiConstants.DEPLOY_AS_IS_DETAILS)
@Param(description = "VMware only: additional key/value details tied with deploy-as-is template",
since = "4.15")
@ -440,6 +445,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements
this.deployAsIs = deployAsIs;
}
public void setForCks(Boolean forCks) {
this.forCks = forCks;
}
public void setParentTemplateId(String parentTemplateId) {
this.parentTemplateId = parentTemplateId;
}

View File

@ -160,6 +160,9 @@ public class VMTemplateVO implements VirtualMachineTemplate {
@Column(name = "deploy_as_is")
private boolean deployAsIs;
@Column(name = "for_cks")
private boolean forCks;
@Column(name = "user_data_id")
private Long userDataId;
@ -655,6 +658,14 @@ public class VMTemplateVO implements VirtualMachineTemplate {
this.deployAsIs = deployAsIs;
}
public boolean isForCks() {
return forCks;
}
public void setForCks(boolean forCks) {
this.forCks = forCks;
}
@Override
public Long getUserDataId() {
return userDataId;

View File

@ -323,3 +323,23 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.quarantined_ips', 'remover_account_i
-- Explicitly add support for VMware 8.0b (8.0.0.2), 8.0c (8.0.0.3)
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.0.2', 1024, 0, 59, 64, 1, 1);
INSERT IGNORE INTO `cloud`.`hypervisor_capabilities` (uuid, hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, max_hosts_per_cluster, storage_motion_supported, vm_snapshot_enabled) values (UUID(), 'VMware', '8.0.0.3', 1024, 0, 59, 64, 1, 1);
-- Add for_cks column to the vm_template table
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.vm_template','for_cks', 'int(1) unsigned DEFAULT "0" COMMENT "if true, the template can be used for CKS cluster deployment"');
-- Add support for different node types service offerings on CKS clusters
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','control_service_offering_id', 'bigint unsigned COMMENT "service offering ID for Control Node(s)"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','worker_service_offering_id', 'bigint unsigned COMMENT "service offering ID for Worker Node(s)"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_service_offering_id', 'bigint unsigned COMMENT "service offering ID for etcd Nodes"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_node_count', 'bigint unsigned COMMENT "number of etcd nodes to be deployed for the Kubernetes cluster"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','control_template_id', 'bigint unsigned COMMENT "template id to be used for Control Node(s)"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','worker_template_id', 'bigint unsigned COMMENT "template id to be used for Worker Node(s)"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster','etcd_template_id', 'bigint unsigned COMMENT "template id to be used for etcd Nodes"');
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.kubernetes_cluster_vm_map','etcd_node', 'tinyint(1) unsigned NOT NULL DEFAULT 0 COMMENT "indicates if the VM is an etcd node"');
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__control_service_offering_id` FOREIGN KEY `fk_cluster__control_service_offering_id`(`control_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__worker_service_offering_id` FOREIGN KEY `fk_cluster__worker_service_offering_id`(`worker_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__etcd_service_offering_id` FOREIGN KEY `fk_cluster__etcd_service_offering_id`(`etcd_service_offering_id`) REFERENCES `service_offering`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__control_template_id` FOREIGN KEY `fk_cluster__control_template_id`(`control_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__worker_template_id` FOREIGN KEY `fk_cluster__worker_template_id`(`worker_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD CONSTRAINT `fk_cluster__etcd_template_id` FOREIGN KEY `fk_cluster__etcd_template_id`(`etcd_template_id`) REFERENCES `vm_template`(`id`) ON DELETE CASCADE;

View File

@ -99,6 +99,7 @@ SELECT
IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`,
`vm_template`.`direct_download` AS `direct_download`,
`vm_template`.`deploy_as_is` AS `deploy_as_is`,
`vm_template`.`for_cks` AS `for_cks`,
`user_data`.`id` AS `user_data_id`,
`user_data`.`uuid` AS `user_data_uuid`,
`user_data`.`name` AS `user_data_name`,

View File

@ -417,6 +417,11 @@ public class TemplateObject implements TemplateInfo {
return this.imageVO.isDeployAsIs();
}
@Override
public boolean isForCks() {
return imageVO.isForCks();
}
public void setInstallPath(String installPath) {
this.installPath = installPath;
}

View File

@ -142,4 +142,11 @@ public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm
Long getMaxSize();
Long getSecurityGroupId();
ClusterType getClusterType();
Long getControlServiceOfferingId();
Long getWorkerServiceOfferingId();
Long getEtcdServiceOfferingId();
Long getControlTemplateId();
Long getWorkerTemplateId();
Long getEtcdTemplateId();
Long getEtcdNodeCount();
}

View File

@ -16,24 +16,41 @@
// under the License.
package com.cloud.kubernetes.cluster;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
import com.cloud.offering.ServiceOffering;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.VmDetailConstants;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
@Component
public class KubernetesClusterHelperImpl extends AdapterBase implements KubernetesClusterHelper, Configurable {
public static final Logger LOGGER = Logger.getLogger(KubernetesClusterHelperImpl.class.getName());
@Inject
private KubernetesClusterDao kubernetesClusterDao;
@Inject
private KubernetesClusterVmMapDao kubernetesClusterVmMapDao;
@Inject
protected ServiceOfferingDao serviceOfferingDao;
@Inject
protected VMTemplateDao vmTemplateDao;
@Override
public ControlledEntity findByUuid(String uuid) {
@ -49,6 +66,127 @@ public class KubernetesClusterHelperImpl extends AdapterBase implements Kubernet
return kubernetesClusterDao.findById(clusterVmMapVO.getClusterId());
}
@Override
public boolean isValidNodeType(String nodeType) {
    // A blank string can never name a node type.
    if (StringUtils.isBlank(nodeType)) {
        return false;
    }
    // Case-insensitive match against the known node types, without relying
    // on the exception thrown by Enum.valueOf for unknown names.
    for (KubernetesClusterNodeType candidate : KubernetesClusterNodeType.values()) {
        if (candidate.name().equalsIgnoreCase(nodeType)) {
            return true;
        }
    }
    return false;
}
protected void checkNodeTypeOfferingEntryCompleteness(String nodeTypeStr, String serviceOfferingUuid) {
    // Both halves of the mapping must be present; accept complete entries early.
    if (StringUtils.isNoneEmpty(nodeTypeStr, serviceOfferingUuid)) {
        return;
    }
    final String error = String.format("Incomplete Node Type to Service Offering ID mapping: '%s' -> '%s'", nodeTypeStr, serviceOfferingUuid);
    LOGGER.error(error);
    throw new InvalidParameterValueException(error);
}
protected void checkNodeTypeTemplateEntryCompleteness(String nodeTypeStr, String templateUuid) {
    // Both halves of the mapping must be present; accept complete entries early.
    if (StringUtils.isNoneEmpty(nodeTypeStr, templateUuid)) {
        return;
    }
    final String error = String.format("Incomplete Node Type to template ID mapping: '%s' -> '%s'", nodeTypeStr, templateUuid);
    LOGGER.error(error);
    throw new InvalidParameterValueException(error);
}
/**
 * Validates one node-type to service-offering mapping entry: the node type
 * must name a {@link KubernetesClusterNodeType} and the offering UUID must
 * resolve to an existing service offering.
 *
 * @throws InvalidParameterValueException when either check fails
 */
protected void checkNodeTypeOfferingEntryValues(String nodeTypeStr, ServiceOffering serviceOffering, String serviceOfferingUuid) {
    if (!isValidNodeType(nodeTypeStr)) {
        String error = String.format("The provided value '%s' for Node Type is invalid", nodeTypeStr);
        LOGGER.error(error);
        // Fix: throw the message as-is. The previous String.format(error)
        // wrapper re-formatted an already-formatted string, so a '%' in the
        // user-supplied node type would raise UnknownFormatConversionException.
        throw new InvalidParameterValueException(error);
    }
    if (serviceOffering == null) {
        String error = String.format("Cannot find a service offering with ID %s", serviceOfferingUuid);
        LOGGER.error(error);
        throw new InvalidParameterValueException(error);
    }
}
/**
 * Validates one node-type to template mapping entry: the node type must name
 * a {@link KubernetesClusterNodeType} and the template UUID must resolve to
 * an existing template.
 *
 * @throws InvalidParameterValueException when either check fails
 */
protected void checkNodeTypeTemplateEntryValues(String nodeTypeStr, VMTemplateVO template, String templateUuid) {
    if (!isValidNodeType(nodeTypeStr)) {
        String error = String.format("The provided value '%s' for Node Type is invalid", nodeTypeStr);
        LOGGER.error(error);
        // Fix: throw the message as-is. The previous String.format(error)
        // wrapper re-formatted an already-formatted string, so a '%' in the
        // user-supplied node type would raise UnknownFormatConversionException.
        throw new InvalidParameterValueException(error);
    }
    if (template == null) {
        String error = String.format("Cannot find a template with ID %s", templateUuid);
        LOGGER.error(error);
        throw new InvalidParameterValueException(error);
    }
}
protected void addNodeTypeOfferingEntry(String nodeTypeStr, String serviceOfferingUuid, ServiceOffering serviceOffering, Map<String, Long> mapping) {
    // Normalise the user-supplied node type to its canonical enum name.
    final KubernetesClusterNodeType nodeType = KubernetesClusterNodeType.valueOf(nodeTypeStr.toUpperCase());
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(String.format("Node Type: '%s' should use Service Offering ID: '%s'", nodeTypeStr, serviceOfferingUuid));
    }
    mapping.put(nodeType.name(), serviceOffering.getId());
}
protected void addNodeTypeTemplateEntry(String nodeTypeStr, String templateUuid, VMTemplateVO template, Map<String, Long> mapping) {
    // Normalise the user-supplied node type to its canonical enum name.
    final KubernetesClusterNodeType nodeType = KubernetesClusterNodeType.valueOf(nodeTypeStr.toUpperCase());
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(String.format("Node Type: '%s' should use template ID: '%s'", nodeTypeStr, templateUuid));
    }
    mapping.put(nodeType.name(), template.getId());
}
protected void processNodeTypeOfferingEntryAndAddToMappingIfValid(Map<String, String> entry, Map<String, Long> mapping) {
    // Empty entries carry no mapping information; skip them silently.
    if (MapUtils.isEmpty(entry)) {
        return;
    }
    final String nodeTypeStr = entry.get(VmDetailConstants.CKS_NODE_TYPE);
    final String offeringUuid = entry.get(VmDetailConstants.OFFERING);
    // Fail fast on partially-specified entries before touching the database.
    checkNodeTypeOfferingEntryCompleteness(nodeTypeStr, offeringUuid);
    final ServiceOffering offering = serviceOfferingDao.findByUuid(offeringUuid);
    checkNodeTypeOfferingEntryValues(nodeTypeStr, offering, offeringUuid);
    addNodeTypeOfferingEntry(nodeTypeStr, offeringUuid, offering, mapping);
}
protected void processNodeTypeTemplateEntryAndAddToMappingIfValid(Map<String, String> entry, Map<String, Long> mapping) {
    // Empty entries carry no mapping information; skip them silently.
    if (MapUtils.isEmpty(entry)) {
        return;
    }
    final String nodeTypeStr = entry.get(VmDetailConstants.CKS_NODE_TYPE);
    final String templateUuid = entry.get(VmDetailConstants.TEMPLATE);
    // Fail fast on partially-specified entries before touching the database.
    checkNodeTypeTemplateEntryCompleteness(nodeTypeStr, templateUuid);
    final VMTemplateVO template = vmTemplateDao.findByUuid(templateUuid);
    checkNodeTypeTemplateEntryValues(nodeTypeStr, template, templateUuid);
    addNodeTypeTemplateEntry(nodeTypeStr, templateUuid, template, mapping);
}
@Override
public Map<String, Long> getServiceOfferingNodeTypeMap(Map<String, Map<String, String>> serviceOfferingNodeTypeMap) {
    // Resolve each (node type, offering UUID) entry to an internal offering ID.
    final Map<String, Long> nodeTypeToOfferingId = new HashMap<>();
    if (MapUtils.isEmpty(serviceOfferingNodeTypeMap)) {
        return nodeTypeToOfferingId;
    }
    serviceOfferingNodeTypeMap.values()
            .forEach(entry -> processNodeTypeOfferingEntryAndAddToMappingIfValid(entry, nodeTypeToOfferingId));
    return nodeTypeToOfferingId;
}
@Override
public Map<String, Long> getTemplateNodeTypeMap(Map<String, Map<String, String>> templateNodeTypeMap) {
    // Resolve each (node type, template UUID) entry to an internal template ID.
    final Map<String, Long> nodeTypeToTemplateId = new HashMap<>();
    if (MapUtils.isEmpty(templateNodeTypeMap)) {
        return nodeTypeToTemplateId;
    }
    templateNodeTypeMap.values()
            .forEach(entry -> processNodeTypeTemplateEntryAndAddToMappingIfValid(entry, nodeTypeToTemplateId));
    return nodeTypeToTemplateId;
}
@Override
public String getConfigComponentName() {
return KubernetesClusterHelper.class.getSimpleName();

View File

@ -16,6 +16,10 @@
// under the License.
package com.cloud.kubernetes.cluster;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.WORKER;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.DEFAULT;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import static com.cloud.vm.UserVmManager.AllowUserExpungeRecoverVm;
@ -40,6 +44,9 @@ import java.util.concurrent.TimeUnit;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType;
import com.cloud.network.dao.NsxProviderDao;
import com.cloud.network.element.NsxProviderVO;
import com.cloud.uservm.UserVm;
import com.cloud.vm.UserVmService;
import org.apache.cloudstack.acl.ControlledEntity;
@ -72,6 +79,7 @@ import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import com.cloud.api.ApiDBUtils;
@ -190,6 +198,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
protected StateMachine2<KubernetesCluster.State, KubernetesCluster.Event, KubernetesCluster> _stateMachine = KubernetesCluster.State.getStateMachine();
protected final static List<String> CLUSTER_NODES_TYPES_LIST = Arrays.asList(WORKER.name(), CONTROL.name(), ETCD.name());
ScheduledExecutorService _gcExecutor;
ScheduledExecutorService _stateScanner;
@ -259,6 +269,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
public SecurityGroupService securityGroupService;
@Inject
public NetworkHelper networkHelper;
@Inject
private NsxProviderDao nsxProviderDao;
@Inject
private UserVmService userVmService;
@ -364,16 +376,32 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return null;
}
public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) {
public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType, Map<String, Long> templateNodeTypeMap, KubernetesClusterNodeType nodeType) {
VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType);
if (DataCenter.Type.Edge.equals(dataCenter.getType()) && template != null && !template.isDirectDownload()) {
logger.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter));
template = templateDao.findRoutingTemplate(hypervisorType, networkHelper.getHypervisorRouterTemplateConfigMap().get(hypervisorType).valueIn(dataCenter.getId()));
}
if (template == null) {
throw new CloudRuntimeException("Not able to find the System or Routing template in ready state for the zone " + dataCenter.getUuid());
switch (nodeType) {
case CONTROL:
case ETCD:
case WORKER:
VMTemplateVO nodeTemplate = Objects.nonNull(templateNodeTypeMap) ? templateDao.findById(templateNodeTypeMap.getOrDefault(nodeType.name(), 0L)) : template;
template = Objects.nonNull(nodeTemplate) ? nodeTemplate : template;
if (Objects.isNull(template)) {
throwDefaultCksTemplateNotFound(dataCenter.getUuid());
}
return template;
default:
if (Objects.isNull(template)) {
throwDefaultCksTemplateNotFound(dataCenter.getUuid());
}
return template;
}
return template;
}
public void throwDefaultCksTemplateNotFound(String datacenterId) {
    // Shared failure path for a missing ready System/Routing template in a zone.
    final String message = "Not able to find the System or Routing template in ready state for the zone " + datacenterId;
    throw new CloudRuntimeException(message);
}
protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose purpose, Network network, int clusterTotalNodeCount) {
@ -444,7 +472,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
validateIsolatedNetwork(network, clusterTotalNodeCount);
}
private boolean validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) {
protected void validateServiceOffering(final ServiceOffering serviceOffering, final KubernetesSupportedVersion version) throws InvalidParameterValueException {
if (serviceOffering.isDynamic()) {
throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid()));
}
@ -457,7 +485,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
if (serviceOffering.getRamSize() < version.getMinimumRamSize()) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, associated Kubernetes version ID: %s needs minimum %d MB RAM", serviceOffering.getUuid(), version.getUuid(), version.getMinimumRamSize()));
}
return true;
}
private void validateDockerRegistryParams(final String dockerRegistryUserName,
@ -543,6 +570,33 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
throw new InsufficientServerCapacityException(msg, DataCenter.class, zone.getId());
}
protected void setNodeTypeServiceOfferingResponse(KubernetesClusterResponse response,
        KubernetesClusterNodeType nodeType,
        Long offeringId) {
    // Nothing to report when no per-node-type offering was configured.
    if (Objects.isNull(offeringId)) {
        return;
    }
    final ServiceOfferingVO nodeOffering = serviceOfferingDao.findById(offeringId);
    if (Objects.nonNull(nodeOffering)) {
        setServiceOfferingResponseForNodeType(response, nodeOffering, nodeType);
    }
}
protected void setServiceOfferingResponseForNodeType(KubernetesClusterResponse response,
        ServiceOfferingVO offering,
        KubernetesClusterNodeType nodeType) {
    // Route the offering's uuid/name to the response fields of the matching node type.
    switch (nodeType) {
        case CONTROL:
            response.setControlOfferingId(offering.getUuid());
            response.setControlOfferingName(offering.getName());
            break;
        case WORKER:
            response.setWorkerOfferingId(offering.getUuid());
            response.setWorkerOfferingName(offering.getName());
            break;
        case ETCD:
            response.setEtcdOfferingId(offering.getUuid());
            response.setEtcdOfferingName(offering.getName());
            break;
        default:
            // DEFAULT has no dedicated response fields; nothing to set.
            break;
    }
}
@Override
public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetesClusterId) {
KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
@ -566,6 +620,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
response.setServiceOfferingId(offering.getUuid());
response.setServiceOfferingName(offering.getName());
}
setNodeTypeServiceOfferingResponse(response, WORKER, kubernetesCluster.getWorkerServiceOfferingId());
setNodeTypeServiceOfferingResponse(response, CONTROL, kubernetesCluster.getControlServiceOfferingId());
setNodeTypeServiceOfferingResponse(response, ETCD, kubernetesCluster.getEtcdServiceOfferingId());
if (kubernetesCluster.getEtcdNodeCount() != null) {
response.setEtcdNodes(kubernetesCluster.getEtcdNodeCount());
}
KubernetesSupportedVersionVO version = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
if (version != null) {
response.setKubernetesVersionId(version.getUuid());
@ -734,7 +796,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final String name = cmd.getName();
final Long zoneId = cmd.getZoneId();
final Long kubernetesVersionId = cmd.getKubernetesVersionId();
final Long serviceOfferingId = cmd.getServiceOfferingId();
final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId());
final Long networkId = cmd.getNetworkId();
final String sshKeyPair = cmd.getSSHKeyPairName();
@ -745,6 +806,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
final Long nodeRootDiskSize = cmd.getNodeRootDiskSize();
final String externalLoadBalancerIpAddress = cmd.getExternalLoadBalancerIpAddress();
final Map<String, Long> serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap();
final Long defaultServiceOfferingId = cmd.getServiceOfferingId();
if (name == null || name.isEmpty()) {
throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name: " + name);
@ -802,10 +865,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
throw new InvalidParameterValueException(String.format("ISO associated with version ID: %s is not in Ready state for datacenter ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid()));
}
ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
if (serviceOffering == null) {
throw new InvalidParameterValueException("No service offering with ID: " + serviceOfferingId);
}
validateServiceOfferingsForNodeTypes(serviceOfferingNodeTypeMap, defaultServiceOfferingId, cmd.getEtcdNodes(), clusterKubernetesVersion);
validateSshKeyPairForKubernetesCreateParameters(sshKeyPair, owner);
@ -813,15 +873,15 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
throw new InvalidParameterValueException(String.format("Invalid value for %s", ApiConstants.NODE_ROOT_DISK_SIZE));
}
if (!validateServiceOffering(serviceOffering, clusterKubernetesVersion)) {
throw new InvalidParameterValueException("Given service offering ID: %s is not suitable for Kubernetes cluster");
}
validateDockerRegistryParams(dockerRegistryUserName, dockerRegistryPassword, dockerRegistryUrl);
Network network = validateAndGetNetworkForKubernetesCreateParameters(networkId);
if (StringUtils.isNotEmpty(externalLoadBalancerIpAddress)) {
NsxProviderVO nsxProviderVO = nsxProviderDao.findByZoneId(zone.getId());
if (Objects.nonNull(nsxProviderVO)) {
throw new InvalidParameterValueException("External load balancer IP address is not supported on NSX-enabled zones");
}
if (!NetUtils.isValidIp4(externalLoadBalancerIpAddress) && !NetUtils.isValidIp6(externalLoadBalancerIpAddress)) {
throw new InvalidParameterValueException("Invalid external load balancer IP address");
}
@ -838,6 +898,37 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
}
protected void validateServiceOfferingsForNodeTypes(Map<String, Long> map,
        Long defaultServiceOfferingId,
        Long etcdNodes,
        KubernetesSupportedVersion clusterKubernetesVersion) {
    // Validate the (possibly defaulted) offering chosen for every cluster node type.
    CLUSTER_NODES_TYPES_LIST.forEach(nodeTypeName ->
            validateServiceOfferingForNode(map, defaultServiceOfferingId, nodeTypeName, etcdNodes, clusterKubernetesVersion));
}
/**
 * Validates the service offering chosen for one node type, falling back to the
 * cluster's default offering when the node-type map has no entry for it.
 * Etcd validation is skipped entirely when no etcd nodes were requested.
 *
 * @param map node type name -> offering ID overrides (may lack entries)
 * @param defaultServiceOfferingId offering used when no override exists
 * @param key node type name being validated (WORKER/CONTROL/ETCD)
 * @param etcdNodes requested etcd node count; null or 0 disables etcd checks
 * @param clusterKubernetesVersion version whose minimum CPU/RAM must be met
 * @throws InvalidParameterValueException when no offering resolves or it is unsuitable
 */
protected void validateServiceOfferingForNode(Map<String, Long> map,
        Long defaultServiceOfferingId,
        String key, Long etcdNodes,
        KubernetesSupportedVersion clusterKubernetesVersion) {
    // Etcd nodes are optional; nothing to validate when none are requested.
    if (ETCD.name().equalsIgnoreCase(key) && (etcdNodes == null || etcdNodes == 0)) {
        return;
    }
    Long serviceOfferingId = map.getOrDefault(key, defaultServiceOfferingId);
    ServiceOffering serviceOffering = serviceOfferingId != null ? serviceOfferingDao.findById(serviceOfferingId) : null;
    if (serviceOffering == null) {
        throw new InvalidParameterValueException("No service offering found with ID: " + serviceOfferingId);
    }
    try {
        validateServiceOffering(serviceOffering, clusterKubernetesVersion);
    } catch (InvalidParameterValueException e) {
        // Fix: report the offering/version UUIDs instead of interpolating the
        // VO objects themselves (which printed their toString() representations),
        // matching the message style used elsewhere in this class.
        String msg = String.format("Given service offering ID: %s for %s nodes is not suitable for the Kubernetes cluster version %s - %s",
                serviceOffering.getUuid(), key, clusterKubernetesVersion.getUuid(), e.getMessage());
        LOGGER.error(msg);
        throw new InvalidParameterValueException(msg);
    }
}
private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int controlNodesCount,
final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException {
Network network = null;
@ -941,12 +1032,13 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd cmd) {
final Long kubernetesClusterId = cmd.getId();
final Long serviceOfferingId = cmd.getServiceOfferingId();
final Long clusterSize = cmd.getClusterSize();
final List<Long> nodeIds = cmd.getNodeIds();
final Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled();
final Long minSize = cmd.getMinSize();
final Long maxSize = cmd.getMaxSize();
final Long defaultServiceOfferingId = cmd.getServiceOfferingId();
final Map<String, Long> serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap();
if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
@ -962,7 +1054,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName()));
}
if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) {
if (defaultServiceOfferingId == null && isAnyNodeOfferingEmpty(serviceOfferingNodeTypeMap)
&& clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster %s cannot be scaled, either service offering or cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getName()));
}
@ -1009,8 +1102,9 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
}
Long workerOfferingId = serviceOfferingNodeTypeMap != null ? serviceOfferingNodeTypeMap.getOrDefault(WORKER.name(), null) : null;
if (nodeIds != null) {
if (clusterSize != null || serviceOfferingId != null) {
if (clusterSize != null || defaultServiceOfferingId != null || workerOfferingId != null) {
throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering");
}
List<KubernetesClusterVmMapVO> nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds);
@ -1030,39 +1124,55 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
}
ServiceOffering serviceOffering = null;
if (serviceOfferingId != null) {
serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
if (serviceOffering == null) {
throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId);
} else {
if (serviceOffering.isDynamic()) {
throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. Kubernetes cluster : %s, service offering : %s", kubernetesCluster.getName(), serviceOffering.getName()));
}
if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM",
kubernetesCluster.getName(), serviceOffering.getName(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
}
if (serviceOffering.getCpu() < clusterVersion.getMinimumCpu()) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d vCPUs",
kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumCpu()));
}
if (serviceOffering.getRamSize() < clusterVersion.getMinimumRamSize()) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d MB RAM",
kubernetesCluster.getName(), serviceOffering.getName(), clusterVersion.getName(), clusterVersion.getMinimumRamSize()));
}
}
final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
if (KubernetesCluster.State.Running.equals(kubernetesCluster.getState()) && (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() ||
serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed())) {
logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. Service offering : %s offers lesser resources as compared to service offering : %s of Kubernetes cluster : %s",
serviceOffering.getName(), existingServiceOffering.getName(), kubernetesCluster.getName()));
}
}
validateServiceOfferingsForNodeTypesScale(serviceOfferingNodeTypeMap, defaultServiceOfferingId, kubernetesCluster, clusterVersion);
validateKubernetesClusterScaleSize(kubernetesCluster, clusterSize, maxClusterSize, zone);
}
/**
 * Validates the service offerings requested for a cluster scale operation, one entry per
 * node type in {@code CLUSTER_NODES_TYPES_LIST}. For each node type the offering is taken
 * from {@code map}, falling back to {@code defaultServiceOfferingId} when absent; a node
 * type with no resolvable offering is skipped.
 *
 * @param map                      optional per-node-type offering IDs (may be null/empty)
 * @param defaultServiceOfferingId fallback offering ID (may be null)
 * @param kubernetesCluster        cluster being scaled
 * @param clusterVersion           Kubernetes version of the cluster (minimum CPU/RAM checks)
 * @throws InvalidParameterValueException when an offering ID does not exist or fails validation
 */
protected void validateServiceOfferingsForNodeTypesScale(Map<String, Long> map, Long defaultServiceOfferingId, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) {
    for (String key : CLUSTER_NODES_TYPES_LIST) {
        // Null-safe lookup: the per-node-type map is an optional API parameter and may be null
        Long serviceOfferingId = MapUtils.isEmpty(map) ? defaultServiceOfferingId : map.getOrDefault(key, defaultServiceOfferingId);
        if (serviceOfferingId != null) {
            ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
            if (serviceOffering == null) {
                throw new InvalidParameterValueException("Failed to find service offering ID: " + serviceOfferingId);
            }
            checkServiceOfferingForNodesScale(serviceOffering, kubernetesCluster, clusterVersion);
            final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
            // NOTE(review): the scale-down check always compares against the cluster's *default*
            // offering, even for node types that currently run a dedicated offering — confirm intended
            if (KubernetesCluster.State.Running.equals(kubernetesCluster.getState()) && (serviceOffering.getRamSize() < existingServiceOffering.getRamSize() ||
                    serviceOffering.getCpu() * serviceOffering.getSpeed() < existingServiceOffering.getCpu() * existingServiceOffering.getSpeed())) {
                logAndThrow(Level.WARN, String.format("Kubernetes cluster cannot be scaled down for service offering. Service offering : %s offers lesser resources as compared to service offering : %s of Kubernetes cluster : %s",
                        serviceOffering.getName(), existingServiceOffering.getName(), kubernetesCluster.getName()));
            }
        }
    }
}
/**
 * Rejects a service offering that is unsuitable for scaling the given cluster's nodes:
 * dynamic (custom) offerings, offerings below the global CKS minimums, and offerings
 * below the minimum CPU/RAM required by the cluster's Kubernetes version.
 *
 * @throws InvalidParameterValueException when any constraint is violated
 */
protected void checkServiceOfferingForNodesScale(ServiceOffering serviceOffering, KubernetesClusterVO kubernetesCluster, KubernetesSupportedVersion clusterVersion) {
    final String clusterName = kubernetesCluster.getName();
    final String offeringName = serviceOffering.getName();
    if (serviceOffering.isDynamic()) {
        throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for Kubernetes clusters. Kubernetes cluster : %s, service offering : %s", clusterName, offeringName));
    }
    final int offeringCpu = serviceOffering.getCpu();
    final int offeringRam = serviceOffering.getRamSize();
    if (offeringCpu < MIN_KUBERNETES_CLUSTER_NODE_CPU || offeringRam < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
        throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM",
                clusterName, offeringName, MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
    }
    if (offeringCpu < clusterVersion.getMinimumCpu()) {
        throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d vCPUs",
                clusterName, offeringName, clusterVersion.getName(), clusterVersion.getMinimumCpu()));
    }
    if (offeringRam < clusterVersion.getMinimumRamSize()) {
        throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled with service offering : %s, associated Kubernetes version : %s needs minimum %d MB RAM",
                clusterName, offeringName, clusterVersion.getName(), clusterVersion.getMinimumRamSize()));
    }
}
/**
 * Returns true when a per-node-type offering map was supplied and at least one node type
 * maps to a null offering ID. An absent or empty map yields false.
 */
protected boolean isAnyNodeOfferingEmpty(Map<String, Long> map) {
    if (map == null || map.isEmpty()) {
        return false;
    }
    for (Long offeringId : map.values()) {
        if (offeringId == null) {
            return true;
        }
    }
    return false;
}
private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesClusterCmd cmd) {
// Validate parameters
validateEndpointUrl();
@ -1152,6 +1262,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final long controlNodeCount = cmd.getControlNodes();
final long clusterSize = Objects.requireNonNullElse(cmd.getClusterSize(), 0L);
final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId());
Map<String, Long> nodeTypeOfferingMap = cmd.getServiceOfferingNodeTypeMap();
final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId());
final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId());
@ -1201,39 +1312,65 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final DataCenter zone = dataCenterDao.findById(cmd.getZoneId());
final long controlNodeCount = cmd.getControlNodes();
final long clusterSize = cmd.getClusterSize();
final long totalNodeCount = controlNodeCount + clusterSize;
final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId());
final long etcdNodes = cmd.getEtcdNodes();
final Map<String, Long> nodeTypeCount = Map.of(WORKER.name(), clusterSize,
CONTROL.name(), controlNodeCount, ETCD.name(), etcdNodes);
final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId());
final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId());
DeployDestination deployDestination = null;
try {
deployDestination = plan(totalNodeCount, zone, serviceOffering);
} catch (InsufficientCapacityException e) {
logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d nodes cluster in zone : %s with service offering : %s", totalNodeCount, zone.getName(), serviceOffering.getName()));
}
if (deployDestination == null || deployDestination.getCluster() == null) {
logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName()));
}
Map<String, Long> serviceOfferingNodeTypeMap = cmd.getServiceOfferingNodeTypeMap();
Long defaultServiceOfferingId = cmd.getServiceOfferingId();
Hypervisor.HypervisorType hypervisorType = getHypervisorTypeAndValidateNodeDeployments(serviceOfferingNodeTypeMap, defaultServiceOfferingId, nodeTypeCount, zone);
SecurityGroup securityGroup = null;
if (zone.isSecurityGroupEnabled()) {
securityGroup = getOrCreateSecurityGroupForAccount(owner);
}
Map<String, Long> templateNodeTypeMap = cmd.getTemplateNodeTypeMap();
final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId());
final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType());
final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize);
final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize);
VMTemplateVO finalTemplate = null;
VMTemplateVO controlNodeTemplate = null;
VMTemplateVO workerNodeTemplate = null;
VMTemplateVO etcdNodeTemplate = null;
finalTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, DEFAULT);
controlNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, CONTROL);
workerNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, WORKER);
etcdNodeTemplate = getKubernetesServiceTemplate(zone, hypervisorType, templateNodeTypeMap, ETCD);
final ServiceOffering defaultServiceOffering = serviceOfferingDao.findById(defaultServiceOfferingId);
Pair<Long, Long> capacityPair = calculateClusterCapacity(serviceOfferingNodeTypeMap, nodeTypeCount, defaultServiceOfferingId);
final long cores = capacityPair.first();
final long memory = capacityPair.second();
final SecurityGroup finalSecurityGroup = securityGroup;
VMTemplateVO finalDefaultTemplate = finalTemplate;
VMTemplateVO finalControlNodeTemplate = controlNodeTemplate;
VMTemplateVO finalEtcdNodeTemplate = etcdNodeTemplate;
VMTemplateVO finalWorkerNodeTemplate = workerNodeTemplate;
final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
@Override
public KubernetesClusterVO doInTransaction(TransactionStatus status) {
KubernetesClusterVO newCluster = new KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(),
serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(),
owner.getAccountId(), controlNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory,
defaultServiceOffering.getId(), Objects.nonNull(finalDefaultTemplate) ? finalDefaultTemplate.getId() : null,
defaultNetwork.getId(), owner.getDomainId(), owner.getAccountId(), controlNodeCount, clusterSize,
KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory,
cmd.getNodeRootDiskSize(), "", KubernetesCluster.ClusterType.CloudManaged);
if (serviceOfferingNodeTypeMap.containsKey(WORKER.name())) {
newCluster.setWorkerServiceOfferingId(serviceOfferingNodeTypeMap.get(WORKER.name()));
}
if (serviceOfferingNodeTypeMap.containsKey(CONTROL.name())) {
newCluster.setControlServiceOfferingId(serviceOfferingNodeTypeMap.get(CONTROL.name()));
}
if (etcdNodes > 0) {
newCluster.setEtcdTemplateId(finalEtcdNodeTemplate.getId());
newCluster.setEtcdNodeCount(etcdNodes);
if (serviceOfferingNodeTypeMap.containsKey(ETCD.name())) {
newCluster.setEtcdServiceOfferingId(serviceOfferingNodeTypeMap.get(ETCD.name()));
}
}
newCluster.setWorkerTemplateId(finalWorkerNodeTemplate.getId());
newCluster.setControlTemplateId(finalControlNodeTemplate.getId());
if (zone.isSecurityGroupEnabled()) {
newCluster.setSecurityGroupId(finalSecurityGroup.getId());
}
@ -1250,6 +1387,52 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return cluster;
}
/**
 * Computes the total (cores, memory MB) that the requested cluster will consume, summing
 * per node type: nodes-of-type * offering CPU / RAM. Node types with a zero (or missing)
 * count are skipped. The offering for a node type comes from {@code map}, falling back to
 * {@code defaultServiceOfferingId}.
 *
 * @param map                      optional per-node-type offering IDs (may be null/empty)
 * @param nodeTypeCount            node count per node type name
 * @param defaultServiceOfferingId fallback offering ID
 * @return pair of (total cores, total memory MB)
 * @throws CloudRuntimeException when no offering can be resolved for a node type with nodes
 */
protected Pair<Long, Long> calculateClusterCapacity(Map<String, Long> map, Map<String, Long> nodeTypeCount, Long defaultServiceOfferingId) {
    long cores = 0L;
    long memory = 0L;
    for (String key : CLUSTER_NODES_TYPES_LIST) {
        Long nodes = nodeTypeCount.getOrDefault(key, 0L);
        if (nodes == null || nodes == 0) {
            continue;
        }
        // Null-safe lookup: the per-node-type map is an optional API parameter
        Long serviceOfferingId = MapUtils.isEmpty(map) ? defaultServiceOfferingId : map.getOrDefault(key, defaultServiceOfferingId);
        ServiceOffering serviceOffering = serviceOfferingId != null ? serviceOfferingDao.findById(serviceOfferingId) : null;
        if (serviceOffering == null) {
            // Fail with a clear message instead of the NPE the unguarded dereference produced
            throw new CloudRuntimeException(String.format("Cannot calculate cluster capacity, no service offering found for %s nodes", key));
        }
        cores = cores + (serviceOffering.getCpu() * nodes);
        memory = memory + (serviceOffering.getRamSize() * nodes);
    }
    return new Pair<>(cores, memory);
}
/**
 * Plans a deployment destination for each requested node type (using that node type's
 * service offering, falling back to the default offering) and returns the hypervisor type
 * of the first successfully planned cluster. Fails the operation when no suitable
 * destination or insufficient capacity is found.
 *
 * @param serviceOfferingNodeTypeMap optional per-node-type offering IDs (may be null/empty)
 * @param defaultServiceOfferingId   fallback offering ID
 * @param nodeTypeCount              node count per node type name
 * @param zone                       target zone
 * @return hypervisor type of the planned destination, or null if nothing was planned
 */
protected Hypervisor.HypervisorType getHypervisorTypeAndValidateNodeDeployments(Map<String, Long> serviceOfferingNodeTypeMap,
                                                                                Long defaultServiceOfferingId,
                                                                                Map<String, Long> nodeTypeCount, DataCenter zone) {
    Hypervisor.HypervisorType hypervisorType = null;
    final boolean hasNodeTypeOfferings = !MapUtils.isEmpty(serviceOfferingNodeTypeMap);
    for (String nodeType : CLUSTER_NODES_TYPES_LIST) {
        if (!nodeTypeCount.containsKey(nodeType)) {
            continue;
        }
        // Null-safe lookup: the per-node-type map is an optional API parameter
        Long serviceOfferingId = hasNodeTypeOfferings ? serviceOfferingNodeTypeMap.getOrDefault(nodeType, defaultServiceOfferingId) : defaultServiceOfferingId;
        ServiceOffering serviceOffering = serviceOfferingDao.findById(serviceOfferingId);
        // BUG FIX: the node count must default to 0 — the original defaulted it to the
        // default service offering ID, planning capacity for a bogus number of nodes
        Long nodes = nodeTypeCount.getOrDefault(nodeType, 0L);
        try {
            if (nodeType.equalsIgnoreCase(ETCD.name()) &&
                    (!hasNodeTypeOfferings || !serviceOfferingNodeTypeMap.containsKey(ETCD.name()) || nodes == 0)) {
                // NOTE(review): etcd planning is skipped when no dedicated etcd offering is
                // supplied even if etcd nodes were requested — confirm this is intended
                continue;
            }
            DeployDestination deployDestination = plan(nodes, zone, serviceOffering);
            if (deployDestination.getCluster() == null) {
                logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName()));
            }
            if (hypervisorType == null) {
                hypervisorType = deployDestination.getCluster().getHypervisorType();
            }
        } catch (InsufficientCapacityException e) {
            logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to insufficient capacity for %d nodes cluster in zone : %s with service offering : %s", nodes, zone.getName(), serviceOffering.getName()));
        }
    }
    return hypervisorType;
}
private SecurityGroup getOrCreateSecurityGroupForAccount(Account owner) {
String securityGroupName = String.format("%s-%s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME, owner.getUuid());
String securityGroupDesc = String.format("%s and account %s", KubernetesClusterActionWorker.CKS_SECURITY_GROUP_DESCRIPTION, owner.getName());
@ -1534,12 +1717,13 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
}
validateKubernetesClusterScaleParameters(cmd);
KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId());
Map<String, ServiceOffering> nodeToOfferingMap = createNodeTypeToServiceOfferingMap(cmd.getServiceOfferingNodeTypeMap(), cmd.getServiceOfferingId(), kubernetesCluster);
String[] keys = getServiceUserKeys(kubernetesCluster);
KubernetesClusterScaleWorker scaleWorker =
new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()),
serviceOfferingDao.findById(cmd.getServiceOfferingId()),
nodeToOfferingMap,
cmd.getClusterSize(),
cmd.getNodeIds(),
cmd.isAutoscalingEnabled(),
@ -1551,6 +1735,29 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return scaleWorker.scaleCluster();
}
/**
 * Creates a map of node type name to resolved service offering for a scale request.
 * When no per-node-type IDs are supplied, a single DEFAULT entry is created so every
 * node is scaled to the same offering: the requested offering ID if given, otherwise
 * the cluster's current offering. Otherwise only the node types present in the request
 * (of those in CLUSTER_NODES_TYPES_LIST) get an entry.
 */
protected Map<String, ServiceOffering> createNodeTypeToServiceOfferingMap(Map<String, Long> idsMapping,
        Long serviceOfferingId, KubernetesClusterVO kubernetesCluster) {
    Map<String, ServiceOffering> map = new HashMap<>();
    if (MapUtils.isEmpty(idsMapping)) {
        // No per-node-type request: fall back to one offering for the whole cluster
        ServiceOfferingVO offering = serviceOfferingId != null ?
                serviceOfferingDao.findById(serviceOfferingId) :
                serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
        map.put(DEFAULT.name(), offering);
        return map;
    }
    for (String key : CLUSTER_NODES_TYPES_LIST) {
        if (!idsMapping.containsKey(key)) {
            continue;
        }
        // NOTE(review): findById may return null for a stale ID — callers should tolerate it
        map.put(key, serviceOfferingDao.findById(idsMapping.get(key)));
    }
    return map;
}
@Override
public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws CloudRuntimeException {
if (!KubernetesServiceEnabled.value()) {

View File

@ -117,6 +117,27 @@ public class KubernetesClusterVO implements KubernetesCluster {
@Column(name = "cluster_type")
private ClusterType clusterType;
@Column(name = "control_service_offering_id")
private Long controlServiceOfferingId;
@Column(name = "worker_service_offering_id")
private Long workerServiceOfferingId;
@Column(name = "etcd_service_offering_id")
private Long etcdServiceOfferingId;
@Column(name = "etcd_node_count")
private Long etcdNodeCount;
@Column(name = "control_template_id")
private Long controlTemplateId;
@Column(name = "worker_template_id")
private Long workerTemplateId;
@Column(name = "etcd_template_id")
private Long etcdTemplateId;
@Override
public long getId() {
return id;
@ -236,7 +257,7 @@ public class KubernetesClusterVO implements KubernetesCluster {
@Override
public long getTotalNodeCount() {
return this.controlNodeCount + this.nodeCount;
return this.controlNodeCount + this.nodeCount + this.getEtcdNodeCount();
}
@Override
@ -406,4 +427,60 @@ public class KubernetesClusterVO implements KubernetesCluster {
public Class<?> getEntityType() {
return KubernetesCluster.class;
}
public Long getControlServiceOfferingId() {
return controlServiceOfferingId;
}
public void setControlServiceOfferingId(Long controlServiceOfferingId) {
this.controlServiceOfferingId = controlServiceOfferingId;
}
public Long getWorkerServiceOfferingId() {
return workerServiceOfferingId;
}
public void setWorkerServiceOfferingId(Long workerServiceOfferingId) {
this.workerServiceOfferingId = workerServiceOfferingId;
}
public Long getEtcdServiceOfferingId() {
return etcdServiceOfferingId;
}
public void setEtcdServiceOfferingId(Long etcdServiceOfferingId) {
this.etcdServiceOfferingId = etcdServiceOfferingId;
}
// Number of dedicated etcd nodes; a null column value (clusters created before etcd
// support, or clusters without dedicated etcd nodes) is reported as 0.
public Long getEtcdNodeCount() {
    return etcdNodeCount != null ? etcdNodeCount : 0L;
}
public void setEtcdNodeCount(Long etcdNodeCount) {
this.etcdNodeCount = etcdNodeCount;
}
public Long getEtcdTemplateId() {
return etcdTemplateId;
}
public void setEtcdTemplateId(Long etcdTemplateId) {
this.etcdTemplateId = etcdTemplateId;
}
public Long getWorkerTemplateId() {
return workerTemplateId;
}
public void setWorkerTemplateId(Long workerTemplateId) {
this.workerTemplateId = workerTemplateId;
}
public Long getControlTemplateId() {
return controlTemplateId;
}
public void setControlTemplateId(Long controlTemplateId) {
this.controlTemplateId = controlTemplateId;
}
}

View File

@ -42,6 +42,9 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
@Column(name = "control_node")
boolean controlNode;
@Column(name = "etcd_node")
boolean etcdNode;
public KubernetesClusterVmMapVO() {
}
@ -83,4 +86,12 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
public void setControlNode(boolean controlNode) {
this.controlNode = controlNode;
}
public boolean isEtcdNode() {
return etcdNode;
}
public void setEtcdNode(boolean etcdNode) {
this.etcdNode = etcdNode;
}
}

View File

@ -22,6 +22,7 @@ import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
@ -34,6 +35,8 @@ import javax.inject.Inject;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType;
import com.cloud.offering.ServiceOffering;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.config.ApiServiceConfiguration;
@ -166,6 +169,9 @@ public class KubernetesClusterActionWorker {
protected KubernetesCluster kubernetesCluster;
protected Account owner;
protected VirtualMachineTemplate clusterTemplate;
protected VirtualMachineTemplate controlNodeTemplate;
protected VirtualMachineTemplate workerNodeTemplate;
protected VirtualMachineTemplate etcdTemplate;
protected File sshKeyFile;
protected String publicIpAddress;
protected int sshPort;
@ -197,7 +203,10 @@ public class KubernetesClusterActionWorker {
DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId);
VMTemplateVO template = templateDao.findById(templateId);
Hypervisor.HypervisorType type = template.getHypervisorType();
this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type);
this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type, null, KubernetesClusterNodeType.DEFAULT);
this.controlNodeTemplate = templateDao.findById(this.kubernetesCluster.getControlTemplateId());
this.workerNodeTemplate = templateDao.findById(this.kubernetesCluster.getWorkerTemplateId());
this.etcdTemplate = templateDao.findById(this.kubernetesCluster.getEtcdTemplateId());
this.sshKeyFile = getManagementServerSshPublicKeyFile();
}
@ -270,7 +279,7 @@ public class KubernetesClusterActionWorker {
}
protected void deleteTemplateLaunchPermission() {
if (clusterTemplate != null && owner != null) {
if (isDefaultTemplateUsed() && owner != null) {
logger.info("Revoking launch permission for systemVM template");
launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId()));
}
@ -690,4 +699,34 @@ public class KubernetesClusterActionWorker {
public void setKeys(String[] keys) {
this.keys = keys;
}
/**
 * Resolves the service offering used for the given node type on the cluster: the node
 * type's dedicated offering when set, otherwise the cluster's default offering. For ETCD
 * an offering is only resolved when the cluster actually has etcd nodes.
 *
 * @throws CloudRuntimeException when no offering can be resolved for the node type
 */
protected ServiceOffering getServiceOfferingForNodeTypeOnCluster(KubernetesClusterNodeType nodeType,
                                                                 KubernetesCluster cluster) {
    final Long defaultOfferingId = cluster.getServiceOfferingId();
    Long resolvedOfferingId = null;
    switch (nodeType) {
        case CONTROL:
            Long controlOfferingId = cluster.getControlServiceOfferingId();
            resolvedOfferingId = controlOfferingId != null ? controlOfferingId : defaultOfferingId;
            break;
        case WORKER:
            Long workerOfferingId = cluster.getWorkerServiceOfferingId();
            resolvedOfferingId = workerOfferingId != null ? workerOfferingId : defaultOfferingId;
            break;
        case ETCD:
            if (cluster.getEtcdNodeCount() != null && cluster.getEtcdNodeCount() > 0) {
                Long etcdOfferingId = cluster.getEtcdServiceOfferingId();
                resolvedOfferingId = etcdOfferingId != null ? etcdOfferingId : defaultOfferingId;
            }
            break;
        default:
            break;
    }
    if (resolvedOfferingId == null) {
        String msg = String.format("Cannot find a service offering for the %s nodes on the Kubernetes cluster %s", nodeType.name(), cluster.getName());
        logger.error(msg);
        throw new CloudRuntimeException(msg);
    }
    return serviceOfferingDao.findById(resolvedOfferingId);
}
/**
 * Returns true when the cluster's default template ID is also registered as the control,
 * worker or etcd node template — i.e. at least one node type still uses the default
 * (system) template, whose launch permission must then be managed for the owner.
 */
protected boolean isDefaultTemplateUsed() {
    // Idiom fix: return the boolean expression directly instead of if/return true/false
    return Arrays.asList(kubernetesCluster.getControlTemplateId(), kubernetesCluster.getWorkerTemplateId(),
            kubernetesCluster.getEtcdTemplateId()).contains(kubernetesCluster.getTemplateId());
}
}

View File

@ -17,6 +17,9 @@
package com.cloud.kubernetes.cluster.actionworkers;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.WORKER;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
import java.io.File;
@ -31,6 +34,8 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType;
import com.cloud.network.rules.FirewallManager;
import com.cloud.offering.NetworkOffering;
import com.cloud.offerings.dao.NetworkOfferingDao;
import org.apache.cloudstack.api.ApiConstants;
@ -136,6 +141,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
@Inject
protected RulesService rulesService;
@Inject
protected FirewallManager firewallManager;
@Inject
protected PortForwardingRulesDao portForwardingRulesDao;
@Inject
protected ResourceManager resourceManager;
@ -376,7 +383,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
ResourceUnavailableException, InsufficientCapacityException {
UserVm nodeVm = null;
DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(WORKER, kubernetesCluster);
List<Long> networkIds = new ArrayList<Long>();
networkIds.add(kubernetesCluster.getNetworkId());
Account owner = accountDao.findById(kubernetesCluster.getAccountId());
@ -406,12 +413,12 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
if (zone.isSecurityGroupEnabled()) {
List<Long> securityGroupIds = new ArrayList<>();
securityGroupIds.add(kubernetesCluster.getSecurityGroupId());
nodeVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner,
nodeVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, securityGroupIds, owner,
hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs,
null, addrs, null, null, null, customParameterMap, null, null, null,
null, true, null, UserVmManager.CKS_NODE);
} else {
nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, workerNodeTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs,
null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null);
@ -532,16 +539,22 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, final List<Long> removedVMIds) throws ResourceUnavailableException {
if (!CollectionUtils.isEmpty(removedVMIds)) {
List<PortForwardingRuleVO> pfRules = new ArrayList<>();
List<PortForwardingRuleVO> revokedRules = new ArrayList<>();
for (Long vmId : removedVMIds) {
List<PortForwardingRuleVO> pfRules = portForwardingRulesDao.listByNetwork(network.getId());
pfRules.addAll(portForwardingRulesDao.listByNetwork(network.getId()));
for (PortForwardingRuleVO pfRule : pfRules) {
if (pfRule.getVirtualMachineId() == vmId) {
portForwardingRulesDao.remove(pfRule.getId());
LOGGER.trace("Marking PF rule " + pfRule + " with Revoke state");
pfRule.setState(FirewallRule.State.Revoke);
revokedRules.add(pfRule);
break;
}
}
}
rulesService.applyPortForwardingRules(publicIp.getId(), account);
firewallManager.applyRules(revokedRules, false, true);
}
}
@ -784,7 +797,11 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
}
protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size,
final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) {
final Long serviceOfferingId, final Boolean autoscaleEnabled,
final Long minSize, final Long maxSize,
final KubernetesClusterNodeType nodeType,
final boolean updateNodeOffering,
final boolean updateClusterOffering) {
return Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
@Override
public KubernetesClusterVO doInTransaction(TransactionStatus status) {
@ -798,7 +815,16 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
if (size != null) {
updatedCluster.setNodeCount(size);
}
if (serviceOfferingId != null) {
if (updateNodeOffering && serviceOfferingId != null && nodeType != null) {
if (WORKER == nodeType) {
updatedCluster.setWorkerServiceOfferingId(serviceOfferingId);
} else if (CONTROL == nodeType) {
updatedCluster.setControlServiceOfferingId(serviceOfferingId);
} else if (ETCD == nodeType) {
updatedCluster.setEtcdServiceOfferingId(serviceOfferingId);
}
}
if (updateClusterOffering && serviceOfferingId != null) {
updatedCluster.setServiceOfferingId(serviceOfferingId);
}
if (autoscaleEnabled != null) {
@ -812,7 +838,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
}
private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException {
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize);
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize, null, false, false);
if (kubernetesClusterVO == null) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster",
kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);

View File

@ -19,13 +19,17 @@ package com.cloud.kubernetes.cluster.actionworkers;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType;
import com.cloud.service.ServiceOfferingVO;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
@ -58,12 +62,17 @@ import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.logging.log4j.Level;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.DEFAULT;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.WORKER;
public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModifierActionWorker {
@Inject
protected VMInstanceDao vmInstanceDao;
private ServiceOffering serviceOffering;
private Map<String, ServiceOffering> serviceOfferingNodeTypeMap;
private Long clusterSize;
private List<Long> nodeIds;
private KubernetesCluster.State originalState;
@ -73,8 +82,12 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
private Boolean isAutoscalingEnabled;
private long scaleTimeoutTime;
protected KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
}
public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster,
final ServiceOffering serviceOffering,
final Map<String, ServiceOffering> serviceOfferingNodeTypeMap,
final Long clusterSize,
final List<Long> nodeIds,
final Boolean isAutoscalingEnabled,
@ -82,7 +95,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
final Long maxSize,
final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
this.serviceOffering = serviceOffering;
this.serviceOfferingNodeTypeMap = serviceOfferingNodeTypeMap;
this.nodeIds = nodeIds;
this.isAutoscalingEnabled = isAutoscalingEnabled;
this.minSize = minSize;
@ -174,15 +187,19 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
scaleKubernetesClusterIsolatedNetworkRules(clusterVMIds);
}
private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException {
private KubernetesClusterVO updateKubernetesClusterEntryForNodeType(final Long newWorkerSize, final KubernetesClusterNodeType nodeType,
final ServiceOffering newServiceOffering,
final boolean updateNodeOffering, boolean updateClusterOffering) throws CloudRuntimeException {
final ServiceOffering serviceOffering = newServiceOffering == null ?
serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering;
final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId();
final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount());
final long cores = serviceOffering.getCpu() * size;
final long memory = serviceOffering.getRamSize() * size;
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId,
kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize());
Pair<Long, Long> clusterCountAndCapacity = calculateNewClusterCountAndCapacity(newWorkerSize, nodeType, serviceOffering);
long cores = clusterCountAndCapacity.first();
long memory = clusterCountAndCapacity.second();
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newWorkerSize, serviceOfferingId,
kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize(), nodeType, updateNodeOffering, updateClusterOffering);
if (kubernetesClusterVO == null) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster",
kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
@ -190,6 +207,55 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
return kubernetesClusterVO;
}
/**
 * Computes the cluster-wide (cores, memory) totals that the cluster record should carry
 * after a scale operation on the given node type.
 *
 * @param newWorkerSize   new number of worker nodes, or null to keep the current worker count
 * @param nodeType        node type being scaled; DEFAULT means the whole cluster shares one offering
 * @param serviceOffering offering that the scaled nodes will use after the operation
 * @return Pair of (total cores, total memory) for the whole cluster
 */
protected Pair<Long, Long> calculateNewClusterCountAndCapacity(Long newWorkerSize, KubernetesClusterNodeType nodeType, ServiceOffering serviceOffering) {
    long cores;
    long memory;
    // Total node count: control + etcd nodes are unchanged by a worker resize.
    long totalClusterSize = newWorkerSize == null ? kubernetesCluster.getTotalNodeCount() : (newWorkerSize + kubernetesCluster.getControlNodeCount() + kubernetesCluster.getEtcdNodeCount());
    if (nodeType == DEFAULT) {
        // Single offering for every node: capacity is simply offering * total size.
        cores = serviceOffering.getCpu() * totalClusterSize;
        memory = serviceOffering.getRamSize() * totalClusterSize;
    } else {
        // Per-node-type scaling: subtract the type's previous contribution and add the new one.
        long nodeCount = getNodeCountForType(nodeType, kubernetesCluster);
        Long existingOfferingId = getExistingOfferingIdForNodeType(nodeType, kubernetesCluster);
        ServiceOfferingVO previousOffering = serviceOfferingDao.findById(existingOfferingId);
        // Previous capacity is computed with the CURRENT node count for this type.
        Pair<Long, Long> previousNodesCapacity = calculateNodesCapacity(previousOffering, nodeCount);
        if (WORKER == nodeType) {
            // Workers may also change count; the new capacity must use the requested size.
            nodeCount = newWorkerSize == null ? kubernetesCluster.getNodeCount() : newWorkerSize;
        }
        Pair<Long, Long> newNodesCapacity = calculateNodesCapacity(serviceOffering, nodeCount);
        Pair<Long, Long> newClusterCapacity = calculateClusterNewCapacity(kubernetesCluster, previousNodesCapacity, newNodesCapacity);
        cores = newClusterCapacity.first();
        memory = newClusterCapacity.second();
    }
    return new Pair<>(cores, memory);
}
/**
 * Resolves the number of nodes of the given type currently in the cluster.
 * Any other type (including null) yields the total node count.
 */
private long getNodeCountForType(KubernetesClusterNodeType nodeType, KubernetesCluster kubernetesCluster) {
    if (nodeType == null) {
        return kubernetesCluster.getTotalNodeCount();
    }
    switch (nodeType) {
        case WORKER:
            return kubernetesCluster.getNodeCount();
        case CONTROL:
            return kubernetesCluster.getControlNodeCount();
        case ETCD:
            return kubernetesCluster.getEtcdNodeCount();
        default:
            return kubernetesCluster.getTotalNodeCount();
    }
}
/**
 * Computes the cluster totals after replacing one node type's capacity contribution:
 * the delta between the new and previous per-type capacity is applied to the
 * cluster's current (cores, memory) totals.
 */
protected Pair<Long, Long> calculateClusterNewCapacity(KubernetesCluster kubernetesCluster,
                                                       Pair<Long, Long> previousNodeTypeCapacity,
                                                       Pair<Long, Long> newNodeTypeCapacity) {
    final long coresDelta = newNodeTypeCapacity.first() - previousNodeTypeCapacity.first();
    final long memoryDelta = newNodeTypeCapacity.second() - previousNodeTypeCapacity.second();
    return new Pair<>(kubernetesCluster.getCores() + coresDelta, kubernetesCluster.getMemory() + memoryDelta);
}
/** Total (cores, memory) consumed by {@code nodeCount} nodes using the given offering. */
protected Pair<Long, Long> calculateNodesCapacity(ServiceOffering offering, long nodeCount) {
    final long totalCores = offering.getCpu() * nodeCount;
    final long totalMemory = offering.getRamSize() * nodeCount;
    return new Pair<>(totalCores, totalMemory);
}
private boolean removeKubernetesClusterNode(final String ipAddress, final int port, final UserVm userVm, final int retries, final int waitDuration) {
File pkFile = getManagementServerSshPublicKeyFile();
int retryCounter = 0;
@ -280,17 +346,18 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
}
}
private void scaleKubernetesClusterOffering() throws CloudRuntimeException {
private void scaleKubernetesClusterOffering(KubernetesClusterNodeType nodeType, ServiceOffering serviceOffering,
boolean updateNodeOffering, boolean updateClusterOffering) throws CloudRuntimeException {
validateKubernetesClusterScaleOfferingParameters();
if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
}
if (KubernetesCluster.State.Created.equals(originalState)) {
kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
return;
}
final long size = kubernetesCluster.getTotalNodeCount();
List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
final long size = getNodeCountForType(nodeType, kubernetesCluster);
List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterIdAndVmType(kubernetesCluster.getId(), nodeType);
final long tobeScaledVMCount = Math.min(vmList.size(), size);
for (long i = 0; i < tobeScaledVMCount; i++) {
KubernetesClusterVmMapVO vmMapVO = vmList.get((int) i);
@ -308,7 +375,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
}
kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
}
private void removeNodesFromCluster(List<KubernetesClusterVmMapVO> vmMaps) throws CloudRuntimeException {
@ -363,8 +430,10 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
}
List<UserVm> clusterVMs = new ArrayList<>();
LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId());
launchPermissionDao.persist(launchPermission);
if (isDefaultTemplateUsed()) {
LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId());
launchPermissionDao.persist(launchPermission);
}
try {
clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress);
updateLoginUserDetails(clusterVMs.stream().map(InternalIdentity::getId).collect(Collectors.toList()));
@ -389,7 +458,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
}
}
private void scaleKubernetesClusterSize() throws CloudRuntimeException {
private void scaleKubernetesClusterSize(KubernetesClusterNodeType nodeType) throws CloudRuntimeException {
validateKubernetesClusterScaleSizeParameters();
final long originalClusterSize = kubernetesCluster.getNodeCount();
final long newVmRequiredCount = clusterSize - originalClusterSize;
@ -397,7 +466,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
stateTransitTo(kubernetesCluster.getId(), newVmRequiredCount > 0 ? KubernetesCluster.Event.ScaleUpRequested : KubernetesCluster.Event.ScaleDownRequested);
}
kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
kubernetesCluster = updateKubernetesClusterEntryForNodeType(null, nodeType, serviceOfferingNodeTypeMap.get(nodeType.name()), false, false);
return;
}
Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
@ -411,7 +480,7 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
} else { // upscale, same node count handled above
scaleUpKubernetesClusterSize(newVmRequiredCount);
}
kubernetesCluster = updateKubernetesClusterEntry(clusterSize, null);
kubernetesCluster = updateKubernetesClusterEntryForNodeType(clusterSize, nodeType, null, false, false);
}
private boolean isAutoscalingChanged() {
@ -434,37 +503,88 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
}
scaleTimeoutTime = System.currentTimeMillis() + KubernetesClusterService.KubernetesClusterScaleTimeout.value() * 1000;
final long originalClusterSize = kubernetesCluster.getNodeCount();
final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
if (existingServiceOffering == null) {
logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName()));
if (serviceOfferingNodeTypeMap.containsKey(DEFAULT.name())) {
final ServiceOffering existingServiceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
if (existingServiceOffering == null) {
logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName()));
}
}
final boolean autscalingChanged = isAutoscalingChanged();
final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId();
if (autscalingChanged) {
boolean autoScaled = autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize);
if (autoScaled && serviceOfferingScalingNeeded) {
scaleKubernetesClusterOffering();
final boolean autoscalingChanged = isAutoscalingChanged();
boolean hasDefaultOffering = serviceOfferingNodeTypeMap.containsKey(DEFAULT.name());
Long existingDefaultOfferingId = kubernetesCluster.getServiceOfferingId();
ServiceOffering defaultServiceOffering = serviceOfferingNodeTypeMap.getOrDefault(DEFAULT.name(), null);
for (KubernetesClusterNodeType nodeType : Arrays.asList(CONTROL, ETCD, WORKER)) {
if (!hasDefaultOffering && !serviceOfferingNodeTypeMap.containsKey(nodeType.name())) {
continue;
}
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
return autoScaled;
}
final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize;
final long newVMRequired = clusterSize == null ? 0 : clusterSize - originalClusterSize;
if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) {
if (newVMRequired > 0) {
scaleKubernetesClusterOffering();
scaleKubernetesClusterSize();
} else {
scaleKubernetesClusterSize();
scaleKubernetesClusterOffering();
boolean isWorkerNodeOrAllNodes = WORKER == nodeType;
boolean serviceOfferingScalingNeeded = isServiceOfferingScalingNeededForNodeType(nodeType, serviceOfferingNodeTypeMap, kubernetesCluster, existingDefaultOfferingId);
ServiceOffering serviceOffering = serviceOfferingNodeTypeMap.getOrDefault(nodeType.name(), defaultServiceOffering);
boolean updateNodeOffering = serviceOfferingNodeTypeMap.containsKey(nodeType.name());
boolean updateClusterOffering = isWorkerNodeOrAllNodes && hasDefaultOffering;
if (isWorkerNodeOrAllNodes && autoscalingChanged) {
boolean autoScaled = autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize);
if (autoScaled && serviceOfferingScalingNeeded) {
scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
}
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
return autoScaled;
}
final boolean clusterSizeScalingNeeded = isWorkerNodeOrAllNodes && clusterSize != null && clusterSize != originalClusterSize;
final long newVMRequired = (!isWorkerNodeOrAllNodes || clusterSize == null) ? 0 : clusterSize - originalClusterSize;
if (serviceOfferingScalingNeeded && clusterSizeScalingNeeded) {
if (newVMRequired > 0) {
scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
scaleKubernetesClusterSize(nodeType);
} else {
scaleKubernetesClusterSize(nodeType);
scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
}
} else if (serviceOfferingScalingNeeded) {
scaleKubernetesClusterOffering(nodeType, serviceOffering, updateNodeOffering, updateClusterOffering);
} else if (clusterSizeScalingNeeded) {
scaleKubernetesClusterSize(nodeType);
}
} else if (serviceOfferingScalingNeeded) {
scaleKubernetesClusterOffering();
} else if (clusterSizeScalingNeeded) {
scaleKubernetesClusterSize();
}
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
return true;
}
/**
 * Determines whether the given node type's service offering must be changed as part of
 * a scale operation.
 *
 * @param nodeType                  node type being evaluated
 * @param map                       requested node-type name -> offering map (DEFAULT overrides per-type entries)
 * @param kubernetesCluster         the cluster being scaled
 * @param existingDefaultOfferingId cluster-wide offering ID, used when DEFAULT is requested
 * @return true when a new offering was requested and it differs from the current one
 */
protected boolean isServiceOfferingScalingNeededForNodeType(KubernetesClusterNodeType nodeType,
                                                            Map<String, ServiceOffering> map, KubernetesCluster kubernetesCluster,
                                                            Long existingDefaultOfferingId) {
    // When a DEFAULT (cluster-wide) offering is requested it takes precedence over per-type offerings.
    final boolean hasDefaultOffering = map.containsKey(DEFAULT.name());
    Long existingOfferingId = hasDefaultOffering ?
            existingDefaultOfferingId :
            getExistingOfferingIdForNodeType(nodeType, kubernetesCluster);
    if (existingOfferingId == null) {
        logAndThrow(Level.ERROR, String.format("The Kubernetes cluster %s does not have a global service offering set", kubernetesCluster.getName()));
    }
    ServiceOffering existingOffering = serviceOfferingDao.findById(existingOfferingId);
    if (existingOffering == null) {
        logAndThrow(Level.ERROR, String.format("Cannot find the global service offering with ID %s set on the Kubernetes cluster %s", existingOfferingId, kubernetesCluster.getName()));
    }
    ServiceOffering newOffering = hasDefaultOffering ? map.get(DEFAULT.name()) : map.get(nodeType.name());
    if (newOffering == null) {
        // Bug fix: the original message formatted the (null) newOffering itself and always printed "ID null";
        // report the node type and cluster instead so the error is actionable.
        logAndThrow(Level.ERROR, String.format("Cannot find the requested service offering for the %s nodes of the Kubernetes cluster %s", nodeType.name(), kubernetesCluster.getName()));
    }
    // Defensive null check retained in case logAndThrow does not interrupt control flow.
    return newOffering != null && newOffering.getId() != existingOffering.getId();
}
/**
 * Returns the service offering ID currently associated with the given node type,
 * falling back to the cluster-wide offering when no per-type offering is set
 * (or when the node type is unrecognized/null).
 */
protected Long getExistingOfferingIdForNodeType(KubernetesClusterNodeType nodeType, KubernetesCluster kubernetesCluster) {
    if (nodeType == null) {
        return kubernetesCluster.getServiceOfferingId();
    }
    Long perTypeOfferingId;
    switch (nodeType) {
        case WORKER:
            perTypeOfferingId = kubernetesCluster.getWorkerServiceOfferingId();
            break;
        case CONTROL:
            perTypeOfferingId = kubernetesCluster.getControlServiceOfferingId();
            break;
        case ETCD:
            perTypeOfferingId = kubernetesCluster.getEtcdServiceOfferingId();
            break;
        default:
            perTypeOfferingId = null;
            break;
    }
    return perTypeOfferingId != null ? perTypeOfferingId : kubernetesCluster.getServiceOfferingId();
}
}

View File

@ -75,6 +75,8 @@ import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VmDetailConstants;
import org.apache.logging.log4j.Level;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
public class KubernetesClusterStartWorker extends KubernetesClusterResourceModifierActionWorker {
private KubernetesSupportedVersion kubernetesClusterVersion;
@ -183,7 +185,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
ResourceUnavailableException, InsufficientCapacityException {
UserVm controlVm = null;
DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(CONTROL, kubernetesCluster);
List<Long> networkIds = new ArrayList<Long>();
networkIds.add(kubernetesCluster.getNetworkId());
Pair<String, Map<Long, Network.IpAddresses>> ipAddresses = getKubernetesControlNodeIpAddresses(zone, network, owner);
@ -215,15 +217,16 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) {
keypairs.add(kubernetesCluster.getKeyPair());
}
if (zone.isSecurityGroupEnabled()) {
List<Long> securityGroupIds = new ArrayList<>();
securityGroupIds.add(kubernetesCluster.getSecurityGroupId());
controlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner,
controlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, securityGroupIds, owner,
hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs,
requestedIps, addrs, null, null, null, customParameterMap, null, null, null,
null, true, null, UserVmManager.CKS_NODE);
} else {
controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs,
requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null);
@ -263,7 +266,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
ResourceUnavailableException, InsufficientCapacityException {
UserVm additionalControlVm = null;
DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId());
ServiceOffering serviceOffering = getServiceOfferingForNodeTypeOnCluster(CONTROL, kubernetesCluster);
List<Long> networkIds = new ArrayList<Long>();
networkIds.add(kubernetesCluster.getNetworkId());
Network.IpAddresses addrs = new Network.IpAddresses(null, null);
@ -289,15 +292,16 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (StringUtils.isNotBlank(kubernetesCluster.getKeyPair())) {
keypairs.add(kubernetesCluster.getKeyPair());
}
if (zone.isSecurityGroupEnabled()) {
List<Long> securityGroupIds = new ArrayList<>();
securityGroupIds.add(kubernetesCluster.getSecurityGroupId());
additionalControlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, securityGroupIds, owner,
additionalControlVm = userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, securityGroupIds, owner,
hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST,base64UserData, null, null, keypairs,
null, addrs, null, null, null, customParameterMap, null, null, null,
null, true, null, UserVmManager.CKS_NODE);
} else {
additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, controlNodeTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, null, null, keypairs,
null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE, null);
@ -493,8 +497,10 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
}
// Allow account creating the kubernetes cluster to access systemVM template
LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId());
launchPermissionDao.persist(launchPermission);
if (isDefaultTemplateUsed()) {
LaunchPermissionVO launchPermission = new LaunchPermissionVO(kubernetesCluster.getTemplateId(), owner.getId());
launchPermissionDao.persist(launchPermission);
}
List<UserVm> clusterVMs = new ArrayList<>();
UserVm k8sControlVM = null;
@ -568,6 +574,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
return true;
}
public boolean startStoppedKubernetesCluster() throws CloudRuntimeException {
init();
if (logger.isInfoEnabled()) {

View File

@ -16,6 +16,7 @@
// under the License.
package com.cloud.kubernetes.cluster.dao;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.GenericDao;
@ -30,4 +31,6 @@ public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterV
int removeByClusterIdAndVmIdsIn(long clusterId, List<Long> vmIds);
public int removeByClusterId(long clusterId);
List<KubernetesClusterVmMapVO> listByClusterIdAndVmType(long clusterId, KubernetesClusterNodeType nodeType);
}

View File

@ -18,6 +18,7 @@ package com.cloud.kubernetes.cluster.dao;
import java.util.List;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
@ -26,6 +27,9 @@ import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
@Component
public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClusterVmMapVO, Long> implements KubernetesClusterVmMapDao {
@ -37,6 +41,8 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClus
clusterIdSearch = createSearchBuilder();
clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
clusterIdSearch.and("vmIdsIN", clusterIdSearch.entity().getVmId(), SearchCriteria.Op.IN);
clusterIdSearch.and("controlNode", clusterIdSearch.entity().isControlNode(), SearchCriteria.Op.EQ);
clusterIdSearch.and("etcdNode", clusterIdSearch.entity().isEtcdNode(), SearchCriteria.Op.EQ);
clusterIdSearch.done();
vmIdSearch = createSearchBuilder();
@ -81,4 +87,21 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClus
sc.setParameters("clusterId", clusterId);
return remove(sc);
}
@Override
public List<KubernetesClusterVmMapVO> listByClusterIdAndVmType(long clusterId, KubernetesClusterHelper.KubernetesClusterNodeType nodeType) {
    // A VM map row encodes its node type with two boolean flags; worker nodes (and any
    // other type) are the rows where both flags are false.
    final boolean isControl = CONTROL == nodeType;
    final boolean isEtcd = ETCD == nodeType;
    SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create();
    sc.setParameters("clusterId", clusterId);
    sc.setParameters("controlNode", isControl);
    sc.setParameters("etcdNode", isEtcd);
    return listBy(sc);
}
}

View File

@ -17,10 +17,12 @@
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import java.security.InvalidParameterException;
import java.util.Map;
import javax.inject.Inject;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker.AccessType;
import org.apache.cloudstack.api.ACL;
@ -60,6 +62,8 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
@Inject
public KubernetesClusterService kubernetesClusterService;
@Inject
protected KubernetesClusterHelper kubernetesClusterHelper;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
@ -83,7 +87,23 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
@ACL(accessType = AccessType.UseEntry)
@Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
description = "the ID of the service offering for the virtual machines in the cluster.")
private Long serviceOfferingId;
protected Long serviceOfferingId;
@ACL(accessType = AccessType.UseEntry)
@Parameter(name = ApiConstants.NODE_TYPE_OFFERING_MAP, type = CommandType.MAP,
description = "(Optional) Node Type to Service Offering ID mapping. If provided, it overrides the serviceofferingid parameter")
protected Map<String, Map<String, String>> serviceOfferingNodeTypeMap;
@ACL(accessType = AccessType.UseEntry)
@Parameter(name = ApiConstants.NODE_TYPE_TEMPLATE_MAP, type = CommandType.MAP,
description = "(Optional) Node Type to Template ID mapping. If provided, it overrides the default template: System VM template")
protected Map<String, Map<String, String>> templateNodeTypeMap;
@ACL(accessType = AccessType.UseEntry)
@Parameter(name = ApiConstants.ETCD_NODES, type = CommandType.LONG,
description = "(Optional) Number of Kubernetes cluster etcd nodes, default is 0." +
"In case the number is greater than 0, etcd nodes are separate from master nodes and are provisioned accordingly")
protected Long etcdNodes;
@ACL(accessType = AccessType.UseEntry)
@Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the" +
@ -204,6 +224,10 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
return controlNodes;
}
/** Number of requested etcd nodes; defaults to 0 when the parameter was not supplied. */
public long getEtcdNodes() {
    return etcdNodes != null ? etcdNodes : 0;
}
public String getExternalLoadBalancerIpAddress() {
return externalLoadBalancerIpAddress;
}
@ -242,6 +266,14 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
return clusterType;
}
/** Resolves the raw node-type -> offering API map into node-type name -> offering ID. */
public Map<String, Long> getServiceOfferingNodeTypeMap() {
    return kubernetesClusterHelper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap);
}

/** Resolves the raw node-type -> template API map into node-type name -> template ID. */
public Map<String, Long> getTemplateNodeTypeMap() {
    return kubernetesClusterHelper.getTemplateNodeTypeMap(templateNodeTypeMap);
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -17,9 +17,11 @@
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import com.cloud.kubernetes.cluster.KubernetesClusterHelper;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.api.ACL;
@ -53,6 +55,8 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
@Inject
public KubernetesClusterService kubernetesClusterService;
@Inject
protected KubernetesClusterHelper kubernetesClusterHelper;
/////////////////////////////////////////////////////
//////////////// API parameters /////////////////////
@ -67,6 +71,11 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
description = "the ID of the service offering for the virtual machines in the cluster.")
private Long serviceOfferingId;
@ACL(accessType = SecurityChecker.AccessType.UseEntry)
@Parameter(name = ApiConstants.NODE_TYPE_OFFERING_MAP, type = CommandType.MAP,
description = "(Optional) Node Type to Service Offering ID mapping. If provided, it overrides the serviceofferingid parameter")
protected Map<String, Map<String, String>> serviceOfferingNodeTypeMap;
@Parameter(name=ApiConstants.SIZE, type = CommandType.LONG,
description = "number of Kubernetes cluster nodes")
private Long clusterSize;
@ -102,6 +111,10 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
return serviceOfferingId;
}
/** Resolves the raw node-type -> offering API map into node-type name -> offering ID. */
public Map<String, Long> getServiceOfferingNodeTypeMap() {
    return kubernetesClusterHelper.getServiceOfferingNodeTypeMap(this.serviceOfferingNodeTypeMap);
}

/** Requested number of worker nodes, or null when the size is unchanged. */
public Long getClusterSize() {
    return clusterSize;
}

View File

@ -58,6 +58,34 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple
@Param(description = "the name of the service offering of the Kubernetes cluster")
private String serviceOfferingName;
@SerializedName(ApiConstants.WORKER_SERVICE_OFFERING_ID)
@Param(description = "the ID of the service offering of the worker nodes on the Kubernetes cluster")
private String workerOfferingId;
@SerializedName(ApiConstants.WORKER_SERVICE_OFFERING_NAME)
@Param(description = "the name of the service offering of the worker nodes on the Kubernetes cluster")
private String workerOfferingName;
@SerializedName(ApiConstants.CONTROL_SERVICE_OFFERING_ID)
@Param(description = "the ID of the service offering of the control nodes on the Kubernetes cluster")
private String controlOfferingId;
@SerializedName(ApiConstants.CONTROL_SERVICE_OFFERING_NAME)
@Param(description = "the name of the service offering of the control nodes on the Kubernetes cluster")
private String controlOfferingName;
@SerializedName(ApiConstants.ETCD_SERVICE_OFFERING_ID)
@Param(description = "the ID of the service offering of the etcd nodes on the Kubernetes cluster")
private String etcdOfferingId;
@SerializedName(ApiConstants.ETCD_SERVICE_OFFERING_NAME)
@Param(description = "the name of the service offering of the etcd nodes on the Kubernetes cluster")
private String etcdOfferingName;
@SerializedName(ApiConstants.ETCD_NODES)
@Param(description = "the number of the etcd nodes on the Kubernetes cluster")
private Long etcdNodes;
@SerializedName(ApiConstants.TEMPLATE_ID)
@Param(description = "the ID of the template of the Kubernetes cluster")
private String templateId;
@ -359,6 +387,62 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple
this.serviceOfferingName = serviceOfferingName;
}
// Accessors for the per-node-type service offering fields and the etcd node count
// exposed in the API response; values are populated by the response generator.
public String getWorkerOfferingId() {
    return workerOfferingId;
}

public void setWorkerOfferingId(String workerOfferingId) {
    this.workerOfferingId = workerOfferingId;
}

public String getWorkerOfferingName() {
    return workerOfferingName;
}

public void setWorkerOfferingName(String workerOfferingName) {
    this.workerOfferingName = workerOfferingName;
}

public String getControlOfferingId() {
    return controlOfferingId;
}

public void setControlOfferingId(String controlOfferingId) {
    this.controlOfferingId = controlOfferingId;
}

public String getControlOfferingName() {
    return controlOfferingName;
}

public void setControlOfferingName(String controlOfferingName) {
    this.controlOfferingName = controlOfferingName;
}

public String getEtcdOfferingId() {
    return etcdOfferingId;
}

public void setEtcdOfferingId(String etcdOfferingId) {
    this.etcdOfferingId = etcdOfferingId;
}

public String getEtcdOfferingName() {
    return etcdOfferingName;
}

public void setEtcdOfferingName(String etcdOfferingName) {
    this.etcdOfferingName = etcdOfferingName;
}

public Long getEtcdNodes() {
    return etcdNodes;
}

public void setEtcdNodes(Long etcdNodes) {
    this.etcdNodes = etcdNodes;
}
public void setVirtualMachines(List<UserVmResponse> virtualMachines) {
this.virtualMachines = virtualMachines;
}

View File

@ -0,0 +1,145 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.vm.VmDetailConstants;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.WORKER;
@RunWith(MockitoJUnitRunner.class)
/**
 * Unit tests for {@code KubernetesClusterHelperImpl}: node-type name validation and
 * translation of the API-level node-type -> offering-UUID detail maps into
 * node-type -> offering-DB-id maps.
 *
 * NOTE(review): stubs are declared once in setUp() and shared by all tests; with the
 * strict MockitoJUnitRunner each stub must be consumed by at least one test in the
 * class, so keep that in mind when adding/removing tests here.
 */
public class KubernetesClusterHelperImplTest {
@Mock
private ServiceOfferingDao serviceOfferingDao;
@Mock
private ServiceOfferingVO workerServiceOffering;
@Mock
private ServiceOfferingVO controlServiceOffering;
@Mock
private ServiceOfferingVO etcdServiceOffering;
// Random UUIDs stand in for the API-supplied offering identifiers.
private static final String workerNodesOfferingId = UUID.randomUUID().toString();
private static final String controlNodesOfferingId = UUID.randomUUID().toString();
private static final String etcdNodesOfferingId = UUID.randomUUID().toString();
// Internal DB ids the DAO stubs resolve the UUIDs to.
private static final Long workerOfferingId = 1L;
private static final Long controlOfferingId = 2L;
private static final Long etcdOfferingId = 3L;
private final KubernetesClusterHelperImpl helper = new KubernetesClusterHelperImpl();
@Before
public void setUp() {
// Direct field injection: the helper is built with `new`, not @InjectMocks.
helper.serviceOfferingDao = serviceOfferingDao;
Mockito.when(serviceOfferingDao.findByUuid(workerNodesOfferingId)).thenReturn(workerServiceOffering);
Mockito.when(serviceOfferingDao.findByUuid(controlNodesOfferingId)).thenReturn(controlServiceOffering);
Mockito.when(serviceOfferingDao.findByUuid(etcdNodesOfferingId)).thenReturn(etcdServiceOffering);
Mockito.when(workerServiceOffering.getId()).thenReturn(workerOfferingId);
Mockito.when(controlServiceOffering.getId()).thenReturn(controlOfferingId);
Mockito.when(etcdServiceOffering.getId()).thenReturn(etcdOfferingId);
}
@Test
public void testIsValidNodeTypeEmptyNodeType() {
// null is not a valid node type name.
Assert.assertFalse(helper.isValidNodeType(null));
}
@Test
public void testIsValidNodeTypeInvalidNodeType() {
String nodeType = "invalidNodeType";
Assert.assertFalse(helper.isValidNodeType(nodeType));
}
@Test
public void testIsValidNodeTypeValidNodeTypeLowercase() {
// Node type matching is expected to be case-insensitive: "worker" (lowercase) is accepted.
String nodeType = KubernetesClusterHelper.KubernetesClusterNodeType.WORKER.name().toLowerCase();
Assert.assertTrue(helper.isValidNodeType(nodeType));
}
// Builds one API map entry of the shape { node -> <type>, offering -> <uuid> },
// mirroring the VmDetailConstants keys used by the create-cluster command.
private Map<String, String> createMapEntry(KubernetesClusterHelper.KubernetesClusterNodeType nodeType,
String nodeTypeOfferingUuid) {
Map<String, String> map = new HashMap<>();
map.put(VmDetailConstants.CKS_NODE_TYPE, nodeType.name().toLowerCase());
map.put(VmDetailConstants.OFFERING, nodeTypeOfferingUuid);
return map;
}
@Test
public void testNodeOfferingMap() {
// Two entries (worker + control) must resolve to their respective DB ids.
Map<String, Map<String, String>> serviceOfferingNodeTypeMap = new HashMap<>();
Map<String, String> firstMap = createMapEntry(WORKER, workerNodesOfferingId);
Map<String, String> secondMap = createMapEntry(CONTROL, controlNodesOfferingId);
serviceOfferingNodeTypeMap.put("map1", firstMap);
serviceOfferingNodeTypeMap.put("map2", secondMap);
Map<String, Long> map = helper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap);
Assert.assertNotNull(map);
Assert.assertEquals(2, map.size());
Assert.assertTrue(map.containsKey(WORKER.name()) && map.containsKey(CONTROL.name()));
Assert.assertEquals(workerOfferingId, map.get(WORKER.name()));
Assert.assertEquals(controlOfferingId, map.get(CONTROL.name()));
}
@Test
public void testNodeOfferingMapNullMap() {
// A null input map yields an empty (non-null) result, not an NPE.
Map<String, Long> map = helper.getServiceOfferingNodeTypeMap(null);
Assert.assertTrue(map.isEmpty());
}
@Test
public void testNodeOfferingMapEtcdNodes() {
Map<String, Map<String, String>> serviceOfferingNodeTypeMap = new HashMap<>();
Map<String, String> firstMap = createMapEntry(ETCD, etcdNodesOfferingId);
serviceOfferingNodeTypeMap.put("map1", firstMap);
Map<String, Long> map = helper.getServiceOfferingNodeTypeMap(serviceOfferingNodeTypeMap);
Assert.assertNotNull(map);
Assert.assertEquals(1, map.size());
Assert.assertTrue(map.containsKey(ETCD.name()));
Assert.assertEquals(etcdOfferingId, map.get(ETCD.name()));
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckNodeTypeOfferingEntryCompletenessInvalidParameters() {
// Entry with a node type but no offering id must be rejected.
helper.checkNodeTypeOfferingEntryCompleteness(WORKER.name(), null);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckNodeTypeOfferingEntryValuesInvalidNodeType() {
String invalidNodeType = "invalidNodeTypeName";
helper.checkNodeTypeOfferingEntryValues(invalidNodeType, workerServiceOffering, workerNodesOfferingId);
}
@Test(expected = InvalidParameterValueException.class)
public void testCheckNodeTypeOfferingEntryValuesEmptyOffering() {
// A UUID that did not resolve to an offering (null VO) must be rejected.
String nodeType = WORKER.name();
helper.checkNodeTypeOfferingEntryValues(nodeType, null, workerNodesOfferingId);
}
}

View File

@ -27,16 +27,21 @@ import com.cloud.exception.PermissionDeniedException;
import com.cloud.kubernetes.cluster.actionworkers.KubernetesClusterActionWorker;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
import com.cloud.kubernetes.version.KubernetesSupportedVersion;
import com.cloud.network.Network;
import com.cloud.network.dao.FirewallRulesDao;
import com.cloud.network.rules.FirewallRule;
import com.cloud.network.rules.FirewallRuleVO;
import com.cloud.network.vpc.NetworkACL;
import com.cloud.offering.ServiceOffering;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.User;
import com.cloud.utils.Pair;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.api.BaseCmd;
@ -44,6 +49,7 @@ import org.apache.cloudstack.api.command.user.kubernetes.cluster.AddVirtualMachi
import org.apache.cloudstack.api.command.user.kubernetes.cluster.RemoveVirtualMachinesFromKubernetesClusterCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.commons.collections.MapUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -59,7 +65,14 @@ import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.DEFAULT;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.ETCD;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.WORKER;
@RunWith(MockitoJUnitRunner.class)
public class KubernetesClusterManagerImplTest {
@ -85,6 +98,9 @@ public class KubernetesClusterManagerImplTest {
@Mock
private AccountManager accountManager;
@Mock
private ServiceOfferingDao serviceOfferingDao;
@Spy
@InjectMocks
KubernetesClusterManagerImpl kubernetesClusterManager;
@ -292,4 +308,117 @@ public class KubernetesClusterManagerImplTest {
Mockito.when(kubernetesClusterDao.findById(Mockito.anyLong())).thenReturn(cluster);
Assert.assertTrue(kubernetesClusterManager.removeVmsFromCluster(cmd).size() > 0);
}
@Test
public void testValidateServiceOfferingNodeType() {
// Per-node-type offering map: worker -> offering 1, control -> offering 2.
Map<String, Long> map = new HashMap<>();
map.put(WORKER.name(), 1L);
map.put(CONTROL.name(), 2L);
ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(serviceOffering);
// Non-dynamic offering whose cpu/ram exactly meet the version minimums.
Mockito.when(serviceOffering.isDynamic()).thenReturn(false);
Mockito.when(serviceOffering.getCpu()).thenReturn(2);
Mockito.when(serviceOffering.getRamSize()).thenReturn(2048);
KubernetesSupportedVersion version = Mockito.mock(KubernetesSupportedVersion.class);
Mockito.when(version.getMinimumCpu()).thenReturn(2);
Mockito.when(version.getMinimumRamSize()).thenReturn(2048);
kubernetesClusterManager.validateServiceOfferingForNode(map, 1L, WORKER.name(), null, version);
// The per-node validation must delegate to the generic offering-vs-version check.
Mockito.verify(kubernetesClusterManager).validateServiceOffering(serviceOffering, version);
}
@Test(expected = InvalidParameterValueException.class)
public void testValidateServiceOfferingNodeTypeInvalidOffering() {
Map<String, Long> map = new HashMap<>();
map.put(WORKER.name(), 1L);
map.put(CONTROL.name(), 2L);
ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(serviceOffering);
// Dynamic (custom) offerings are not allowed for CKS nodes -> expect rejection.
Mockito.when(serviceOffering.isDynamic()).thenReturn(true);
kubernetesClusterManager.validateServiceOfferingForNode(map, 1L, WORKER.name(), null, null);
}
@Test
public void testClusterCapacity() {
// Heterogeneous cluster: 2 worker nodes (4 CPU / 4096 MB each) and
// 2 control nodes (2 CPU / 2048 MB each).
long workerOfferingId = 1L;
long controlOfferingId = 2L;
long workerCount = 2L;
long controlCount = 2L;
int workerOfferingCpus = 4;
int workerOfferingMemory = 4096;
int controlOfferingCpus = 2;
int controlOfferingMemory = 2048;
Map<String, Long> map = Map.of(WORKER.name(), workerOfferingId, CONTROL.name(), controlOfferingId);
Map<String, Long> nodeCount = Map.of(WORKER.name(), workerCount, CONTROL.name(), controlCount);
ServiceOfferingVO workerOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(workerOfferingId)).thenReturn(workerOffering);
ServiceOfferingVO controlOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(controlOfferingId)).thenReturn(controlOffering);
Mockito.when(workerOffering.getCpu()).thenReturn(workerOfferingCpus);
Mockito.when(workerOffering.getRamSize()).thenReturn(workerOfferingMemory);
Mockito.when(controlOffering.getCpu()).thenReturn(controlOfferingCpus);
Mockito.when(controlOffering.getRamSize()).thenReturn(controlOfferingMemory);
Pair<Long, Long> pair = kubernetesClusterManager.calculateClusterCapacity(map, nodeCount, 1L);
// Expected totals are the per-type sums: cores in pair.first(), memory in pair.second().
Long expectedCpu = (workerOfferingCpus * workerCount) + (controlOfferingCpus * controlCount);
Long expectedMemory = (workerOfferingMemory * workerCount) + (controlOfferingMemory * controlCount);
Assert.assertEquals(expectedCpu, pair.first());
Assert.assertEquals(expectedMemory, pair.second());
}
@Test
public void testIsAnyNodeOfferingEmptyNullMap() {
// A null map means "no per-node offerings requested": nothing is considered empty.
Assert.assertFalse(kubernetesClusterManager.isAnyNodeOfferingEmpty(null));
}
@Test
public void testIsAnyNodeOfferingEmptyNullValue() {
// One node type (CONTROL) maps to a null offering id -> the map is considered incomplete.
Map<String, Long> nodeTypeOfferings = new HashMap<>();
nodeTypeOfferings.put(ETCD.name(), 2L);
nodeTypeOfferings.put(CONTROL.name(), null);
nodeTypeOfferings.put(WORKER.name(), 1L);
Assert.assertTrue(kubernetesClusterManager.isAnyNodeOfferingEmpty(nodeTypeOfferings));
}
@Test
public void testIsAnyNodeOfferingEmpty() {
// Every present node type has a non-null offering id -> nothing empty.
Map<String, Long> nodeTypeOfferings = new HashMap<>();
nodeTypeOfferings.put(CONTROL.name(), 2L);
nodeTypeOfferings.put(WORKER.name(), 1L);
Assert.assertFalse(kubernetesClusterManager.isAnyNodeOfferingEmpty(nodeTypeOfferings));
}
@Test
public void testCreateNodeTypeToServiceOfferingMapNullMap() {
// With no per-node-type ids supplied, the mapping falls back to the cluster's
// existing (single) service offering under the DEFAULT key.
KubernetesClusterVO clusterVO = Mockito.mock(KubernetesClusterVO.class);
Mockito.when(clusterVO.getServiceOfferingId()).thenReturn(1L);
ServiceOfferingVO offering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(offering);
Map<String, ServiceOffering> mapping = kubernetesClusterManager.createNodeTypeToServiceOfferingMap(new HashMap<>(), null, clusterVO);
Assert.assertFalse(MapUtils.isEmpty(mapping));
Assert.assertTrue(mapping.containsKey(DEFAULT.name()));
Assert.assertEquals(offering, mapping.get(DEFAULT.name()));
}
@Test
public void testCreateNodeTypeToServiceOfferingMap() {
// Each supplied node-type id is resolved through the DAO into its ServiceOffering.
Map<String, Long> idsMap = new HashMap<>();
long workerOfferingId = 1L;
long controlOfferingId = 2L;
idsMap.put(WORKER.name(), workerOfferingId);
idsMap.put(CONTROL.name(), controlOfferingId);
ServiceOfferingVO workerOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(workerOfferingId)).thenReturn(workerOffering);
ServiceOfferingVO controlOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOfferingDao.findById(controlOfferingId)).thenReturn(controlOffering);
Map<String, ServiceOffering> mapping = kubernetesClusterManager.createNodeTypeToServiceOfferingMap(idsMap, null, null);
Assert.assertEquals(2, mapping.size());
Assert.assertTrue(mapping.containsKey(WORKER.name()) && mapping.containsKey(CONTROL.name()));
Assert.assertEquals(workerOffering, mapping.get(WORKER.name()));
Assert.assertEquals(controlOffering, mapping.get(CONTROL.name()));
}
}

View File

@ -0,0 +1,130 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.kubernetes.cluster.actionworkers;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.offering.ServiceOffering;
import com.cloud.service.ServiceOfferingVO;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.utils.Pair;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.Map;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.DEFAULT;
import static com.cloud.kubernetes.cluster.KubernetesClusterHelper.KubernetesClusterNodeType.CONTROL;
@RunWith(MockitoJUnitRunner.class)
/**
 * Unit tests for {@code KubernetesClusterScaleWorker}: deciding whether a per-node-type
 * scale requires an offering change, and recomputing cluster cores/memory for both
 * full-cluster (DEFAULT) and single-node-type scale operations.
 */
public class KubernetesClusterScaleWorkerTest {
@Mock
private KubernetesCluster kubernetesCluster;
@Mock
private KubernetesClusterManagerImpl clusterManager;
@Mock
private ServiceOfferingDao serviceOfferingDao;
private KubernetesClusterScaleWorker worker;
private static final Long defaultOfferingId = 1L;
@Before
public void setUp() {
worker = new KubernetesClusterScaleWorker(kubernetesCluster, clusterManager);
// Direct field injection — the worker is constructed manually, not via @InjectMocks.
worker.serviceOfferingDao = serviceOfferingDao;
}
@Test
public void testIsServiceOfferingScalingNeededForNodeTypeAllNodesSameOffering() {
// Requested offering resolves to the very same VO as the current one -> no scaling needed.
ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
Map<String, ServiceOffering> map = Map.of(DEFAULT.name(), serviceOffering);
Mockito.when(serviceOfferingDao.findById(defaultOfferingId)).thenReturn(serviceOffering);
Assert.assertFalse(worker.isServiceOfferingScalingNeededForNodeType(DEFAULT, map, kubernetesCluster, defaultOfferingId));
}
@Test
public void testIsServiceOfferingScalingNeededForNodeTypeAllNodesDifferentOffering() {
// Requested offering id (4) differs from the current one (1) -> scaling is needed.
ServiceOfferingVO serviceOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(serviceOffering.getId()).thenReturn(defaultOfferingId);
ServiceOfferingVO newOffering = Mockito.mock(ServiceOfferingVO.class);
Mockito.when(newOffering.getId()).thenReturn(4L);
Map<String, ServiceOffering> map = Map.of(DEFAULT.name(), newOffering);
Mockito.when(serviceOfferingDao.findById(defaultOfferingId)).thenReturn(serviceOffering);
Assert.assertTrue(worker.isServiceOfferingScalingNeededForNodeType(DEFAULT, map, kubernetesCluster, defaultOfferingId));
}
@Test
public void testCalculateNewClusterCountAndCapacityAllNodesScaleSize() {
long controlNodes = 3L;
long etcdNodes = 2L;
Mockito.when(kubernetesCluster.getControlNodeCount()).thenReturn(controlNodes);
Mockito.when(kubernetesCluster.getEtcdNodeCount()).thenReturn(etcdNodes);
ServiceOffering newOffering = Mockito.mock(ServiceOffering.class);
int newCores = 4;
int newMemory = 4096;
Mockito.when(newOffering.getCpu()).thenReturn(newCores);
Mockito.when(newOffering.getRamSize()).thenReturn(newMemory);
long newWorkerSize = 4L;
Pair<Long, Long> newClusterCapacity = worker.calculateNewClusterCountAndCapacity(newWorkerSize, DEFAULT, newOffering);
// Scaling DEFAULT applies the new offering to worker, control AND etcd nodes alike,
// so the expected totals use newCores/newMemory for every node type.
long expectedCores = (newCores * newWorkerSize) + (newCores * controlNodes) + (newCores * etcdNodes);
long expectedMemory = (newMemory * newWorkerSize) + (newMemory * controlNodes) + (newMemory * etcdNodes);
Assert.assertEquals(expectedCores, newClusterCapacity.first().longValue());
Assert.assertEquals(expectedMemory, newClusterCapacity.second().longValue());
}
@Test
public void testCalculateNewClusterCountAndCapacityNodeTypeScaleControlOffering() {
long controlNodes = 2L;
Mockito.when(kubernetesCluster.getControlNodeCount()).thenReturn(controlNodes);
ServiceOfferingVO existingOffering = Mockito.mock(ServiceOfferingVO.class);
int existingCores = 2;
int existingMemory = 2048;
Mockito.when(existingOffering.getCpu()).thenReturn(existingCores);
Mockito.when(existingOffering.getRamSize()).thenReturn(existingMemory);
// Cluster totals are seeded as: capacity of the other node types ("remaining")
// plus the control nodes' current contribution, so that swapping only the control
// offering should leave the remaining capacity untouched.
int remainingClusterCpu = 8;
int remainingClusterMemory = 12288;
Mockito.when(kubernetesCluster.getCores()).thenReturn(remainingClusterCpu + (controlNodes * existingCores));
Mockito.when(kubernetesCluster.getMemory()).thenReturn(remainingClusterMemory + (controlNodes * existingMemory));
Mockito.when(kubernetesCluster.getControlServiceOfferingId()).thenReturn(1L);
Mockito.when(serviceOfferingDao.findById(1L)).thenReturn(existingOffering);
ServiceOfferingVO newOffering = Mockito.mock(ServiceOfferingVO.class);
int newCores = 4;
int newMemory = 2048;
Mockito.when(newOffering.getCpu()).thenReturn(newCores);
Mockito.when(newOffering.getRamSize()).thenReturn(newMemory);
// null worker size: only the CONTROL offering changes, node counts stay the same.
Pair<Long, Long> newClusterCapacity = worker.calculateNewClusterCountAndCapacity(null, CONTROL, newOffering);
long expectedCores = remainingClusterCpu + (controlNodes * newCores);
long expectedMemory = remainingClusterMemory + (controlNodes * newMemory);
Assert.assertEquals(expectedCores, newClusterCapacity.first().longValue());
Assert.assertEquals(expectedMemory, newClusterCapacity.second().longValue());
}
}

View File

@ -4385,12 +4385,13 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
}
Boolean isVnf = cmd.getVnf();
Boolean forCks = cmd.getForCks();
return searchForTemplatesInternal(id, cmd.getTemplateName(), cmd.getKeyword(), templateFilter, false,
null, cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(),
cmd.getImageStoreId(), hypervisorType, showDomr, cmd.listInReadyState(), permittedAccounts, caller,
listProjectResourcesCriteria, tags, showRemovedTmpl, cmd.getIds(), parentTemplateId, cmd.getShowUnique(),
templateType, isVnf);
templateType, isVnf, forCks);
}
private Pair<List<TemplateJoinVO>, Integer> searchForTemplatesInternal(Long templateId, String name, String keyword,
@ -4399,7 +4400,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
boolean showDomr, boolean onlyReady, List<Account> permittedAccounts, Account caller,
ListProjectResourcesCriteria listProjectResourcesCriteria, Map<String, String> tags,
boolean showRemovedTmpl, List<Long> ids, Long parentTemplateId, Boolean showUnique, String templateType,
Boolean isVnf) {
Boolean isVnf, Boolean forCks) {
// check if zone is configured, if not, just return empty list
List<HypervisorType> hypers = null;
@ -4581,7 +4582,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
applyPublicTemplateSharingRestrictions(sc, caller);
return templateChecks(isIso, hypers, tags, name, keyword, hyperType, onlyReady, bootable, zoneId, showDomr, caller,
showRemovedTmpl, parentTemplateId, showUnique, templateType, isVnf, searchFilter, sc);
showRemovedTmpl, parentTemplateId, showUnique, templateType, isVnf, forCks, searchFilter, sc);
}
/**
@ -4635,7 +4636,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
private Pair<List<TemplateJoinVO>, Integer> templateChecks(boolean isIso, List<HypervisorType> hypers, Map<String, String> tags, String name, String keyword,
HypervisorType hyperType, boolean onlyReady, Boolean bootable, Long zoneId, boolean showDomr, Account caller,
boolean showRemovedTmpl, Long parentTemplateId, Boolean showUnique, String templateType, Boolean isVnf,
boolean showRemovedTmpl, Long parentTemplateId, Boolean showUnique, String templateType, Boolean isVnf, Boolean forCks,
Filter searchFilter, SearchCriteria<TemplateJoinVO> sc) {
if (!isIso) {
// add hypervisor criteria for template case
@ -4729,6 +4730,10 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
}
if (forCks != null) {
sc.addAnd("forCks", SearchCriteria.Op.EQ, forCks);
}
// don't return removed template, this should not be needed since we
// changed annotation for removed field in TemplateJoinVO.
// sc.addAnd("removed", SearchCriteria.Op.NULL);
@ -4822,7 +4827,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true, cmd.isBootable(),
cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), cmd.getStoragePoolId(), cmd.getImageStoreId(),
hypervisorType, true, cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria,
tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null);
tags, showRemovedISO, null, null, cmd.getShowUnique(), null, null, null);
}
@Override

View File

@ -317,6 +317,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
templateResponse.setDetails(details);
setDeployAsIsDetails(template, templateResponse);
templateResponse.setForCks(template.isForCks());
}
// update tag information

View File

@ -238,6 +238,9 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont
@Column(name = "deploy_as_is")
private boolean deployAsIs;
@Column(name = "for_cks")
private boolean forCks;
@Column(name = "user_data_id")
private Long userDataId;
@ -516,6 +519,10 @@ public class TemplateJoinVO extends BaseViewWithTagInformationVO implements Cont
return deployAsIs;
}
public boolean isForCks() {
return forCks;
}
public Object getParentTemplateId() {
return parentTemplateId;
}

View File

@ -53,6 +53,7 @@ public class TemplateProfile {
TemplateType templateType;
Boolean directDownload;
Boolean deployAsIs;
Boolean forCks;
Long size;
public TemplateProfile(Long templateId, Long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHvm, String url,
@ -337,4 +338,12 @@ public class TemplateProfile {
public boolean isDeployAsIs() {
return this.deployAsIs;
}
public Boolean isForCks() {
return forCks;
}
public void setForCks(Boolean forCks) {
this.forCks = forCks;
}
}

View File

@ -29,10 +29,10 @@ public class TemplateUploadParams extends UploadParamsBase {
Long zoneId, Hypervisor.HypervisorType hypervisorType, String chksum,
String templateTag, long templateOwnerId,
Map details, Boolean sshkeyEnabled,
Boolean isDynamicallyScalable, Boolean isRoutingType, boolean deployAsIs) {
Boolean isDynamicallyScalable, Boolean isRoutingType, boolean deployAsIs, boolean forCks) {
super(userId, name, displayText, bits, passwordEnabled, requiresHVM, isPublic, featured, isExtractable,
format, guestOSId, zoneId, hypervisorType, chksum, templateTag, templateOwnerId, details,
sshkeyEnabled, isDynamicallyScalable, isRoutingType, deployAsIs);
sshkeyEnabled, isDynamicallyScalable, isRoutingType, deployAsIs, forCks);
setBootable(true);
}
}

View File

@ -45,6 +45,7 @@ public abstract class UploadParamsBase implements UploadParams {
private boolean isDynamicallyScalable;
private boolean isRoutingType;
private boolean deployAsIs;
private boolean forCks;
UploadParamsBase(long userId, String name, String displayText,
Integer bits, boolean passwordEnabled, boolean requiresHVM,
@ -53,7 +54,7 @@ public abstract class UploadParamsBase implements UploadParams {
Long zoneId, Hypervisor.HypervisorType hypervisorType, String checksum,
String templateTag, long templateOwnerId,
Map details, boolean sshkeyEnabled,
boolean isDynamicallyScalable, boolean isRoutingType, boolean deployAsIs) {
boolean isDynamicallyScalable, boolean isRoutingType, boolean deployAsIs, boolean forCks) {
this.userId = userId;
this.name = name;
this.displayText = displayText;
@ -229,6 +230,10 @@ public abstract class UploadParamsBase implements UploadParams {
this.bootable = bootable;
}
void setForCks(boolean forCks) {
this.forCks = forCks;
}
void setBits(Integer bits) {
this.bits = bits;
}

View File

@ -244,6 +244,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(),
hypervisor, url, cmd.getZoneIds(), followRedirects);
profile.setSize(templateSize);
profile.setForCks(cmd.isForCks());
}
profile.setUrl(url);
// Check that the resource limit for secondary storage won't be exceeded

View File

@ -78,6 +78,6 @@ public interface TemplateAdapter extends Adapter {
TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url, Boolean isPublic,
Boolean featured, Boolean isExtractable, String format, Long guestOSId, List<Long> zoneId, HypervisorType hypervisorType, String chksum, Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshKeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable,
TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException;
TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException;
}

View File

@ -134,14 +134,14 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List<Long> zoneId, HypervisorType hypervisorType, String accountName,
Long domainId, String chksum, Boolean bootable, Map details, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException {
return prepare(isIso, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, format, guestOSId, zoneId,
hypervisorType, chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload, deployAsIs);
hypervisorType, chksum, bootable, null, null, details, false, null, false, TemplateType.USER, directDownload, deployAsIs, false);
}
@Override
public TemplateProfile prepare(boolean isIso, long userId, String name, String displayText, Integer bits, Boolean passwordEnabled, Boolean requiresHVM, String url,
Boolean isPublic, Boolean featured, Boolean isExtractable, String format, Long guestOSId, List<Long> zoneIdList, HypervisorType hypervisorType, String chksum,
Boolean bootable, String templateTag, Account templateOwner, Map details, Boolean sshkeyEnabled, String imageStoreUuid, Boolean isDynamicallyScalable,
TemplateType templateType, boolean directDownload, boolean deployAsIs) throws ResourceAllocationException {
TemplateType templateType, boolean directDownload, boolean deployAsIs, boolean forCks) throws ResourceAllocationException {
//Long accountId = null;
// parameters verification
@ -262,9 +262,11 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
Long id = _tmpltDao.getNextInSequence(Long.class, "id");
CallContext.current().setEventDetails("Id: " + id + " name: " + name);
return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList,
TemplateProfile profile = new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneIdList,
hypervisorType, templateOwner.getAccountName(), templateOwner.getDomainId(), templateOwner.getAccountId(), chksum, bootable, templateTag, details,
sshkeyEnabled, null, isDynamicallyScalable, templateType, directDownload, deployAsIs);
profile.setForCks(forCks);
return profile;
}
@ -309,7 +311,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
return prepare(false, CallContext.current().getCallingUserId(), cmd.getTemplateName(), cmd.getDisplayText(), cmd.getBits(), cmd.isPasswordEnabled(), cmd.getRequiresHvm(),
cmd.getUrl(), cmd.isPublic(), cmd.isFeatured(), cmd.isExtractable(), cmd.getFormat(), cmd.getOsTypeId(), zoneId, hypervisorType, cmd.getChecksum(), true,
cmd.getTemplateTag(), owner, details, cmd.isSshKeyEnabled(), null, cmd.isDynamicallyScalable(), templateType,
cmd.isDirectDownload(), cmd.isDeployAsIs());
cmd.isDirectDownload(), cmd.isDeployAsIs(), cmd.isForCks());
}
@ -342,7 +344,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
params.isExtractable(), params.getFormat(), params.getGuestOSId(), zoneList,
params.getHypervisorType(), params.getChecksum(), params.isBootable(), params.getTemplateTag(), owner,
params.getDetails(), params.isSshKeyEnabled(), params.getImageStoreUuid(),
params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload(), params.isDeployAsIs());
params.isDynamicallyScalable(), params.isRoutingType() ? TemplateType.ROUTING : TemplateType.USER, params.isDirectDownload(), params.isDeployAsIs(), false);
}
private Long getDefaultDeployAsIsGuestOsId() {
@ -363,7 +365,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
BooleanUtils.toBoolean(cmd.isFeatured()), BooleanUtils.toBoolean(cmd.isExtractable()), cmd.getFormat(), osTypeId,
cmd.getZoneId(), HypervisorType.getType(cmd.getHypervisor()), cmd.getChecksum(),
cmd.getTemplateTag(), cmd.getEntityOwnerId(), cmd.getDetails(), BooleanUtils.toBoolean(cmd.isSshKeyEnabled()),
BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()), cmd.isDeployAsIs());
BooleanUtils.toBoolean(cmd.isDynamicallyScalable()), BooleanUtils.toBoolean(cmd.isRoutingType()), cmd.isDeployAsIs(), cmd.isForCks());
return prepareUploadParamsInternal(params);
}
@ -394,7 +396,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
return prepare(true, CallContext.current().getCallingUserId(), cmd.getIsoName(), cmd.getDisplayText(), 64, cmd.isPasswordEnabled(), true, cmd.getUrl(), cmd.isPublic(),
cmd.isFeatured(), cmd.isExtractable(), ImageFormat.ISO.toString(), cmd.getOsTypeId(), zoneList, HypervisorType.None, cmd.getChecksum(), cmd.isBootable(), null,
owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false);
owner, null, false, cmd.getImageStoreUuid(), cmd.isDynamicallyScalable(), TemplateType.USER, cmd.isDirectDownload(), false, false);
}
protected VMTemplateVO persistTemplate(TemplateProfile profile, VirtualMachineTemplate.State initialState) {
@ -405,6 +407,7 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat
profile.getDisplayText(), profile.isPasswordEnabled(), profile.getGuestOsId(), profile.isBootable(), profile.getHypervisorType(),
profile.getTemplateTag(), profile.getDetails(), profile.isSshKeyEnabled(), profile.IsDynamicallyScalable(), profile.isDirectDownload(), profile.isDeployAsIs());
template.setState(initialState);
template.setForCks(profile.isForCks());
if (profile.isDirectDownload()) {
template.setSize(profile.getSize());

View File

@ -110,7 +110,11 @@ class CsDhcp(CsDataBag):
if gn.get_dns() and device:
sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx)
dns_list = [x for x in gn.get_dns() if x]
if self.config.is_dhcp() and not self.config.use_extdns():
if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' in gn.data and gn.data['is_vr_guest_gateway']):
if gateway in dns_list:
dns_list.remove(gateway)
dns_list.insert(0, ip)
elif self.config.is_dhcp() and not self.config.use_extdns():
guest_ip = self.config.address().get_guest_ip()
if guest_ip and guest_ip in dns_list and ip not in dns_list:
# Replace the default guest IP in VR with the ip in additional IP ranges, if shared network has multiple IP ranges.
@ -142,9 +146,9 @@ class CsDhcp(CsDataBag):
else:
listen_address.append(ip)
# Add localized "data-server" records in /etc/hosts for VPC routers
if self.config.is_vpc() or self.config.is_router():
if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' not in gn.data or (not gn.data['is_vr_guest_gateway'])):
self.add_host(gateway, "%s data-server" % CsHelper.get_hostname())
elif self.config.is_dhcp():
elif self.config.is_dhcp() or (self.config.is_vpc() or self.config.is_router() and gn.data['is_vr_guest_gateway']) :
self.add_host(ip, "%s data-server" % CsHelper.get_hostname())
idx += 1

View File

@ -0,0 +1,54 @@
{
"_license": "Apache License 2.0",
"builders": [
{
"accelerator": "kvm",
"boot_command": [
"c<wait>linux /casper/vmlinuz --- autoinstall ds='nocloud-net;seedfrom=http://{{ .HTTPIP }}:{{ .HTTPPort }}/'",
"<enter><wait>",
"initrd /casper/initrd",
"<enter><wait><wait><wait>",
"boot",
"<enter>"
],
"vm_name": "cks-ubuntu-2204",
"iso_checksum": "sha256:5e38b55d57d94ff029719342357325ed3bda38fa80054f9330dc789cd2d43931",
"iso_url": "https://old-releases.ubuntu.com/releases/jammy/ubuntu-22.04.2-live-server-amd64.iso",
"shutdown_command": "sudo shutdown -P now",
"net_device": "virtio-net",
"output_directory": "../dist",
"format": "qcow2",
"headless": true,
"http_directory": "http",
"ssh_password": "cloud",
"ssh_timeout": "30m",
"ssh_username": "cloud",
"type": "qemu",
"disk_interface": "virtio",
"disk_size": "5000M",
"qemuargs": [
[
"-m",
"2048M"
],
[
"-smp",
"1"
]
]
}
],
"description": "CloudStack SystemVM template",
"provisioners": [
{
"execute_command": "echo 'cloud' | sudo -u root -S bash {{.Path}}",
"scripts": [
"scripts/apt_upgrade.sh",
"scripts/configure_networking.sh",
"scripts/configure-cloud-init.sh",
"scripts/cleanup.sh"
],
"type": "shell"
}
]
}

View File

@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

View File

@ -0,0 +1,103 @@
#cloud-config
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
autoinstall:
version: 1
# Disable ssh server during installation, otherwise packer tries to connect and exceed max attempts
early-commands:
- systemctl stop ssh
# Configure the locale
locale: en_US
keyboard:
layout: us
refresh-installer:
update: yes
channel: stable
# Create a single-partition with no swap space. Kubernetes
# really dislikes the idea of anyone else managing memory.
# For more information on how partitioning is configured,
# please refer to https://curtin.readthedocs.io/en/latest/topics/storage.html.
storage:
swap:
size: 0
grub:
replace_linux_default: false
config:
- type: disk
id: disk-0
size: smallest
grub_device: true
preserve: false
ptable: msdos
wipe: superblock
- type: partition
id: partition-0
device: disk-0
size: -1
number: 1
preserve: false
flag: boot
- type: format
id: format-0
volume: partition-0
fstype: ext4
preserve: false
- type: mount
id: mount-0
device: format-0
path: /
updates: 'all'
ssh:
install-server: true
allow-pw: true
# Customize the list of packages installed.
packages:
- open-vm-tools
- openssh-server
- cloud-init
- wget
- tasksel
# Create the default user.
# Ensures the "cloud" user doesn't require a password to use sudo.
user-data:
disable_root: false
timezone: UTC
users:
- name: cloud
# openssl passwd -6 -stdin <<< cloud
passwd: $6$pAFEBhaCDzN4ZmrO$kMmUuxhPMx447lJ8Mtas8n6uqkojh94nQ7I8poI6Kl4vRGeZKE57utub1cudS1fGyG8HUxK9YHIygd7vCpRFN0
groups: [adm, cdrom, dip, plugdev, lxd, sudo]
lock-passwd: false
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
# This command runs after all other steps; it:
# 1. Disables swapfiles
# 2. Removes the existing swapfile
# 3. Removes the swapfile entry from /etc/fstab
# 4. Removes snapd, https://bugs.launchpad.net/subiquity/+bug/1946609
# 5. Cleans up any packages that are no longer required
# 6. Removes the cached list of packages
late-commands:
- curtin in-target --target=/target -- swapoff -a
- curtin in-target --target=/target -- rm -f /swap.img
- curtin in-target --target=/target -- sed -ri '/\sswap\s/s/^#?/#/' /etc/fstab
- chroot /target apt-get purge -y snapd
- curtin in-target --target=/target -- apt-get purge --auto-remove -y
- curtin in-target --target=/target -- apt-get clean
- curtin in-target --target=/target -- rm -rf /var/lib/apt/lists/*

View File

@ -0,0 +1,37 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Fail fast and trace every command for easier build debugging.
set -e
set -x

# Bring the base image fully up to date and drop package caches so the
# resulting CKS template is as small as possible.
function apt_upgrade() {
  # Export these so apt-get CHILD processes actually run non-interactively;
  # a plain (unexported) assignment never reaches them. Matches cleanup.sh,
  # which already exports DEBIAN_FRONTEND.
  export DEBIAN_FRONTEND=noninteractive
  export DEBIAN_PRIORITY=critical
  # Remove any installer ISO left behind in /root by the base install.
  rm -fv /root/*.iso
  apt-get -q -y update
  apt-get -q -y upgrade
  apt-get -q -y dist-upgrade
  apt-get -y autoremove --purge
  apt-get autoclean
  apt-get clean
}

# Run only when executed directly; no-op when the file is sourced.
return 2>/dev/null || apt_upgrade

View File

@ -0,0 +1,80 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Abort immediately on any command failure.
set -e

# Purge packages that are useless inside a CKS node template (docs,
# locales, laptop tooling, the installer's tasksel metadata) and then
# clear every apt cache to shrink the image.
function cleanup_apt() {
  # Exported so apt-get child processes run without prompting.
  export DEBIAN_FRONTEND=noninteractive
  apt-get -y remove --purge dictionaries-common busybox \
    task-english task-ssh-server tasksel tasksel-data laptop-detect wamerican sharutils \
    nano util-linux-locales krb5-locales
  apt-get -y autoremove --purge
  apt-get autoclean
  apt-get clean
}
# Removing leftover leases and persistent rules so each VM cloned from
# the template requests a fresh DHCP lease instead of reusing this one.
function cleanup_dhcp() {
  rm -f /var/lib/dhcp/*
}
# Make sure Udev doesn't block our network: drop persistent-net rules so
# the NIC in a cloned VM (new MAC address) still comes up as the first
# interface instead of being renamed.
function cleanup_dev() {
  echo "cleaning up udev rules"
  rm -f /etc/udev/rules.d/70-persistent-net.rules
  rm -rf /dev/.udev/
  rm -f /lib/udev/rules.d/75-persistent-net-generator.rules
}
# Remove build scripts, logs, caches, documentation and locale data to
# shrink the final template image as much as possible.
function cleanup_misc() {
  # Scripts
  rm -fr /home/cloud/cloud_scripts*
  rm -f /usr/share/cloud/cloud-scripts.tar
  rm -f /root/.rnd
  rm -f /var/www/html/index.html
  # Logs (duplicate /var/log/messages removal dropped)
  rm -f /var/log/*.log
  rm -f /var/log/apache2/*
  rm -f /var/log/messages
  rm -f /var/log/syslog
  rm -fr /var/log/apt
  rm -fr /var/log/installer
  # Docs and data files
  rm -fr /var/lib/apt/*
  rm -fr /var/cache/apt/*
  rm -fr /var/cache/debconf/*old
  rm -fr /usr/share/doc
  rm -fr /usr/share/man
  rm -fr /usr/share/info
  rm -fr /usr/share/lintian
  rm -fr /usr/share/apache2/icons
  # Keep only en_US locales and UTC zoneinfo. "-r" stops xargs from
  # invoking "rm -fr" with no operands (which fails, and would abort the
  # whole script under "set -e") when the pipeline produces no output.
  find /usr/share/locale -type f | grep -v en_US | xargs -r rm -fr
  find /usr/share/zoneinfo -type f | grep -v UTC | xargs -r rm -fr
  rm -fr /tmp/*
}
# Run every cleanup step in order: apt purge, DHCP leases, udev rules,
# then miscellaneous logs/caches/docs.
function cleanup() {
  cleanup_apt
  cleanup_dhcp
  cleanup_dev
  cleanup_misc
}

# Run only when executed directly; no-op when the file is sourced.
return 2>/dev/null || cleanup

View File

@ -0,0 +1,52 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Install the base OS packages, cloud-init, and containerd (from Docker's
# apt repository) that every CKS cluster node template needs.
function install_packages() {
  # Keep apt fully non-interactive so the unattended packer build never
  # blocks on a confirmation prompt.
  export DEBIAN_FRONTEND=noninteractive
  apt-get install -y rsyslog logrotate cron net-tools ifupdown cloud-guest-utils conntrack apt-transport-https ca-certificates curl \
    gnupg gnupg-agent software-properties-common lsb-release
  apt-get install -y python3-json-pointer python3-jsonschema cloud-init
  # Register Docker's signed apt repository to obtain containerd.io.
  sudo mkdir -p /etc/apt/keyrings
  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
  # Use apt-get with -y: the previous plain "apt install containerd.io"
  # would prompt for confirmation and hang the image build ("apt" also
  # warns that its CLI is not stable for scripts).
  apt-get update
  apt-get install -y containerd.io
  systemctl start containerd
  systemctl enable containerd
}
# Install packages, then point cloud-init exclusively at the CloudStack
# datasource so nodes fetch their metadata from the virtual router.
function configure_services() {
  install_packages
  systemctl daemon-reload
  # Restrict the datasource list; max_wait/timeout bound how long boot
  # waits for the CloudStack metadata service before giving up.
  cat <<EOF > /etc/cloud/cloud.cfg.d/cloudstack.cfg
datasource_list: ['CloudStack']
datasource:
  CloudStack:
    max_wait: 120
    timeout: 50
EOF
}

configure_services

View File

@ -0,0 +1,73 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Fail fast and trace commands for easier build debugging.
set -e
set -x

# Hostname baked into the template; CloudStack renames nodes on deploy.
HOSTNAME=cksnode

# Point the template at Google public DNS so package installs during the
# build resolve; idempotent (returns early if both servers already set).
function configure_resolv_conf() {
  grep 8.8.8.8 /etc/resolv.conf && grep 8.8.4.4 /etc/resolv.conf && return
  cat > /etc/resolv.conf << EOF
nameserver 8.8.8.8
nameserver 8.8.4.4
EOF
}
# Delete entry in /etc/hosts derived from dhcp, so the cloned VM does not
# inherit the build VM's 127.0.1.1 hostname mapping. No-op when absent.
function delete_dhcp_ip() {
  result=$(grep 127.0.1.1 /etc/hosts || true)
  [ "${result}" == "" ] && return
  sed -i '/127.0.1.1/d' /etc/hosts
}
# Set the template hostname and rewrite the comment field of the SSH host
# public keys ("root@<oldhost>") to match it.
function configure_hostname() {
  sed -i "s/root@\(.*\)$/root@$HOSTNAME/g" /etc/ssh/ssh_host_*.pub
  echo "$HOSTNAME" > /etc/hostname
  hostname $HOSTNAME
}
# Write a minimal ifupdown config (loopback + DHCP primary NIC) and
# enable IPv4 forwarding, which Kubernetes networking requires.
# NOTE(review): the interface name "ens35" is hard-coded and looks tied to
# the packer/qemu virtio NIC enumeration — confirm it matches the target
# hypervisor before reusing this template elsewhere.
function configure_interfaces() {
  cat > /etc/network/interfaces << EOF
source /etc/network/interfaces.d/*

# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
auto ens35
iface ens35 inet dhcp
EOF
  echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
  sysctl -p /etc/sysctl.conf
}
# Run all networking setup steps: interfaces, DNS, stale /etc/hosts
# entry removal, then hostname.
function configure_networking() {
  configure_interfaces
  configure_resolv_conf
  delete_dhcp_ip
  configure_hostname
}

# Run only when executed directly; no-op when the file is sourced.
return 2>/dev/null || configure_networking

View File

@ -0,0 +1,346 @@
#!/bin/bash -l
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# build script which wraps around packer and virtualbox to create the systemvm template
# Print CLI usage (positional args: template name, version, build number)
# and exit successfully.
function usage() {
  cat <<END
Usage:
./build.sh [template] [version] [BUILD_NUMBER]
* Set \$appliance to provide definition name to build
(or use command line arg, default systemvmtemplate)
* Set \$version to provide version to apply to built appliance
(or use command line arg, default empty)
* Set \$BUILD_NUMBER to provide build number to apply to built appliance
(or use command line arg, default empty)
* Set \$DEBUG=1 to enable debug logging
* Set \$TRACE=1 to enable trace logging
END
  exit 0
}

# Show usage when any argument asks for help.
for i in $@; do
  if [ "$i" == "-h" -o "$i" == "--help" -o "$i" == "help" ]; then
    usage
  fi
done
# requires 32-bit vhd-util and faketime binaries to be available (even for 64 bit builds)
# Something like (on centos 6.5)...
# * faketime
# wget -q http://bits.xensource.com/oss-xen/release/4.2.0/xen-4.2.0.tar.gz
# sudo yum -y install libuuid.i686
# cd repo/libfaketime/
# vim Makefile
# # (tune 32 bit)
# make
# sudo make install
# * vhd-util
# Install on yum-based:
# sudo yum -y install python-devel dev86 iasl iasl-devel libuuid libuuid-devel \
# glib-devel glib2 glib2-devel yajl yajl-devel
# Install on apt-based:
# sudo apt-get install -y python python-dev bcc bin86 iasl uuid-dev \
# libglib2.0-dev libyajl-dev build-essential libc6-dev zlib1g-dev libncurses5-dev \
# patch iasl libbz2-dev e2fslibs-dev xz-utils gettext
# wget -q http://bits.xensource.com/oss-xen/release/4.2.0/xen-4.2.0.tar.gz
# tar xzvf xen-4.2.0.tar.gz
# cd xen-4.2.0/tools/
# wget https://github.com/citrix-openstack/xenserver-utils/raw/master/blktap2.patch -qO - | patch -p0
# ./configure --disable-monitors --disable-ocamltools --disable-rombios --disable-seabios
# make
# sudo cp ./blktap2/vhd/lib/libvhd.so.1.0 /usr/lib64/
# ldconfig
# sudo ldconfig
# sudo cp blktap2/vhd/vhd-util /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver
# faketime 2010-01-01 vhd-util convert
#
set -e

###
### Configuration
###
# whether to show DEBUG logs
DEBUG="${DEBUG:-}"
# whether to have other commands trace their actions
TRACE="${TRACE:-0}"

# Jenkins builds always get debug logging.
JENKINS_HOME=${JENKINS_HOME:-}
if [[ ! -z "${JENKINS_HOME}" ]]; then
  DEBUG=1
fi

# which packer definition to use
appliance="${1:-${appliance:-ckstemplate}}"

# optional version tag to put into the image filename
version="${2:-${version:-}}"

# optional (jenkins) build number tag to put into the image filename.
# This is the THIRD positional argument per usage(); the previous code
# read "$4", which the documented CLI can never supply.
BUILD_NUMBER="${3:-${BUILD_NUMBER:-}}"

# Compose "<version>.<build>" and the "-<tag>" suffix appended to the
# appliance name on every output artifact.
version_tag=
if [ ! -z "${version}" ]; then
  if [ ! -z "${BUILD_NUMBER}" ]; then
    version="${version}.${BUILD_NUMBER}"
  fi
  version_tag="-${version}"
elif [ ! -z "${BUILD_NUMBER}" ]; then
  version="${BUILD_NUMBER}"
  version_tag="-${BUILD_NUMBER}"
fi

appliance_build_name=${appliance}${version_tag}
###
### Generic helper functions
###
# how to tell sed to use extended regular expressions: GNU sed takes -r,
# BSD sed (macOS) takes -E.
os=`uname`
sed_regex_option="-E"
if [ "${os}" == "Linux" ]; then
  sed_regex_option="-r"
fi

# logging support: trace every shell command when DEBUG is requested.
if [[ "${DEBUG}" == "1" ]]; then
  set -x
fi
# Log a message to stderr at the given level (first arg), suppressing
# DEBUG lines unless $DEBUG=1 and colorizing output when stderr is a TTY.
function log() {
  local level=${1?}
  shift
  # Drop DEBUG messages when debug logging is off.
  if [[ "${DEBUG}" != "1" && "${level}" == "DEBUG" ]]; then
    return
  fi
  local code=
  local line="[$(date '+%F %T')] $level: $*"
  # Colorize only for interactive terminals (stderr is a TTY).
  if [ -t 2 ]
  then
    case "$level" in
      INFO) code=36 ;;
      DEBUG) code=30 ;;
      WARN) code=33 ;;
      ERROR) code=31 ;;
      *) code=37 ;;
    esac
    echo -e "\033[${code}m${line}\033[0m"
  else
    echo "$line"
  fi >&2
}
# Emit an ERROR-level message via log() and abort the build with a
# non-zero exit status.
error() {
  log ERROR $@
  exit 1
}
# cleanup code support
# cleanup code support: commands registered via add_on_exit run in
# reverse (LIFO) order when the script exits.
declare -a on_exit_items

# EXIT trap handler: evaluate registered cleanup commands newest-first.
function on_exit() {
  for (( i=${#on_exit_items[@]}-1 ; i>=0 ; i-- )) ; do
    sleep 2
    log DEBUG "on_exit: ${on_exit_items[i]}"
    eval ${on_exit_items[i]}
  done
}

# Register a command to run at exit; installs the EXIT trap on first use.
function add_on_exit() {
  local n=${#on_exit_items[*]}
  on_exit_items[${n}]="$*"
  if [ ${n} -eq 0 ]; then
    log DEBUG "Setting trap"
    trap on_exit EXIT
  fi
}
# retry code support
function retry() {
local times=$1
shift
local count=0
while [ ${count} -lt ${times} ]; do
"$@" && break
count=$(( $count + 1 ))
sleep ${count}
done
if [ ${count} -eq ${times} ]; then
error "Failed ${times} times: $@"
fi
}
###
### Script logic
###
# Remove every artifact from a previous run so the build starts clean.
function prepare() {
  log INFO "preparing for build"
  rm -rf dist *.ova *.vhd *.vdi *.qcow* *.bz2 *.vmdk *.ovf
}

# Build the base qcow2 image with packer from the 22.04 definition
# matching the selected appliance name.
function packer_build() {
  log INFO "building new image with packer"
  #cd ${appliance_build_name} && packer build template.json && cd ..
  cd 22.04 && packer build ${appliance_build_name}.json && cd ..
}
# Write a minimal VMware .vmx descriptor named "$1.vmx" referencing the
# vmdk disk file "$2", used by ovftool to package an OVA.
function stage_vmx() {
  cat << VMXFILE > "${1}.vmx"
.encoding = "UTF-8"
displayname = "${1}"
annotation = "${1}"
guestos = "otherlinux-64"
virtualHW.version = "11"
config.version = "8"
numvcpus = "1"
cpuid.coresPerSocket = "1"
memsize = "256"
pciBridge0.present = "TRUE"
pciBridge4.present = "TRUE"
pciBridge4.virtualDev = "pcieRootPort"
pciBridge4.functions = "8"
pciBridge5.present = "TRUE"
pciBridge5.virtualDev = "pcieRootPort"
pciBridge5.functions = "8"
pciBridge6.present = "TRUE"
pciBridge6.virtualDev = "pcieRootPort"
pciBridge6.functions = "8"
pciBridge7.present = "TRUE"
pciBridge7.virtualDev = "pcieRootPort"
pciBridge7.functions = "8"
vmci0.present = "TRUE"
floppy0.present = "FALSE"
ide0:0.clientDevice = "FALSE"
ide0:0.present = "TRUE"
ide0:0.deviceType = "atapi-cdrom"
ide0:0.autodetect = "TRUE"
ide0:0.startConnected = "FALSE"
mks.enable3d = "false"
svga.autodetect = "false"
svga.vramSize = "4194304"
scsi0:0.present = "TRUE"
scsi0:0.deviceType = "disk"
scsi0:0.fileName = "$2"
scsi0:0.mode = "persistent"
scsi0:0.writeThrough = "false"
scsi0.virtualDev = "lsilogic"
scsi0.present = "TRUE"
vmci0.unrestricted = "false"
vcpu.hotadd = "false"
vcpu.hotremove = "false"
firmware = "bios"
mem.hotadd = "false"
VMXFILE
}
# Convert the qcow2 artifact into a XenServer-compatible VHD. Requires
# the vhd-util and faketime tools (see header notes); skipped with a
# warning when either is missing.
function xen_server_export() {
  log INFO "creating xen server export"
  # Probe for the tools without tripping "set -e".
  set +e
  which faketime >/dev/null 2>&1 && which vhd-util >/dev/null 2>&1
  local result=$?
  set -e
  if [ ${result} == 0 ]; then
    qemu-img convert -f qcow2 -O raw "dist/${appliance}" img.raw
    vhd-util convert -s 0 -t 1 -i img.raw -o stagefixed.vhd
    # faketime keeps the VHD creation timestamp deterministic.
    faketime '2010-01-01' vhd-util convert -s 1 -t 2 -i stagefixed.vhd -o "${appliance_build_name}-xen.vhd"
    rm -f *.bak
    bzip2 "${appliance_build_name}-xen.vhd"
    mv "${appliance_build_name}-xen.vhd.bz2" dist/
    log INFO "${appliance} exported for XenServer: dist/${appliance_build_name}-xen.vhd.bz2"
  else
    log WARN "** Skipping ${appliance_build_name} export for XenServer: faketime or vhd-util command is missing. **"
    log WARN "** faketime source code is available from https://github.com/wolfcw/libfaketime **"
  fi
}
# Convert the qcow2 artifact to a bzip2-compressed raw image for OracleVM.
function ovm_export() {
  log INFO "creating OVM export"
  qemu-img convert -f qcow2 -O raw "dist/${appliance}" "dist/${appliance_build_name}-ovm.raw"
  cd dist && bzip2 "${appliance_build_name}-ovm.raw" && cd ..
  log INFO "${appliance} exported for OracleVM: dist/${appliance_build_name}-ovm.raw.bz2"
}
# Re-compress the qcow2 artifact in compat=0.10 format for KVM and bzip2
# it into dist/. Aborts the build if the conversion fails.
function kvm_export() {
  log INFO "creating kvm export"
  # Tolerate qemu-img failure only long enough to capture its status.
  set +e
  qemu-img convert -o compat=0.10 -f qcow2 -c -O qcow2 "dist/${appliance}" "dist/${appliance_build_name}-kvm.qcow2"
  local qemuresult=$?
  # Restore fail-fast: the original left "set +e" active for the entire
  # remainder of the build and never checked qemuresult at all.
  set -e
  if [ ${qemuresult} != 0 ]; then
    error "qemu-img convert failed for KVM export of dist/${appliance}"
  fi
  cd dist && bzip2 "${appliance_build_name}-kvm.qcow2" && cd ..
  log INFO "${appliance} exported for KVM: dist/${appliance_build_name}-kvm.qcow2.bz2"
}
# Convert the qcow2 artifact to VMDK and, when ovftool is on PATH, wrap
# it in an OVA using a generated .vmx descriptor (see stage_vmx).
function vmware_export() {
  log INFO "creating vmware export"
  qemu-img convert -f qcow2 -O vmdk "dist/${appliance}" "dist/${appliance_build_name}-vmware.vmdk"
  # Without ovftool we still ship the bare vmdk, just no OVA.
  if ! ovftool_loc="$(type -p "ovftool")" || [ -z "$ovftool_loc" ]; then
    log INFO "ovftool not found, skipping ova generation for VMware"
    return
  fi
  log INFO "ovftool found, using it to export ova file"
  CDIR=$PWD
  cd dist
  chmod 666 ${appliance_build_name}-vmware.vmdk
  stage_vmx ${appliance_build_name}-vmware ${appliance_build_name}-vmware.vmdk
  ovftool ${appliance_build_name}-vmware.vmx ${appliance_build_name}-vmware.ova
  # The vmx descriptor and loose vmdk are packaged inside the OVA.
  rm -f *vmx *vmdk
  cd $CDIR
  log INFO "${appliance} exported for VMWare: dist/${appliance_build_name}-vmware.ova"
}
# Convert the qcow2 artifact to a zipped VHD for Hyper-V.
function hyperv_export() {
  log INFO "creating hyperv export"
  qemu-img convert -f qcow2 -O vpc "dist/${appliance}" "dist/${appliance_build_name}-hyperv.vhd"
  CDIR=$PWD
  cd dist
  zip "${appliance_build_name}-hyperv.vhd.zip" "${appliance_build_name}-hyperv.vhd"
  # The uncompressed vhd is no longer needed once zipped.
  rm -f *vhd
  cd $CDIR
  log INFO "${appliance} exported for HyperV: dist/${appliance_build_name}-hyperv.vhd.zip"
}
###
### Main invocation
###
# Orchestrate the whole build: clean the workspace, run packer, convert
# the qcow2 disk into each hypervisor format, then checksum the artifacts.
function main() {
  prepare
  packer_build

  # process the disk at dist
  kvm_export
  ovm_export
  xen_server_export
  vmware_export
  hyperv_export
  # Drop the intermediate packer disk; only per-hypervisor exports ship.
  rm -f "dist/${appliance}"
  cd dist && chmod +r * && cd ..
  cd dist && md5sum * > md5sum.txt && cd ..
  cd dist && sha512sum * > sha512sum.txt && cd ..
  add_on_exit log INFO "BUILD SUCCESSFUL"
}

# we only run main() if not source-d
return 2>/dev/null || main

View File

@ -459,9 +459,16 @@
"label.cisco.nexus1000v.password": "Nexus 1000v password",
"label.cisco.nexus1000v.username": "Nexus 1000v username",
"label.cks.cluster.autoscalingenabled": "Enable auto scaling on this cluster",
"label.cks.cluster.control.nodes.offeringid": "Service Offering for Control Nodes",
"label.cks.cluster.control.nodes.templateid": "Template for Control Nodes",
"label.cks.cluster.etcd.nodes": "Etcd Nodes",
"label.cks.cluster.etcd.nodes.offeringid": "Service Offering for etcd Nodes",
"label.cks.cluster.etcd.nodes.templateid": "Template for etcd Nodes",
"label.cks.cluster.maxsize": "Maximum cluster size (Worker nodes)",
"label.cks.cluster.minsize": "Minimum cluster size (Worker nodes)",
"label.cks.cluster.size": "Cluster size (Worker nodes)",
"label.cks.cluster.worker.nodes.offeringid": "Service Offering for Worker Nodes",
"label.cks.cluster.worker.nodes.templateid": "Template for Worker Nodes",
"label.cleanup": "Clean up",
"label.clear": "Clear",
"label.clear.list": "Clear list",
@ -915,6 +922,7 @@
"label.fix.errors": "Fix errors",
"label.fixed": "Fixed offering",
"label.for": "for",
"label.for.cks": "For CKS",
"label.forbidden": "Forbidden",
"label.forced": "Force",
"label.force.stop": "Force stop",
@ -1901,6 +1909,9 @@
"label.service.lb.netscaler.servicepackages": "Netscaler service packages",
"label.service.lb.netscaler.servicepackages.description": "Service package description",
"label.service.offering": "Service offering",
"label.service.offering.controlnodes": "Compute offering for Control Nodes",
"label.service.offering.etcdnodes": "Compute offering for etcd Nodes",
"label.service.offering.workernodes": "Compute offering for Worker Nodes",
"label.service.staticnat.associatepublicip": "Associate public IP",
"label.service.staticnat.elasticipcheckbox": "Elastic IP",
"label.servicegroupuuid": "Service Group",
@ -2420,6 +2431,7 @@
"label.bucket.delete": "Delete Bucket",
"label.quotagb": "Quota in GB",
"label.encryption": "Encryption",
"label.etcdnodes": "Number of etcd nodes",
"label.versioning": "Versioning",
"label.objectlocking": "Object Lock",
"label.bucket.policy": "Bucket Policy",

View File

@ -544,6 +544,39 @@
<span v-else>{{ resource.serviceofferingname || resource.serviceofferingid }}</span>
</div>
</div>
<div class="resource-detail-item" v-if="resource.controlofferingname && resource.controlofferingid">
<div class="resource-detail-item__label">{{ $t('label.service.offering.controlnodes') }}</div>
<div class="resource-detail-item__details">
<cloud-outlined />
<router-link v-if="!isStatic && ($route.meta.name === 'router' || $route.meta.name === 'systemvm')" :to="{ path: '/systemoffering/' + resource.controlofferingid}">{{ resource.controlofferingname || resource.controlofferingid }} </router-link>
<router-link v-else-if="$router.resolve('/computeoffering/' + resource.controlofferingid).matched[0].redirect !== '/exception/404'" :to="{ path: '/computeoffering/' + resource.controlofferingid }">{{ resource.controlofferingname || resource.controlofferingid }} </router-link>
<span v-else>{{ resource.controlofferingname || resource.controlofferingid }}</span>
</div>
</div>
<div class="resource-detail-item" v-if="resource.workerofferingname && resource.workerofferingid">
<div class="resource-detail-item__label">{{ $t('label.service.offering.workernodes') }}</div>
<div class="resource-detail-item__details">
<cloud-outlined />
<router-link v-if="!isStatic && ($route.meta.name === 'router' || $route.meta.name === 'systemvm')" :to="{ path: '/systemoffering/' + resource.workerofferingid}">{{ resource.workerofferingname || resource.workerofferingid }} </router-link>
<router-link v-else-if="$router.resolve('/computeoffering/' + resource.workerofferingid).matched[0].redirect !== '/exception/404'" :to="{ path: '/computeoffering/' + resource.workerofferingid }">{{ resource.workerofferingname || resource.workerofferingid }} </router-link>
<span v-else>{{ resource.workerofferingname || resource.workerofferingid }}</span>
</div>
</div>
<div class="resource-detail-item" v-if="resource.etcdofferingname && resource.etcdofferingid">
<div class="resource-detail-item__label">{{ $t('label.service.offering.etcdnodes') }}</div>
<div class="resource-detail-item__details">
<cloud-outlined />
<router-link v-if="!isStatic && ($route.meta.name === 'router' || $route.meta.name === 'systemvm')" :to="{ path: '/systemoffering/' + resource.etcdofferingid}">{{ resource.etcdofferingname || resource.etcdofferingid }} </router-link>
<router-link v-else-if="$router.resolve('/computeoffering/' + resource.etcdofferingid).matched[0].redirect !== '/exception/404'" :to="{ path: '/computeoffering/' + resource.etcdofferingid }">{{ resource.etcdofferingname || resource.etcdofferingid }} </router-link>
<span v-else>{{ resource.etcdofferingname || resource.etcdofferingid }}</span>
</div>
</div>
<div class="resource-detail-item" v-if="resource.etcdnodes">
<div class="resource-detail-item__label">{{ $t('label.etcdnodes') }}</div>
<div class="resource-detail-item__details">
<bulb-outlined />{{ resource.etcdnodes }}
</div>
</div>
<div class="resource-detail-item" v-if="resource.diskofferingname && resource.diskofferingid">
<div class="resource-detail-item__label">{{ $t('label.diskoffering') }}</div>
<div class="resource-detail-item__details">

View File

@ -145,7 +145,7 @@
v-model:value="form.controlnodes"
:placeholder="apiParams.controlnodes.description"/>
</a-form-item>
<a-form-item v-if="form.haenable" name="externalloadbalanceripaddress" ref="externalloadbalanceripaddress">
<a-form-item v-if="form.haenable && !selectedZone.isnsxenabled" name="externalloadbalanceripaddress" ref="externalloadbalanceripaddress">
<template #label>
<tooltip-label :title="$t('label.externalloadbalanceripaddress')" :tooltip="apiParams.externalloadbalanceripaddress.description"/>
</template>
@ -180,6 +180,138 @@
</a-select-option>
</a-select>
</a-form-item>
<!-- Advanced configurations -->
<!-- Toggle: reveals the per-node-type offering/template selectors below -->
<a-form-item name="advancedmode" ref="advancedmode">
<template #label>
<tooltip-label :title="$t('label.isadvanced')" />
</template>
<a-switch v-model:checked="form.advancedmode" />
</a-form-item>
<!-- Service offering for control-plane nodes; option index is mapped to an
     offering id in handleSubmit via serviceOfferings[index].id -->
<a-form-item v-if="form.advancedmode" name="controlofferingid" ref="controlofferingid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.control.nodes.offeringid')" :tooltip="$t('label.cks.cluster.control.nodes.offeringid')"/>
</template>
<a-select
id="control-offering-selection"
v-model:value="form.controlofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="$t('label.cks.cluster.control.nodes.offeringid')">
<a-select-option v-for="(opt, optIndex) in serviceOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- CKS-ready template for control-plane nodes.
     Fix: the spinner was bound to serviceOfferingLoading although the options
     come from fetchCksTemplates(), which drives templateLoading. -->
<a-form-item v-if="form.advancedmode" name="controltemplateid" ref="controltemplateid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.control.nodes.templateid')" :tooltip="$t('label.cks.cluster.control.nodes.templateid')"/>
</template>
<a-select
id="control-template-selection"
v-model:value="form.controltemplateid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="templateLoading"
:placeholder="$t('label.cks.cluster.control.nodes.templateid')">
<a-select-option v-for="(opt, optIndex) in templates" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- Service offering for worker nodes; option index is mapped to an offering
     id in handleSubmit via serviceOfferings[index].id -->
<a-form-item v-if="form.advancedmode" name="workerofferingid" ref="workerofferingid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.worker.nodes.offeringid')" :tooltip="$t('label.cks.cluster.worker.nodes.offeringid')"/>
</template>
<a-select
id="worker-offering-selection"
v-model:value="form.workerofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="$t('label.cks.cluster.worker.nodes.offeringid')">
<a-select-option v-for="(opt, optIndex) in serviceOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- CKS-ready template for worker nodes.
     Fix: the spinner was bound to serviceOfferingLoading although the options
     come from fetchCksTemplates(), which drives templateLoading. -->
<a-form-item v-if="form.advancedmode" name="workertemplateid" ref="workertemplateid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.worker.nodes.templateid')" :tooltip="$t('label.cks.cluster.worker.nodes.templateid')"/>
</template>
<a-select
id="worker-template-selection"
v-model:value="form.workertemplateid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="templateLoading"
:placeholder="$t('label.cks.cluster.worker.nodes.templateid')">
<a-select-option v-for="(opt, optIndex) in templates" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- Number of dedicated etcd nodes to deploy.
     Fix: tooltip and placeholder were copy-pasted from the controlnodes field;
     use the etcdnodes API description when the API exposes it (guarded in case
     the endpoint predates the etcdnodes parameter). -->
<a-form-item v-if="form.advancedmode" name="etcdnodes" ref="etcdnodes">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.etcd.nodes')" :tooltip="apiParams.etcdnodes ? apiParams.etcdnodes.description : $t('label.cks.cluster.etcd.nodes')"/>
</template>
<a-input
v-model:value="form.etcdnodes"
:placeholder="apiParams.etcdnodes ? apiParams.etcdnodes.description : $t('label.cks.cluster.etcd.nodes')"/>
</a-form-item>
<!-- Service offering for etcd nodes; only rendered when at least one etcd
     node was requested above -->
<a-form-item v-if="form.advancedmode && form.etcdnodes && form.etcdnodes > 0" name="etcdofferingid" ref="etcdofferingid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.etcd.nodes.offeringid')" :tooltip="$t('label.cks.cluster.etcd.nodes.offeringid')"/>
</template>
<a-select
id="etcd-offering-selection"
v-model:value="form.etcdofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="$t('label.cks.cluster.etcd.nodes.offeringid')">
<a-select-option v-for="(opt, optIndex) in serviceOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- CKS-ready template for etcd nodes.
     Fixes: (1) name/ref were "controltemplateid" (a duplicate of the
     control-template item), breaking form registration/validation for this
     field whose v-model is form.etcdtemplateid; (2) the spinner was bound to
     serviceOfferingLoading instead of templateLoading. -->
<a-form-item v-if="form.advancedmode && form?.etcdnodes > 0" name="etcdtemplateid" ref="etcdtemplateid">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.etcd.nodes.templateid')" :tooltip="$t('label.cks.cluster.etcd.nodes.templateid')"/>
</template>
<a-select
id="etcd-template-selection"
v-model:value="form.etcdtemplateid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="templateLoading"
:placeholder="$t('label.cks.cluster.etcd.nodes.templateid')">
<a-select-option v-for="(opt, optIndex) in templates" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- Experimentation Features -->
<div v-if="$store.getters.features.kubernetesclusterexperimentalfeaturesenabled">
<a-form-item name="privateregistry" ref="privateregistry" :label="$t('label.private.registry')">
<template #label>
@ -252,7 +384,9 @@ export default {
networkLoading: false,
keyPairs: [],
keyPairLoading: false,
loading: false
loading: false,
templates: [],
templateLoading: false
}
},
beforeCreate () {
@ -317,6 +451,7 @@ export default {
fetchData () {
this.fetchZoneData()
this.fetchKeyPairData()
this.fetchCksTemplates()
},
isValidValueForKey (obj, key) {
return key in obj && obj[key] != null
@ -401,9 +536,24 @@ export default {
this.serviceOfferingLoading = false
if (this.arrayHasItems(this.serviceOfferings)) {
this.form.serviceofferingid = 0
this.form.controlofferingid = undefined
this.form.workerofferingid = undefined
this.form.etcdofferingid = undefined
}
})
},
fetchCksTemplates () {
const params = {
templatefilter: 'all',
forcks: true
}
this.templateLoading = true
api('listTemplates', params).then(json => {
this.templates = json?.listtemplatesresponse?.template || []
}).finally(() => {
this.templateLoading = false
})
},
fetchNetworkData () {
const params = {}
if (!this.isObjectEmpty(this.selectedZone)) {
@ -461,6 +611,44 @@ export default {
size: values.size,
clustertype: 'CloudManaged'
}
// Advanced mode: emit map-style parameters nodeofferings[i].node/.offering and
// nodetemplates[i].node/.template so each node type (control/worker/etcd) can
// use its own service offering and/or CKS-ready template.
// Cleanup: the advancedmode guard was repeated in every branch and
// params.etcdnodes was assigned twice; behavior is unchanged.
if (this.isValidValueForKey(values, 'advancedmode') && values.advancedmode) {
  var advancedOfferings = 0
  if (this.isValidValueForKey(values, 'controlofferingid') && this.arrayHasItems(this.serviceOfferings) && this.serviceOfferings[values.controlofferingid].id != null) {
    params['nodeofferings[' + advancedOfferings + '].node'] = 'control'
    params['nodeofferings[' + advancedOfferings + '].offering'] = this.serviceOfferings[values.controlofferingid].id
    advancedOfferings++
  }
  if (this.isValidValueForKey(values, 'workerofferingid') && this.arrayHasItems(this.serviceOfferings) && this.serviceOfferings[values.workerofferingid].id != null) {
    params['nodeofferings[' + advancedOfferings + '].node'] = 'worker'
    params['nodeofferings[' + advancedOfferings + '].offering'] = this.serviceOfferings[values.workerofferingid].id
    advancedOfferings++
  }
  var advancedTemplates = 0
  if (this.isValidValueForKey(values, 'controltemplateid') && this.arrayHasItems(this.templates) && this.templates[values.controltemplateid].id != null) {
    params['nodetemplates[' + advancedTemplates + '].node'] = 'control'
    params['nodetemplates[' + advancedTemplates + '].template'] = this.templates[values.controltemplateid].id
    advancedTemplates++
  }
  if (this.isValidValueForKey(values, 'workertemplateid') && this.arrayHasItems(this.templates) && this.templates[values.workertemplateid].id != null) {
    params['nodetemplates[' + advancedTemplates + '].node'] = 'worker'
    params['nodetemplates[' + advancedTemplates + '].template'] = this.templates[values.workertemplateid].id
    advancedTemplates++
  }
  if (this.isValidValueForKey(values, 'etcdnodes') && values.etcdnodes > 0) {
    params.etcdnodes = values.etcdnodes
    if (this.isValidValueForKey(values, 'etcdofferingid') && this.arrayHasItems(this.serviceOfferings) && this.serviceOfferings[values.etcdofferingid].id != null) {
      params['nodeofferings[' + advancedOfferings + '].node'] = 'etcd'
      params['nodeofferings[' + advancedOfferings + '].offering'] = this.serviceOfferings[values.etcdofferingid].id
      advancedOfferings++
    }
    if (this.isValidValueForKey(values, 'etcdtemplateid') && this.arrayHasItems(this.templates) && this.templates[values.etcdtemplateid].id != null) {
      params['nodetemplates[' + advancedTemplates + '].node'] = 'etcd'
      params['nodetemplates[' + advancedTemplates + '].template'] = this.templates[values.etcdtemplateid].id
      advancedTemplates++
    }
  }
}
if (this.isValidValueForKey(values, 'noderootdisksize') && values.noderootdisksize > 0) {
params.noderootdisksize = values.noderootdisksize
}

View File

@ -28,7 +28,7 @@
:rules="rules"
@finish="handleSubmit"
layout="vertical">
<a-form-item name="serviceofferingid" ref="serviceofferingid">
<a-form-item name="serviceofferingid" ref="serviceofferingid" v-if="!this.resource.workerofferingid && !this.resource.controlofferingid && !this.resource.etcdofferingid">
<template #label>
<tooltip-label :title="$t('label.serviceofferingid')" :tooltip="apiParams.serviceofferingid.description"/>
</template>
@ -47,6 +47,63 @@
</a-select-option>
</a-select>
</a-form-item>
<!-- New offering for worker nodes when scaling.
     NOTE(review): rendered unconditionally, while the legacy serviceofferingid
     item above is hidden when per-node offerings exist — consider a
     v-if="resource.workerofferingid" guard for clusters without per-node
     offerings; confirm intended UX. -->
<a-form-item name="workerofferingid" ref="workerofferingid">
<template #label>
<tooltip-label :title="$t('label.service.offering.workernodes')" :tooltip="apiParams.serviceofferingid.description"/>
</template>
<a-select
id="offering-selection-worker"
v-model:value="form.workerofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.children.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="apiParams.serviceofferingid.description">
<a-select-option v-for="(opt, optIndex) in workerOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- New offering for control-plane nodes when scaling.
     NOTE(review): like the worker item, rendered unconditionally — consider a
     v-if="resource.controlofferingid" guard; confirm intended UX. -->
<a-form-item name="controlofferingid" ref="controlofferingid">
<template #label>
<tooltip-label :title="$t('label.service.offering.controlnodes')" :tooltip="apiParams.serviceofferingid.description"/>
</template>
<a-select
id="offering-selection-control"
v-model:value="form.controlofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.children.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="apiParams.serviceofferingid.description">
<a-select-option v-for="(opt, optIndex) in controlOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<!-- New offering for etcd nodes when scaling; only shown for clusters that
     were deployed with dedicated etcd nodes and a per-etcd offering -->
<a-form-item name="etcdofferingid" ref="etcdofferingid" v-if="this.resource.etcdnodes && this.resource.etcdnodes > 0 && this.resource.etcdofferingid">
<template #label>
<tooltip-label :title="$t('label.service.offering.etcdnodes')" :tooltip="apiParams.serviceofferingid.description"/>
</template>
<a-select
id="offering-selection-etcd"
v-model:value="form.etcdofferingid"
showSearch
optionFilterProp="label"
:filterOption="(input, option) => {
return option.label.children.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="apiParams.serviceofferingid.description">
<a-select-option v-for="(opt, optIndex) in etcdOfferings" :key="optIndex" :label="opt.name || opt.description">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<a-form-item name="autoscalingenabled" ref="autoscalingenabled" v-if="apiParams.autoscalingenabled">
<template #label>
<tooltip-label :title="$t('label.cks.cluster.autoscalingenabled')" :tooltip="apiParams.autoscalingenabled.description"/>
@ -118,7 +175,10 @@ export default {
originalSize: 1,
autoscalingenabled: null,
minsize: null,
maxsize: null
maxsize: null,
controlOfferings: [],
workerOfferings: [],
etcdOfferings: []
}
},
beforeCreate () {
@ -153,7 +213,12 @@ export default {
},
fetchData () {
if (this.resource.state === 'Running') {
this.fetchKubernetesClusterServiceOfferingData()
this.fetchKubernetesClusterServiceOfferingData(this.resource.serviceofferingid, 'default')
this.fetchKubernetesClusterServiceOfferingData(this.resource.workerofferingid, 'worker')
this.fetchKubernetesClusterServiceOfferingData(this.resource.controlofferingid, 'control')
if (this.resource.etcdofferingid && this.resource.etcdnodes && this.resource.etcdnodes > 0) {
this.fetchKubernetesClusterServiceOfferingData(this.resource.controlofferingid, 'etcd')
}
return
}
this.fetchKubernetesVersionData()
@ -167,19 +232,21 @@ export default {
isObjectEmpty (obj) {
return !(obj !== null && obj !== undefined && Object.keys(obj).length > 0 && obj.constructor === Object)
},
fetchKubernetesClusterServiceOfferingData () {
fetchKubernetesClusterServiceOfferingData (offeringId, type) {
const params = {}
if (!this.isObjectEmpty(this.resource)) {
params.id = this.resource.serviceofferingid
params.id = offeringId
}
var minCpu = 0
var minMemory = 0
api('listServiceOfferings', params).then(json => {
var items = json?.listserviceofferingsresponse?.serviceoffering || []
if (this.arrayHasItems(items) && !this.isObjectEmpty(items[0])) {
this.minCpu = items[0].cpunumber
this.minMemory = items[0].memory
minCpu = items[0].cpunumber
minMemory = items[0].memory
}
}).finally(() => {
this.fetchServiceOfferingData()
this.fetchServiceOfferingData(minCpu, minMemory, type)
})
},
fetchKubernetesVersionData () {
@ -187,21 +254,28 @@ export default {
if (!this.isObjectEmpty(this.resource)) {
params.id = this.resource.kubernetesversionid
}
var minCpu = 0
var minMemory = 0
api('listKubernetesSupportedVersions', params).then(json => {
const versionObjs = json?.listkubernetessupportedversionsresponse?.kubernetessupportedversion || []
if (this.arrayHasItems(versionObjs) && !this.isObjectEmpty(versionObjs[0])) {
this.minCpu = versionObjs[0].mincpunumber
this.minMemory = versionObjs[0].minmemory
minCpu = versionObjs[0].mincpunumber
minMemory = versionObjs[0].minmemory
}
}).finally(() => {
this.fetchServiceOfferingData()
this.fetchServiceOfferingData(minCpu, minMemory, 'default')
this.fetchServiceOfferingData(minCpu, minMemory, 'worker')
this.fetchServiceOfferingData(minCpu, minMemory, 'control')
if (this.resource.etcdofferingid && this.resource.etcdnodes && this.resource.etcdnodes > 0) {
this.fetchServiceOfferingData(minCpu, minMemory, 'etcd')
}
})
},
fetchServiceOfferingData () {
this.serviceOfferings = []
fetchServiceOfferingData (minCpu, minMemory, type) {
var offerings = []
const params = {
cpunumber: this.minCpu,
memory: this.minMemory
cpunumber: minCpu,
memory: minMemory
}
this.serviceOfferingLoading = true
api('listServiceOfferings', params).then(json => {
@ -209,17 +283,35 @@ export default {
if (this.arrayHasItems(items)) {
for (var i = 0; i < items.length; i++) {
if (items[i].iscustomized === false) {
this.serviceOfferings.push(items[i])
offerings.push(items[i])
}
}
}
}).finally(() => {
this.serviceOfferingLoading = false
if (this.arrayHasItems(this.serviceOfferings)) {
for (var i = 0; i < this.serviceOfferings.length; i++) {
if (this.serviceOfferings[i].id === this.resource.serviceofferingid) {
if (this.arrayHasItems(offerings)) {
if (type === 'default') {
this.serviceOfferings = offerings
} else if (type === 'worker') {
this.workerOfferings = offerings
} else if (type === 'control') {
this.controlOfferings = offerings
} else if (type === 'etcd') {
this.etcdOfferings = offerings
}
for (var i = 0; i < offerings.length; i++) {
if (type === 'default' && offerings[i].id === this.resource.serviceofferingid) {
this.form.serviceofferingid = i
break
} else if (type === 'worker' && offerings[i].id === this.resource.workerofferingid) {
this.form.workerofferingid = i
break
} else if (type === 'control' && offerings[i].id === this.resource.controlofferingid) {
this.form.controlofferingid = i
break
} else if (type === 'etcd' && offerings[i].id === this.resource.etcdofferingid) {
this.form.etcdofferingid = i
break
}
}
}
@ -250,6 +342,22 @@ export default {
if (this.isValidValueForKey(values, 'maxsize')) {
params.maxsize = values.maxsize
}
// Build indexed map-style parameters (nodeofferings[i].node / .offering) so
// scaleKubernetesCluster can apply a distinct offering per node type; the
// selected form value is an index into the corresponding offerings array.
var advancedOfferings = 0
if (this.isValidValueForKey(values, 'controlofferingid') && this.arrayHasItems(this.controlOfferings) && this.controlOfferings[values.controlofferingid].id != null) {
params['nodeofferings[' + advancedOfferings + '].node'] = 'control'
params['nodeofferings[' + advancedOfferings + '].offering'] = this.controlOfferings[values.controlofferingid].id
advancedOfferings++
}
if (this.isValidValueForKey(values, 'workerofferingid') && this.arrayHasItems(this.workerOfferings) && this.workerOfferings[values.workerofferingid].id != null) {
params['nodeofferings[' + advancedOfferings + '].node'] = 'worker'
params['nodeofferings[' + advancedOfferings + '].offering'] = this.workerOfferings[values.workerofferingid].id
advancedOfferings++
}
if (this.isValidValueForKey(values, 'etcdofferingid') && this.arrayHasItems(this.etcdOfferings) && this.etcdOfferings[values.etcdofferingid].id != null) {
params['nodeofferings[' + advancedOfferings + '].node'] = 'etcd'
params['nodeofferings[' + advancedOfferings + '].offering'] = this.etcdOfferings[values.etcdofferingid].id
advancedOfferings++
}
api('scaleKubernetesCluster', params).then(json => {
const jobId = json.scalekubernetesclusterresponse.jobid
this.$pollJob({

View File

@ -433,6 +433,11 @@
{{ $t('label.ispublic') }}
</a-checkbox>
</a-col>
<a-col :span="12">
<a-checkbox value="forCks" v-if="currentForm === 'Create'">
{{ $t('label.for.cks') }}
</a-checkbox>
</a-col>
</a-row>
</a-checkbox-group>
</a-form-item>