Adding AutoScaling for CKS + CKS CoreOS EOL update + systemvmtemplate improvements (#4329)

Adds AutoScaling support for CKS
Kubernetes autoscaler PR: kubernetes/autoscaler#3629
Also replaces CoreOS with Debian as the CKS node template OS (CoreOS is EOL)
Fixes #4198

Co-authored-by: Pearl Dsilva <pearl1594@gmail.com>
Co-authored-by: Pearl Dsilva <pearl.dsilva@shapeblue.com>
Co-authored-by: Wei Zhou <w.zhou@global.leaseweb.com>
Co-authored-by: Rohit Yadav <rohit.yadav@shapeblue.com>
Authored by davidjumani on 2021-10-06 21:17:41 +05:30, committed via GitHub
parent e4beb1f955, commit 6ac834a358
98 changed files with 3555 additions and 1129 deletions


@@ -379,7 +379,7 @@ public interface UserVmService {
String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List<Long> affinityGroupIdList,
Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap,
Map<String, String> templateOvfPropertiesMap, boolean dynamicScalingEnabled)
Map<String, String> templateOvfPropertiesMap, boolean dynamicScalingEnabled, String type)
throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;


@@ -838,11 +838,16 @@ public class ApiConstants {
public static final String KUBERNETES_VERSION_ID = "kubernetesversionid";
public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname";
public static final String MASTER_NODES = "masternodes";
public static final String NODE_IDS = "nodeids";
public static final String CONTROL_NODES = "controlnodes";
public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion";
public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid";
public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize";
public static final String SUPPORTS_HA = "supportsha";
public static final String SUPPORTS_AUTOSCALING = "supportsautoscaling";
public static final String AUTOSCALING_ENABLED = "autoscalingenabled";
public static final String MIN_SIZE = "minsize";
public static final String MAX_SIZE = "maxsize";
public static final String BOOT_TYPE = "boottype";
public static final String BOOT_MODE = "bootmode";
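
These constants back the new autoscaling parameters (autoscalingenabled, minsize, maxsize) on the CKS cluster APIs. A minimal sketch, using a hypothetical helper name not found in the patch, of the constraint those parameters presumably satisfy:

// Hypothetical validator (not part of the patch) illustrating how the new
// autoscaling parameters relate: sizes are required and ordered when enabled.
static void validateAutoscalingParams(Boolean autoscalingEnabled, Long minSize, Long maxSize) {
    if (!Boolean.TRUE.equals(autoscalingEnabled)) {
        return; // nothing to check when autoscaling is off
    }
    if (minSize == null || maxSize == null) {
        throw new IllegalArgumentException("minsize and maxsize are required when autoscalingenabled=true");
    }
    if (minSize < 1 || maxSize < minSize) {
        throw new IllegalArgumentException("expected 1 <= minsize <= maxsize");
    }
}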

debian/rules

@@ -66,17 +66,21 @@ override_dh_auto_install:
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/lib
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/setup
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm
mkdir $(DESTDIR)/var/log/$(PACKAGE)/management
mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management
mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management
mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt
cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/
cp -r client/target/classes/META-INF/webapp $(DESTDIR)/usr/share/$(PACKAGE)-management/webapp
cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/
cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/
cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar
cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/
cp -r engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/
rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/md5sum.txt
# nasty hack for a couple of configuration files
mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/


@@ -20,6 +20,10 @@ import com.cloud.agent.api.Answer;
import com.cloud.agent.manager.Commands;
import com.cloud.deploy.DeployDestination;
import com.cloud.exception.ResourceUnavailableException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
/**
* A VirtualMachineGuru knows how to process a certain type of virtual machine.
*
@@ -60,4 +64,12 @@ public interface VirtualMachineGuru {
void prepareStop(VirtualMachineProfile profile);
void finalizeUnmanage(VirtualMachine vm);
static String getEncodedMsPublicKey(String pubKey) {
String base64EncodedPublicKey = null;
if (pubKey != null) {
base64EncodedPublicKey = Base64.getEncoder().encodeToString(pubKey.getBytes(StandardCharsets.UTF_8));
}
return base64EncodedPublicKey;
}
}
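
A self-contained round trip showing the encoding this new helper performs (the decode side is illustration only; the sample key text is made up):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class EncodedKeyRoundTrip {
    public static void main(String[] args) {
        String pubKey = "ssh-rsa AAAAB3Nza... management@cloudstack"; // sample key text
        // Same encoding as VirtualMachineGuru.getEncodedMsPublicKey(pubKey)
        String encoded = Base64.getEncoder().encodeToString(pubKey.getBytes(StandardCharsets.UTF_8));
        String decoded = new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
        System.out.println(encoded + " -> " + decoded.equals(pubKey)); // prints ... -> true
    }
}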


@@ -413,6 +413,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
static final ConfigKey<Boolean> HaVmRestartHostUp = new ConfigKey<Boolean>("Advanced", Boolean.class, "ha.vm.restart.hostup", "true",
"If an out-of-band stop of a VM is detected and its host is up, then power on the VM", true);
static final ConfigKey<Long> SystemVmRootDiskSize = new ConfigKey<Long>("Advanced",
Long.class, "systemvm.root.disk.size", "-1",
"Size of root volume (in GB) of system VMs and virtual routers", true);
ScheduledExecutorService _executor = null;
private long _nodeId;
@@ -461,6 +465,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null);
Long rootDiskSize = rootDiskOfferingInfo.getSize();
if (vm.getType().isUsedBySystem() && SystemVmRootDiskSize.value() != null && SystemVmRootDiskSize.value() > 0L) {
rootDiskSize = SystemVmRootDiskSize.value();
}
final Long rootDiskSizeFinal = rootDiskSize;
Transaction.execute(new TransactionCallbackWithExceptionNoReturn<InsufficientCapacityException>() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException {
@@ -486,7 +496,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
} else if (template.getFormat() == ImageFormat.BAREMETAL) {
// Do nothing
} else {
volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal,
rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner);
}
@@ -1729,7 +1739,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
final UserVmVO userVm = _userVmDao.findById(vm.getId());
if (vm.getType() == VirtualMachine.Type.User) {
if (userVm != null){
if (userVm != null) {
userVm.setPowerState(PowerState.PowerOff);
_userVmDao.update(userVm.getId(), userVm);
}
@@ -4833,7 +4843,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return new ConfigKey<?>[] { ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait,
VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval,
VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool,
HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel };
HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize };
}
public List<StoragePoolAllocator> getStoragePoolAllocators() {


@@ -52,5 +52,175 @@
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
<groupId>org.ini4j</groupId>
<artifactId>ini4j</artifactId>
<version>${cs.ini.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.gmaven</groupId>
<artifactId>gmaven-plugin</artifactId>
<version>1.5</version>
<executions>
<execution>
<id>setproperty</id>
<phase>validate</phase>
<goals>
<goal>execute</goal>
</goals>
<configuration>
<source>
def projectVersion = project.version
String[] versionParts = projectVersion.tokenize('.')
pom.properties['cs.version'] = versionParts[0] + "." + versionParts[1]
pom.properties['patch.version'] = versionParts[2]
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.googlecode.maven-download-plugin</groupId>
<artifactId>download-maven-plugin</artifactId>
<version>1.6.3</version>
<executions>
<execution>
<id>download-checksums</id>
<phase>validate</phase>
<goals>
<goal>wget</goal>
</goals>
<configuration>
<url>https://download.cloudstack.org/systemvm/${cs.version}/md5sum.txt</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<skipCache>true</skipCache>
<overwrite>true</overwrite>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.gmaven</groupId>
<artifactId>gmaven-plugin</artifactId>
<version>1.5</version>
<executions>
<execution>
<id>set-properties</id>
<phase>generate-sources</phase>
<goals>
<goal>execute</goal>
</goals>
<configuration>
<source>
def csVersion = pom.properties['cs.version']
def patch = pom.properties['patch.version']
def templateList = []
templateList.add("systemvmtemplate-${csVersion}.${patch}-kvm")
templateList.add("systemvmtemplate-${csVersion}.${patch}-vmware")
templateList.add("systemvmtemplate-${csVersion}.${patch}-xen")
templateList.add("systemvmtemplate-${csVersion}.${patch}-ovm")
templateList.add("systemvmtemplate-${csVersion}.${patch}-hyperv")
File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt")
def lines = file.readLines()
for (template in templateList) {
def data = lines.findAll { it.contains(template) }
if (data != null) {
def hypervisor = template.tokenize('-')[-1]
pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0]
}
}
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<id>systemvm-template-metadata</id>
<phase>package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<workingDirectory>${basedir}/</workingDirectory>
<executable>bash</executable>
<arguments>
<argument>templateConfig.sh</argument>
<argument>${project.version}</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>template-create</id>
<activation>
<property>
<name>noredist</name>
</property>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>${cs.resources-plugin.version}</version>
</plugin>
<plugin>
<groupId>com.googlecode.maven-download-plugin</groupId>
<artifactId>download-maven-plugin</artifactId>
<version>1.6.3</version>
<executions>
<execution>
<id>download-kvm-template</id>
<goals>
<goal>wget</goal>
</goals>
<configuration>
<checkSignature>true</checkSignature>
<url>https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${kvm.checksum}</md5>
</configuration>
</execution>
<execution>
<id>download-vmware-template</id>
<goals>
<goal>wget</goal>
</goals>
<configuration>
<checkSignature>true</checkSignature>
<url>https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${vmware.checksum}</md5>
</configuration>
</execution>
<execution>
<id>download-xenserver-template</id>
<goals>
<goal>wget</goal>
</goals>
<configuration>
<checkSignature>true</checkSignature>
<url>https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${xen.checksum}</md5>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
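
The gmaven snippet above maps each hypervisor to its template checksum by scanning the downloaded md5sum.txt; a Java equivalent of that parsing, assuming the usual "<md5>  <filename>" line format:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ChecksumParser {
    // Mirrors the gmaven logic: find the line naming each template and take its first token.
    public static Map<String, String> parse(String md5sumPath, String csVersion, String patch) throws IOException {
        List<String> lines = Files.readAllLines(Paths.get(md5sumPath));
        Map<String, String> checksums = new HashMap<>();
        for (String hv : new String[] {"kvm", "vmware", "xen", "ovm", "hyperv"}) {
            String template = String.format("systemvmtemplate-%s.%s-%s", csVersion, patch, hv);
            lines.stream().filter(l -> l.contains(template)).findFirst()
                    .ifPresent(l -> checksums.put(hv + ".checksum", l.split("\\s+")[0]));
        }
        return checksums;
    }
}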


@@ -22,6 +22,7 @@ import com.cloud.utils.db.GenericDao;
import java.util.List;
import java.util.Map;
import java.util.Set;
public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<ClusterVO> listByPodId(long podId);
@@ -34,6 +35,8 @@ public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<HypervisorType> getAvailableHypervisorInZone(Long zoneId);
Set<HypervisorType> getDistinctAvailableHypervisorsAcrossClusters();
List<ClusterVO> listByDcHyType(long dcId, String hyType);
Map<Long, List<Long>> getPodClusterIdMap(List<Long> clusterIds);


@@ -39,8 +39,10 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@Component
public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements ClusterDao {
@@ -51,6 +53,7 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
protected final SearchBuilder<ClusterVO> ZoneSearch;
protected final SearchBuilder<ClusterVO> ZoneHyTypeSearch;
protected final SearchBuilder<ClusterVO> ZoneClusterSearch;
protected final SearchBuilder<ClusterVO> ClusterSearch;
protected GenericSearchBuilder<ClusterVO, Long> ClusterIdSearch;
@@ -97,6 +100,10 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
ClusterIdSearch.selectFields(ClusterIdSearch.entity().getId());
ClusterIdSearch.and("dataCenterId", ClusterIdSearch.entity().getDataCenterId(), Op.EQ);
ClusterIdSearch.done();
ClusterSearch = createSearchBuilder();
ClusterSearch.select(null, Func.DISTINCT, ClusterSearch.entity().getHypervisorType());
ClusterSearch.done();
}
@Override
@@ -154,6 +161,17 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
return hypers;
}
@Override
public Set<HypervisorType> getDistinctAvailableHypervisorsAcrossClusters() {
SearchCriteria<ClusterVO> sc = ClusterSearch.create();
List<ClusterVO> clusters = listBy(sc);
Set<HypervisorType> hypers = new HashSet<>();
for (ClusterVO cluster : clusters) {
hypers.add(cluster.getHypervisorType());
}
return hypers;
}
@Override
public Map<Long, List<Long>> getPodClusterIdMap(List<Long> clusterIds) {
TransactionLegacy txn = TransactionLegacy.currentTxn();


@@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.Storage;
import com.cloud.storage.VMTemplateVO;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.db.GenericDao;
@@ -72,6 +73,8 @@ public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long>, StateDao<
VMTemplateVO findRoutingTemplate(HypervisorType type, String templateName);
VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, Storage.TemplateType type);
public Long countTemplatesForAccount(long accountId);
public List<VMTemplateVO> listUnRemovedTemplatesByStates(VirtualMachineTemplate.State ...states);
@@ -81,4 +84,6 @@ public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long>, StateDao<
void saveDetails(VMTemplateVO tmpl);
List<VMTemplateVO> listByParentTemplatetId(long parentTemplatetId);
VMTemplateVO findLatestTemplateByName(String name);
}


@@ -97,6 +97,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
private SearchBuilder<VMTemplateVO> AllFieldsSearch;
protected SearchBuilder<VMTemplateVO> ParentTemplateIdSearch;
private SearchBuilder<VMTemplateVO> InactiveUnremovedTmpltSearch;
private SearchBuilder<VMTemplateVO> LatestTemplateByHypervisorTypeSearch;
@Inject
ResourceTagDao _tagsDao;
@@ -105,6 +106,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
private String consoleProxyTmpltName;
public VMTemplateDaoImpl() {
super();
LatestTemplateByHypervisorTypeSearch = createSearchBuilder();
LatestTemplateByHypervisorTypeSearch.and("hypervisorType", LatestTemplateByHypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
LatestTemplateByHypervisorTypeSearch.and("templateType", LatestTemplateByHypervisorTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ);
LatestTemplateByHypervisorTypeSearch.and("removed", LatestTemplateByHypervisorTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
}
@Override
@@ -229,6 +235,20 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
return listIncludingRemovedBy(sc);
}
@Override
public VMTemplateVO findLatestTemplateByName(String name) {
SearchCriteria<VMTemplateVO> sc = createSearchCriteria();
sc.addAnd("name", SearchCriteria.Op.EQ, name);
sc.addAnd("removed", SearchCriteria.Op.NULL);
Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L);
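// ascending == false: order by id descending, limit 1, so the newest matching template is returned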
List<VMTemplateVO> templates = listBy(sc, filter);
if ((templates != null) && !templates.isEmpty()) {
return templates.get(0);
}
return null;
}
@Override
public List<VMTemplateVO> findIsosByIdAndPath(Long domainId, Long accountId, String path) {
SearchCriteria<VMTemplateVO> sc = createSearchCriteria();
@@ -587,6 +607,19 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
}
}
@Override
public VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, TemplateType type) {
SearchCriteria<VMTemplateVO> sc = LatestTemplateByHypervisorTypeSearch.create();
sc.setParameters("hypervisorType", hypervisorType);
sc.setParameters("templateType", type);
Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L);
List<VMTemplateVO> templates = listBy(sc, filter);
if (templates != null && !templates.isEmpty()) {
return templates.get(0);
}
return null;
}
@Override
public Long countTemplatesForAccount(long accountId) {
SearchCriteria<Long> sc = CountTemplatesByAccount.create();


@@ -274,8 +274,6 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion);
updateSystemVmTemplates(upgrades);
for (DbUpgrade upgrade : upgrades) {
VersionVO version;
s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
@@ -346,6 +344,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
txn.close();
}
}
updateSystemVmTemplates(upgrades);
}
@Override
@@ -366,7 +365,11 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
return;
}
SystemVmTemplateRegistration.parseMetadataFile();
final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue);
SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease());
SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(currentVersion.getPatchRelease());
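// e.g. parsing code version "4.16.1.0" yields CS_MAJOR_VERSION = "4.16" and CS_TINY_VERSION = "1"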
s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion);
if (dbVersion.compareTo(currentVersion) > 0) {


@@ -0,0 +1,849 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.ClusterDaoImpl;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateDaoImpl;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.upgrade.dao.BasicTemplateDataStoreDaoImpl;
import com.cloud.user.Account;
import com.cloud.utils.DateUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.UriUtils;
import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.vm.dao.VMInstanceDao;
import com.cloud.vm.dao.VMInstanceDaoImpl;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDaoImpl;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.log4j.Logger;
import org.ini4j.Ini;
import javax.inject.Inject;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.Date;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;
public class SystemVmTemplateRegistration {
private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class);
private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s";
private static final String UMOUNT_COMMAND = "sudo umount %s";
private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/";
private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/systemvm/";
private static final String TEMPLATES_PATH = fetchTemplatesPath();
private static final String METADATA_FILE_NAME = "metadata.ini";
private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME;
public static final String TEMPORARY_SECONDARY_STORE = "tmp";
private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM);
private static final String storageScriptsDir = "scripts/storage/secondary";
private static final Integer OTHER_LINUX_ID = 99;
private static final Integer LINUX_5_ID = 15;
private static final Integer LINUX_7_ID = 183;
private static final Integer SCRIPT_TIMEOUT = 1800000;
private static final Integer LOCK_WAIT_TIMEOUT = 1200;
public static String CS_MAJOR_VERSION = null;
public static String CS_TINY_VERSION = null;
@Inject
DataCenterDao dataCenterDao;
@Inject
VMTemplateDao vmTemplateDao;
@Inject
TemplateDataStoreDao templateDataStoreDao;
@Inject
VMInstanceDao vmInstanceDao;
@Inject
ImageStoreDao imageStoreDao;
@Inject
ClusterDao clusterDao;
@Inject
ConfigurationDao configurationDao;
public SystemVmTemplateRegistration() {
dataCenterDao = new DataCenterDaoImpl();
vmTemplateDao = new VMTemplateDaoImpl();
templateDataStoreDao = new BasicTemplateDataStoreDaoImpl();
vmInstanceDao = new VMInstanceDaoImpl();
imageStoreDao = new ImageStoreDaoImpl();
clusterDao = new ClusterDaoImpl();
configurationDao = new ConfigurationDaoImpl();
}
private static class SystemVMTemplateDetails {
Long id;
String uuid;
String name;
String uniqueName;
Date created;
String url;
String checksum;
ImageFormat format;
Integer guestOsId;
Hypervisor.HypervisorType hypervisorType;
Long storeId;
Long size;
Long physicalSize;
String installPath;
boolean deployAsIs;
Date updated;
SystemVMTemplateDetails(String uuid, String name, Date created, String url, String checksum,
ImageFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType,
Long storeId) {
this.uuid = uuid;
this.name = name;
this.created = created;
this.url = url;
this.checksum = checksum;
this.format = format;
this.guestOsId = guestOsId;
this.hypervisorType = hypervisorType;
this.storeId = storeId;
}
public void setId(Long id) {
this.id = id;
}
public Long getId() {
return id;
}
public String getUuid() {
return uuid;
}
public String getName() {
return name;
}
public Date getCreated() {
return created;
}
public String getUrl() {
return url;
}
public String getChecksum() {
return checksum;
}
public ImageFormat getFormat() {
return format;
}
public Integer getGuestOsId() {
return guestOsId;
}
public Hypervisor.HypervisorType getHypervisorType() {
return hypervisorType;
}
public Long getStoreId() {
return storeId;
}
public Long getSize() {
return size;
}
public void setSize(Long size) {
this.size = size;
}
public Long getPhysicalSize() {
return physicalSize;
}
public void setPhysicalSize(Long physicalSize) {
this.physicalSize = physicalSize;
}
public String getInstallPath() {
return installPath;
}
public void setInstallPath(String installPath) {
this.installPath = installPath;
}
public String getUniqueName() {
return uniqueName;
}
public void setUniqueName(String uniqueName) {
this.uniqueName = uniqueName;
}
public boolean isDeployAsIs() {
return deployAsIs;
}
public void setDeployAsIs(boolean deployAsIs) {
this.deployAsIs = deployAsIs;
}
public Date getUpdated() {
return updated;
}
public void setUpdated(Date updated) {
this.updated = updated;
}
}
public static final List<Hypervisor.HypervisorType> hypervisorList = Arrays.asList(Hypervisor.HypervisorType.KVM,
Hypervisor.HypervisorType.VMware,
Hypervisor.HypervisorType.XenServer,
Hypervisor.HypervisorType.Hyperv,
Hypervisor.HypervisorType.LXC,
Hypervisor.HypervisorType.Ovm3
);
public static final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = new HashMap<Hypervisor.HypervisorType, String>();
public static final Map<Hypervisor.HypervisorType, String> FileNames = new HashMap<Hypervisor.HypervisorType, String>();
public static final Map<Hypervisor.HypervisorType, String> NewTemplateUrl = new HashMap<Hypervisor.HypervisorType, String>();
public static final Map<Hypervisor.HypervisorType, String> NewTemplateChecksum = new HashMap<Hypervisor.HypervisorType, String>();
public static final Map<Hypervisor.HypervisorType, String> RouterTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
public static final Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<Hypervisor.HypervisorType, Integer>() {
{
put(Hypervisor.HypervisorType.KVM, LINUX_5_ID);
put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
put(Hypervisor.HypervisorType.Hyperv, LINUX_5_ID);
put(Hypervisor.HypervisorType.LXC, LINUX_5_ID);
put(Hypervisor.HypervisorType.Ovm3, LINUX_7_ID);
}
};
public static final Map<Hypervisor.HypervisorType, ImageFormat> hypervisorImageFormat = new HashMap<Hypervisor.HypervisorType, ImageFormat>() {
{
put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2);
put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD);
put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA);
put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD);
put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2);
put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW);
}
};
public static boolean validateIfSeeded(String url, String path) {
String filePath = null;
try {
filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString();
if (filePath == null) {
throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store");
}
mountStore(url, filePath);
int lastIdx = path.lastIndexOf(File.separator);
String partialDirPath = path.substring(0, lastIdx);
String templatePath = filePath + File.separator + partialDirPath;
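// The presence of template.properties at the install path is treated as proof that the template was already seeded on this store.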
File templateProps = new File(templatePath + "/template.properties");
if (templateProps.exists()) {
LOGGER.info("SystemVM template already seeded, skipping registration");
return true;
}
LOGGER.info("SystemVM template not seeded");
return false;
} catch (Exception e) {
LOGGER.error("Failed to verify if the template is seeded", e);
throw new CloudRuntimeException("Failed to verify if the template is seeded", e);
} finally {
if (filePath != null) {
unmountStore(filePath);
try {
Files.delete(Path.of(filePath));
} catch (IOException e) {
LOGGER.error(String.format("Failed to delete temporary directory: %s", filePath));
}
}
}
}
private String calculateChecksum(File file) {
try (InputStream is = Files.newInputStream(Paths.get(file.getPath()))) {
return DigestUtils.md5Hex(is);
} catch (IOException e) {
String errMsg = "Failed to calculate template checksum";
LOGGER.error(errMsg, e);
throw new CloudRuntimeException(errMsg, e);
}
}
public Long getRegisteredTemplateId(Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
VMTemplateVO vmTemplate = vmTemplateDao.findLatestTemplateByName(hypervisorAndTemplateName.second());
Long templateId = null;
if (vmTemplate != null) {
templateId = vmTemplate.getId();
}
return templateId;
}
private static String fetchTemplatesPath() {
String filePath = RELATIVE_TEMPLATE_PATH + METADATA_FILE_NAME;
LOGGER.debug(String.format("Looking for file [ %s ] in the classpath.", filePath));
File metaFile = new File(filePath);
String templatePath = null;
if (metaFile.exists()) {
templatePath = RELATIVE_TEMPLATE_PATH;
}
if (templatePath == null) {
filePath = ABSOLUTE_TEMPLATE_PATH + METADATA_FILE_NAME;
metaFile = new File(filePath);
templatePath = ABSOLUTE_TEMPLATE_PATH;
LOGGER.debug(String.format("Looking for file [ %s ] in the classpath.", filePath));
if (!metaFile.exists()) {
String errMsg = String.format("Unable to locate metadata file in your setup at %s", filePath.toString());
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
return templatePath;
}
private String getHypervisorName(String name) {
if (name.equals("xenserver")) {
return "xen";
}
if (name.equals("ovm3")) {
return "ovm";
}
return name;
}
private Hypervisor.HypervisorType getHypervisorType(String hypervisor) {
if (hypervisor.equalsIgnoreCase("xen")) {
hypervisor = "xenserver";
} else if (hypervisor.equalsIgnoreCase("ovm")) {
hypervisor = "ovm3";
}
return Hypervisor.HypervisorType.getType(hypervisor);
}
private List<Long> getEligibleZoneIds() {
List<Long> zoneIds = new ArrayList<>();
List<ImageStoreVO> stores = imageStoreDao.findByProtocol("nfs");
for (ImageStoreVO store : stores) {
if (!zoneIds.contains(store.getDataCenterId())) {
zoneIds.add(store.getDataCenterId());
}
}
return zoneIds;
}
private Pair<String, Long> getNfsStoreInZone(Long zoneId) {
String url = null;
Long storeId = null;
ImageStoreVO storeVO = imageStoreDao.findOneByZoneAndProtocol(zoneId, "nfs");
if (storeVO == null) {
String errMsg = String.format("Failed to fetch NFS store in zone = %s for SystemVM template registration", zoneId);
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
url = storeVO.getUrl();
storeId = storeVO.getId();
return new Pair<>(url, storeId);
}
public static void mountStore(String storeUrl, String path) {
try {
if (storeUrl != null) {
URI uri = new URI(UriUtils.encodeURIComponent(storeUrl));
String host = uri.getHost();
String mountPath = uri.getPath();
String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path);
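// e.g. storeUrl "nfs://10.1.1.2/export/secondary" produces: sudo mount -t nfs 10.1.1.2:/export/secondary <path>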
Script.runSimpleBashScript(mount);
}
} catch (Exception e) {
String msg = "NFS Store URL is not in the correct format";
LOGGER.error(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
private List<String> fetchAllHypervisors(Long zoneId) {
List<String> hypervisorList = new ArrayList<>();
List<Hypervisor.HypervisorType> hypervisorTypes = clusterDao.getAvailableHypervisorInZone(zoneId);
hypervisorList = hypervisorTypes.stream().distinct().map(Enum::name).collect(Collectors.toList());
return hypervisorList;
}
private Long createTemplateObjectInDB(SystemVMTemplateDetails details) {
Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id");
VMTemplateVO template = new VMTemplateVO();
template.setUuid(details.getUuid());
template.setUniqueName(String.format("routing-%s", templateId));
template.setName(details.getName());
template.setPublicTemplate(false);
template.setFeatured(false);
template.setTemplateType(Storage.TemplateType.SYSTEM);
template.setRequiresHvm(true);
template.setBits(64);
template.setAccountId(Account.ACCOUNT_ID_SYSTEM);
template.setUrl(details.getUrl());
template.setChecksum(details.getChecksum());
template.setEnablePassword(false);
template.setDisplayText(details.getName());
template.setFormat(details.getFormat());
template.setGuestOSId(details.getGuestOsId());
template.setCrossZones(true);
template.setHypervisorType(details.getHypervisorType());
template.setState(VirtualMachineTemplate.State.Inactive);
template.setDeployAsIs(Hypervisor.HypervisorType.VMware.equals(details.getHypervisorType()));
template = vmTemplateDao.persist(template);
if (template == null) {
return null;
}
return template.getId();
}
private void createTemplateStoreRefEntry(SystemVMTemplateDetails details) {
TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.storeId, details.getId(), details.getCreated(), 0,
VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, null, null, null, details.getInstallPath(), details.getUrl());
templateDataStoreVO.setDataStoreRole(DataStoreRole.Image);
templateDataStoreVO = templateDataStoreDao.persist(templateDataStoreVO);
if (templateDataStoreVO == null) {
throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s", details.getHypervisorType().name()));
}
}
public void updateTemplateDetails(SystemVMTemplateDetails details, boolean updateTemplateDetails) {
VMTemplateVO template = vmTemplateDao.findById(details.getId());
if (updateTemplateDetails) {
template.setSize(details.getSize());
template.setState(VirtualMachineTemplate.State.Active);
vmTemplateDao.update(template.getId(), template);
}
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), template.getId());
templateDataStoreVO.setSize(details.getSize());
templateDataStoreVO.setPhysicalSize(details.getPhysicalSize());
templateDataStoreVO.setDownloadPercent(100);
templateDataStoreVO.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED);
templateDataStoreVO.setLastUpdated(details.getUpdated());
templateDataStoreVO.setState(ObjectInDataStoreStateMachine.State.Ready);
boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO);
if (!updated) {
throw new CloudRuntimeException("Failed to update template_store_ref entry for registered systemVM template");
}
}
public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) {
vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType);
}
public void updateConfigurationParams(Map<String, String> configParams) {
for (Map.Entry<String, String> config : configParams.entrySet()) {
boolean updated = configurationDao.update(config.getKey(), config.getValue());
if (!updated) {
throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", config.getKey()));
}
}
}
private static void readTemplateProperties(String path, SystemVMTemplateDetails details) {
File tmpFile = new File(path);
Long size = null;
Long physicalSize = 0L;
try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) {
String line = null;
while ((line = brf.readLine()) != null) {
if (line.startsWith("size=")) {
physicalSize = Long.parseLong(line.split("=")[1]);
} else if (line.startsWith("virtualsize=")) {
size = Long.parseLong(line.split("=")[1]);
}
if (size == null) {
size = physicalSize;
}
}
} catch (IOException ex) {
LOGGER.warn("Failed to read from template.properties", ex);
}
details.setSize(size);
details.setPhysicalSize(physicalSize);
}
private void updateTemplateTablesOnFailure(long templateId) {
VMTemplateVO template = vmTemplateDao.createForUpdate(templateId);
template.setState(VirtualMachineTemplate.State.Inactive);
vmTemplateDao.update(template.getId(), template);
vmTemplateDao.remove(templateId);
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image);
templateDataStoreDao.remove(templateDataStoreVO.getId());
}
public static void unmountStore(String filePath) {
try {
LOGGER.info("Unmounting store");
String umountCmd = String.format(UMOUNT_COMMAND, filePath);
Script.runSimpleBashScript(umountCmd);
try {
Files.deleteIfExists(Paths.get(filePath));
} catch (IOException e) {
LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e);
}
} catch (Exception e) {
String msg = String.format("Failed to unmount store mounted at %s", filePath);
LOGGER.error(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
private void setupTemplate(String templateName, Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName,
String destTempFolder) throws CloudRuntimeException {
String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt");
if (setupTmpltScript == null) {
throw new CloudRuntimeException("Unable to find the createtmplt.sh");
}
Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER);
scr.add("-u", templateName);
scr.add("-f", TEMPLATES_PATH + FileNames.get(hypervisorAndTemplateName.first()));
scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT));
scr.add("-d", destTempFolder);
String result = scr.execute();
if (result != null) {
String errMsg = String.format("failed to create template: %s ", result);
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
private Long performTemplateRegistrationOperations(Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName,
String url, String checksum, ImageFormat format, long guestOsId,
Long storeId, Long templateId, String filePath, boolean updateTmpltDetails) {
Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first();
String templateName = UUID.randomUUID().toString();
Date created = new Date(DateUtil.currentGMTTime().getTime());
SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created,
url, checksum, format, (int) guestOsId, hypervisor, storeId);
if (templateId == null) {
templateId = createTemplateObjectInDB(details);
}
if (templateId == null) {
throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name()));
}
details.setId(templateId);
String destTempFolderName = String.valueOf(templateId);
String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName;
details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension());
createTemplateStoreRefEntry(details);
setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder);
readTemplateProperties(destTempFolder + "/template.properties", details);
details.setUpdated(new Date(DateUtil.currentGMTTime().getTime()));
updateTemplateDetails(details, updateTmpltDetails);
return templateId;
}
public void registerTemplate(Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName,
Pair<String, Long> storeUrlAndId, VMTemplateVO templateVO, String filePath) {
Long templateId = null;
try {
templateId = templateVO.getId();
performTemplateRegistrationOperations(hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(),
templateVO.getFormat(), templateVO.getGuestOSId(), storeUrlAndId.second(), templateId, filePath, false);
} catch (Exception e) {
String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first());
LOGGER.error(errMsg, e);
if (templateId != null) {
updateTemplateTablesOnFailure(templateId);
cleanupStore(templateId, filePath);
}
throw new CloudRuntimeException(errMsg, e);
}
}
public void registerTemplate(Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName, Pair<String, Long> storeUrlAndId, String filePath) {
Long templateId = null;
try {
Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first();
templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor),
hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true);
Map<String, String> configParams = new HashMap<>();
configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second());
configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_TINY_VERSION);
updateConfigurationParams(configParams);
updateSystemVMEntries(templateId, hypervisorAndTemplateName.first());
} catch (Exception e) {
String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first());
LOGGER.error(errMsg, e);
if (templateId != null) {
updateTemplateTablesOnFailure(templateId);
cleanupStore(templateId, filePath);
}
throw new CloudRuntimeException(errMsg, e);
}
}
public static void parseMetadataFile() {
try {
Ini ini = new Ini();
ini.load(new FileReader(METADATA_FILE));
for (Hypervisor.HypervisorType hypervisorType : hypervisorList) {
String hypervisor = hypervisorType.name().toLowerCase(Locale.ROOT);
Ini.Section section = ini.get(hypervisor);
NewTemplateNameList.put(hypervisorType, section.get("templatename"));
FileNames.put(hypervisorType, section.get("filename"));
NewTemplateChecksum.put(hypervisorType, section.get("checksum"));
NewTemplateUrl.put(hypervisorType, section.get("downloadurl"));
}
} catch (Exception e) {
String errMsg = String.format("Failed to parse systemVM template metadata file: %s", METADATA_FILE);
LOGGER.error(errMsg, e);
throw new CloudRuntimeException(errMsg, e);
}
}
private static void cleanupStore(Long templateId, String filePath) {
String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId);
try {
Files.deleteIfExists(Paths.get(destTempFolder));
} catch (IOException e) {
LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e);
}
}
private void validateTemplates(Set<Hypervisor.HypervisorType> hypervisorsInUse) {
Set<String> hypervisors = hypervisorsInUse.stream().map(Enum::name).
map(name -> name.toLowerCase(Locale.ROOT)).map(this::getHypervisorName).collect(Collectors.toSet());
List<String> templates = new ArrayList<>();
for (Hypervisor.HypervisorType hypervisorType : hypervisorsInUse) {
templates.add(FileNames.get(hypervisorType));
}
boolean templatesFound = true;
for (String hypervisor : hypervisors) {
String matchedTemplate = templates.stream().filter(x -> x.contains(hypervisor)).findAny().orElse(null);
if (matchedTemplate == null) {
templatesFound = false;
break;
}
File tempFile = new File(TEMPLATES_PATH + matchedTemplate);
String templateChecksum = calculateChecksum(tempFile);
if (!templateChecksum.equals(NewTemplateChecksum.get(getHypervisorType(hypervisor)))) {
LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, NewTemplateChecksum.get(getHypervisorType(hypervisor))));
templatesFound = false;
break;
}
}
if (!templatesFound) {
String errMsg = "SystemVm template not found. Cannot upgrade system Vms";
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
public void registerTemplates(Set<Hypervisor.HypervisorType> hypervisorsInUse) {
GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock");
try {
LOGGER.info("Grabbing lock to register templates.");
if (!lock.lock(LOCK_WAIT_TIMEOUT)) {
throw new CloudRuntimeException("Unable to acquire lock to register SystemVM template.");
}
try {
validateTemplates(hypervisorsInUse);
// Perform Registration if templates not already registered
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
List<Long> zoneIds = getEligibleZoneIds();
for (Long zoneId : zoneIds) {
String filePath = null;
try {
filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString();
if (filePath == null) {
throw new CloudRuntimeException("Failed to create temporary file path to mount the store");
}
Pair<String, Long> storeUrlAndId = getNfsStoreInZone(zoneId);
mountStore(storeUrlAndId.first(), filePath);
List<String> hypervisorList = fetchAllHypervisors(zoneId);
for (String hypervisor : hypervisorList) {
Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor);
String templateName = NewTemplateNameList.get(name);
Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName = new Pair<Hypervisor.HypervisorType, String>(name, templateName);
Long templateId = getRegisteredTemplateId(hypervisorAndTemplateName);
if (templateId != null) {
VMTemplateVO templateVO = vmTemplateDao.findById(templateId);
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(templateId, DataStoreRole.Image);
String installPath = templateDataStoreVO.getInstallPath();
if (validateIfSeeded(storeUrlAndId.first(), installPath)) {
continue;
} else if (templateVO != null) {
registerTemplate(hypervisorAndTemplateName, storeUrlAndId, templateVO, filePath);
continue;
}
}
registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath);
}
unmountStore(filePath);
} catch (Exception e) {
unmountStore(filePath);
throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed");
}
}
}
});
} catch (Exception e) {
throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed");
}
} finally {
lock.unlock();
lock.releaseRef();
}
}
private void updateRegisteredTemplateDetails(Long templateId, Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
VMTemplateVO templateVO = vmTemplateDao.findById(templateId);
templateVO.setTemplateType(Storage.TemplateType.SYSTEM);
if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) {
templateVO.setDeployAsIs(true);
}
boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
if (!updated) {
String errMsg = String.format("updateSystemVmTemplates:Exception while updating template with id %s to be marked as 'system'", templateId);
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
updateSystemVMEntries(templateId, hypervisorAndTemplateName.getKey());
// Change value of global configuration parameter router.template.* for the corresponding hypervisor and minreq.sysvmtemplate.version for the ACS version
Map<String, String> configParams = new HashMap<>();
configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue());
configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." + CS_TINY_VERSION);
updateConfigurationParams(configParams);
}
private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName) {
templateVO.setUrl(NewTemplateUrl.get(hypervisorAndTemplateName.getKey()));
templateVO.setChecksum(NewTemplateChecksum.get(hypervisorAndTemplateName.getKey()));
if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) {
templateVO.setDeployAsIs(true);
}
boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
if (!updated) {
String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", hypervisorAndTemplateName.getKey().name());
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
public void updateSystemVmTemplates(final Connection conn) {
LOGGER.debug("Updating System Vm template IDs");
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
Set<Hypervisor.HypervisorType> hypervisorsListInUse = new HashSet<Hypervisor.HypervisorType>();
try {
hypervisorsListInUse = clusterDao.getDistinctAvailableHypervisorsAcrossClusters();
} catch (final Exception e) {
LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e);
}
for (final Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName : NewTemplateNameList.entrySet()) {
LOGGER.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms");
Long templateId = getRegisteredTemplateId(new Pair<>(hypervisorAndTemplateName.getKey(), hypervisorAndTemplateName.getValue()));
try {
// change template type to SYSTEM
if (templateId != null) {
updateRegisteredTemplateDetails(templateId, hypervisorAndTemplateName);
} else {
if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) {
try {
registerTemplates(hypervisorsListInUse);
break;
} catch (final Exception e) {
throw new CloudRuntimeException(String.format("%s.%s %s SystemVm template not found. Cannot upgrade system Vms", CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey()));
}
} else {
LOGGER.warn(String.format("%s.%s %s SystemVm template not found. Cannot upgrade system Vms hypervisor is not used, so not failing upgrade",
CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey()));
// Update the latest template URLs for corresponding hypervisor
VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisor(hypervisorAndTemplateName.getKey(), Storage.TemplateType.SYSTEM);
if (templateVO != null) {
updateTemplateUrlAndChecksum(templateVO, hypervisorAndTemplateName);
}
}
}
} catch (final Exception e) {
String errMsg = "updateSystemVmTemplates:Exception while getting ids of templates";
LOGGER.error(errMsg, e);
throw new CloudRuntimeException(errMsg, e);
}
}
LOGGER.debug("Updating System Vm Template IDs Complete");
}
});
}
}
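
Pieced together from the DatabaseUpgradeChecker hunk earlier, the upgrade-time flow that drives this class looks roughly like this (a sketch, not verbatim from the patch; version values are illustrative):

// Sketch of how the upgrade path drives registration.
SystemVmTemplateRegistration.parseMetadataFile();        // loads templatename/filename/checksum/downloadurl per hypervisor
SystemVmTemplateRegistration.CS_MAJOR_VERSION = "4.16";  // derived from CloudStackVersion.parse(currentVersionValue)
SystemVmTemplateRegistration.CS_TINY_VERSION = "0";
SystemVmTemplateRegistration registration = new SystemVmTemplateRegistration();
registration.updateSystemVmTemplates(conn);              // registers or updates templates per hypervisor in use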


@@ -0,0 +1,236 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;
import java.util.List;
import java.util.Map;
import javax.naming.ConfigurationException;
import com.cloud.utils.db.Filter;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
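// Upgrade-time stand-in for the full TemplateDataStoreDaoImpl: only the lookups that
// SystemVmTemplateRegistration needs (findByStoreTemplate, findByTemplate) are implemented;
// the remaining interface methods are deliberate no-op stubs.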
public class BasicTemplateDataStoreDaoImpl extends GenericDaoBase<TemplateDataStoreVO, Long> implements TemplateDataStoreDao {
private SearchBuilder<TemplateDataStoreVO> templateRoleSearch;
private SearchBuilder<TemplateDataStoreVO> storeTemplateSearch;
public BasicTemplateDataStoreDaoImpl() {
super();
templateRoleSearch = createSearchBuilder();
templateRoleSearch.and("template_id", templateRoleSearch.entity().getTemplateId(), SearchCriteria.Op.EQ);
templateRoleSearch.and("store_role", templateRoleSearch.entity().getDataStoreRole(), SearchCriteria.Op.EQ);
templateRoleSearch.and("destroyed", templateRoleSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
templateRoleSearch.and("state", templateRoleSearch.entity().getState(), SearchCriteria.Op.EQ);
templateRoleSearch.done();
storeTemplateSearch = createSearchBuilder();
storeTemplateSearch.and("template_id", storeTemplateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ);
storeTemplateSearch.and("store_id", storeTemplateSearch.entity().getDataStoreId(), SearchCriteria.Op.EQ);
storeTemplateSearch.and("destroyed", storeTemplateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ);
storeTemplateSearch.done();
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
return true;
}
@Override
public List<TemplateDataStoreVO> listByStoreId(long id) {
return null;
}
@Override
public List<TemplateDataStoreVO> listDestroyed(long storeId) {
return null;
}
@Override
public List<TemplateDataStoreVO> listActiveOnCache(long id) {
return null;
}
@Override
public void deletePrimaryRecordsForStore(long id) {
}
@Override
public void deletePrimaryRecordsForTemplate(long templateId) {
}
@Override
public List<TemplateDataStoreVO> listByTemplateStore(long templateId, long storeId) {
return null;
}
@Override
public List<TemplateDataStoreVO> listByTemplateStoreStatus(long templateId, long storeId, ObjectInDataStoreStateMachine.State... states) {
return null;
}
@Override
public List<TemplateDataStoreVO> listByTemplateStoreDownloadStatus(long templateId, long storeId, VMTemplateStorageResourceAssoc.Status... status) {
return null;
}
@Override
public List<TemplateDataStoreVO> listByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) {
return null;
}
@Override
public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) {
return null;
}
@Override
public TemplateDataStoreVO findByTemplateZoneStagingDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) {
return null;
}
@Override
public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId) {
SearchCriteria<TemplateDataStoreVO> sc = storeTemplateSearch.create();
sc.setParameters("store_id", storeId);
sc.setParameters("template_id", templateId);
sc.setParameters("destroyed", false);
Filter filter = new Filter(TemplateDataStoreVO.class, "id", false, 0L, 1L);
List<TemplateDataStoreVO> templates = listBy(sc, filter);
if ((templates != null) && !templates.isEmpty()) {
return templates.get(0);
}
return null;
}
@Override
public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId, boolean lock) {
return null;
}
@Override
public TemplateDataStoreVO findByTemplate(long templateId, DataStoreRole role) {
SearchCriteria<TemplateDataStoreVO> sc = templateRoleSearch.create();
sc.setParameters("template_id", templateId);
sc.setParameters("store_role", role);
sc.setParameters("destroyed", false);
return findOneIncludingRemovedBy(sc);
}
@Override
public TemplateDataStoreVO findReadyByTemplate(long templateId, DataStoreRole role) {
return null;
}
@Override
public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, DataStoreRole role) {
return null;
}
@Override
public List<TemplateDataStoreVO> listByTemplate(long templateId) {
return null;
}
@Override
public List<TemplateDataStoreVO> listByTemplateNotBypassed(long templateId, Long... storeIds) {
return null;
}
@Override
public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) {
return null;
}
@Override
public void duplicateCacheRecordsOnRegionStore(long storeId) {
}
@Override
public TemplateDataStoreVO findReadyOnCache(long templateId) {
return null;
}
@Override
public List<TemplateDataStoreVO> listOnCache(long templateId) {
return null;
}
@Override
public void updateStoreRoleToCachce(long storeId) {
}
@Override
public List<TemplateDataStoreVO> listTemplateDownloadUrls() {
return null;
}
@Override
public void removeByTemplateStore(long templateId, long imageStoreId) {
}
@Override
public void expireDnldUrlsForZone(Long dcId) {
}
@Override
public List<TemplateDataStoreVO> listByTemplateState(VirtualMachineTemplate.State... states) {
return null;
}
@Override
public TemplateDataStoreVO createTemplateDirectDownloadEntry(long templateId, Long size) {
return null;
}
@Override
public TemplateDataStoreVO getReadyBypassedTemplate(long templateId) {
return null;
}
@Override
public boolean isTemplateMarkedForDirectDownload(long templateId) {
return false;
}
@Override
public List<TemplateDataStoreVO> listTemplateDownloadUrlsByStoreId(long storeId) {
return null;
}
@Override
public boolean updateState(ObjectInDataStoreStateMachine.State currentState, ObjectInDataStoreStateMachine.Event event, ObjectInDataStoreStateMachine.State nextState, DataObjectInStore vo, Object data) {
return false;
}
}
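The class above reads like a test double: only the two searches prepared in its constructor are backed by real logic, and most overrides return null or false. For orientation, a minimal sketch of the CloudStack SearchBuilder pattern those constructors use, assembled only from calls visible in this file (the templateId variable is illustrative):

    // Build once: each and() clause names a parameter and binds it to an entity getter.
    SearchBuilder<TemplateDataStoreVO> sb = createSearchBuilder();
    sb.and("template_id", sb.entity().getTemplateId(), SearchCriteria.Op.EQ);
    sb.done();

    // Per query: stamp the builder into fresh criteria and fill in the values.
    SearchCriteria<TemplateDataStoreVO> sc = sb.create();
    sc.setParameters("template_id", templateId);
    List<TemplateDataStoreVO> rows = listBy(sc);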

View File

@ -22,19 +22,19 @@ import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.cloud.upgrade.SystemVmTemplateRegistration;
import org.apache.log4j.Logger;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade41520to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate {
final static Logger LOG = Logger.getLogger(Upgrade41520to41600.class);
private SystemVmTemplateRegistration systemVmTemplateRegistration;
public Upgrade41520to41600() {
}
@Override
public String[] getUpgradableVersionRange() {
@ -92,173 +92,20 @@ public class Upgrade41520to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate
}
}
private void initSystemVmTemplateRegistration() {
systemVmTemplateRegistration = new SystemVmTemplateRegistration();
}
@Override
@SuppressWarnings("serial")
public void updateSystemVmTemplates(final Connection conn) {
LOG.debug("Updating System Vm template IDs");
final Set<Hypervisor.HypervisorType> hypervisorsListInUse = new HashSet<Hypervisor.HypervisorType>();
try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
switch (Hypervisor.HypervisorType.getType(rs.getString(1))) {
case XenServer:
hypervisorsListInUse.add(Hypervisor.HypervisorType.XenServer);
break;
case KVM:
hypervisorsListInUse.add(Hypervisor.HypervisorType.KVM);
break;
case VMware:
hypervisorsListInUse.add(Hypervisor.HypervisorType.VMware);
break;
case Hyperv:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv);
break;
case LXC:
hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC);
break;
case Ovm3:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3);
break;
default:
break;
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e);
initSystemVmTemplateRegistration();
try {
systemVmTemplateRegistration.updateSystemVmTemplates(conn);
} catch (Exception e) {
throw new CloudRuntimeException("Failed to find / register SystemVM template(s)");
}
final Map<Hypervisor.HypervisorType, String> NewTemplateNameList = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.16.0");
put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.16.0");
put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-4.16.0");
put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.16.0");
put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.16.0");
put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.16.0");
}
};
final Map<Hypervisor.HypervisorType, String> routerTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateUrl = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova");
put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2");
put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-hyperv.vhd.zip");
put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-ovm.raw.bz2");
}
};
final Map<Hypervisor.HypervisorType, String> newTemplateChecksum = new HashMap<Hypervisor.HypervisorType, String>() {
{
put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb");
put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017");
put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f");
put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad");
put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb");
put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375");
}
};
for (final Map.Entry<Hypervisor.HypervisorType, String> hypervisorAndTemplateName : NewTemplateNameList.entrySet()) {
LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms");
try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? and removed is null order by id desc limit 1")) {
// Get systemvm template id for corresponding hypervisor
long templateId = -1;
pstmt.setString(1, hypervisorAndTemplateName.getValue());
try (ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
templateId = rs.getLong(1);
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e);
}
// change template type to SYSTEM
if (templateId != -1) {
try (PreparedStatement templ_type_pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");) {
templ_type_pstmt.setLong(1, templateId);
templ_type_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e);
}
// update template ID of system Vms
try (PreparedStatement update_templ_id_pstmt = conn
.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) {
update_templ_id_pstmt.setLong(1, templateId);
update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString());
update_templ_id_pstmt.executeUpdate();
} catch (final Exception e) {
LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId
+ ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to "
+ templateId, e);
}
// Change value of global configuration parameter
// router.template.* for the corresponding hypervisor
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, hypervisorAndTemplateName.getValue());
update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()));
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to "
+ hypervisorAndTemplateName.getValue() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting "
+ routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e);
}
// Change value of global configuration parameter
// minreq.sysvmtemplate.version for the ACS version
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, "4.16.0");
update_pstmt.setString(2, "minreq.sysvmtemplate.version");
update_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0", e);
}
} else {
if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) {
throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms");
} else {
LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey()
+ " hypervisor is not used, so not failing upgrade");
// Update the latest template URLs for corresponding
// hypervisor
try (PreparedStatement update_templ_url_pstmt = conn
.prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) {
update_templ_url_pstmt.setString(1, newTemplateUrl.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(2, newTemplateChecksum.get(hypervisorAndTemplateName.getKey()));
update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString());
update_templ_url_pstmt.executeUpdate();
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type "
+ hypervisorAndTemplateName.getKey().toString(), e);
}
}
}
} catch (final SQLException e) {
LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage());
throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e);
}
}
LOG.debug("Updating System Vm Template IDs Complete");
}
@Override

View File

@ -48,6 +48,9 @@ public class UserVmVO extends VMInstanceVO implements UserVm {
@Column(name = "update_parameters", updatable = true)
protected boolean updateParameters = true;
@Column(name = "user_vm_type", updatable = true)
private String userVmType;
transient String password;
@Override
@ -126,6 +129,14 @@ public class UserVmVO extends VMInstanceVO implements UserVm {
return updateParameters;
}
public String getUserVmType() {
return userVmType;
}
public void setUserVmType(String userVmType) {
this.userVmType = userVmType;
}
@Override
public String getName() {
return instanceName;

View File

@ -21,6 +21,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.Pair;
import com.cloud.utils.db.GenericDao;
import com.cloud.utils.fsm.StateDao;
@ -159,4 +160,6 @@ public interface VMInstanceDao extends GenericDao<VMInstanceVO, Long>, StateDao<
List<VMInstanceVO> listNonMigratingVmsByHostEqualsLastHost(long hostId);
void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType);
}

View File

@ -28,6 +28,7 @@ import java.util.Map;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import com.cloud.hypervisor.Hypervisor;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
@ -126,6 +127,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
private static final String COUNT_VMS_BASED_ON_VGPU_TYPES2 =
"GROUP BY offering.service_offering_id) results GROUP BY pci, type";
private static final String UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR = "UPDATE `cloud`.`vm_instance` SET vm_template_id = ? WHERE type <> 'User' AND hypervisor_type = ? AND removed is NULL";
@Inject
protected HostDao _hostDao;
@ -941,4 +944,23 @@ public class VMInstanceDaoImpl extends GenericDaoBase<VMInstanceVO, Long> implem
}
});
}
@Override
public void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
StringBuilder sql = new StringBuilder(UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR);
try {
PreparedStatement updateStatement = txn.prepareAutoCloseStatement(sql.toString());
updateStatement.setLong(1, templateId);
updateStatement.setString(2, hypervisorType.toString());
updateStatement.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + sql, e);
} catch (Throwable e) {
throw new CloudRuntimeException("Caught: " + sql, e);
}
}
}
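A usage sketch for the new DAO method; the upgrade path would presumably invoke it after registering a fresh template in order to repoint all system VMs of a hypervisor type at it (newTemplateId is illustrative):

    // Illustrative only: repoint every non-User VM on KVM to the new template.
    // Internally this runs the prepared UPDATE above within the current transaction.
    long newTemplateId = 42L;
    vmInstanceDao.updateSystemVmTemplateId(newTemplateId, Hypervisor.HypervisorType.KVM);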

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.datastore.db;
import java.util.List;
import com.cloud.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import com.cloud.utils.db.GenericDao;
@ -42,4 +43,10 @@ public interface ImageStoreDao extends GenericDao<ImageStoreVO, Long> {
List<ImageStoreVO> listImageCacheStores();
List<ImageStoreVO> listStoresByZoneId(long zoneId);
List<ImageStoreVO> listAllStoresInZone(Long zoneId, String provider, DataStoreRole role);
List<ImageStoreVO> findByProtocol(String protocol);
ImageStoreVO findOneByZoneAndProtocol(long zoneId, String protocol);
}

View File

@ -23,6 +23,7 @@ import java.util.Map;
import javax.naming.ConfigurationException;
import com.cloud.utils.db.Filter;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
@ -38,7 +39,23 @@ public class ImageStoreDaoImpl extends GenericDaoBase<ImageStoreVO, Long> implem
private SearchBuilder<ImageStoreVO> nameSearch;
private SearchBuilder<ImageStoreVO> providerSearch;
private SearchBuilder<ImageStoreVO> regionSearch;
private SearchBuilder<ImageStoreVO> storeSearch;
private SearchBuilder<ImageStoreVO> protocolSearch;
private SearchBuilder<ImageStoreVO> zoneProtocolSearch;
public ImageStoreDaoImpl() {
super();
protocolSearch = createSearchBuilder();
protocolSearch.and("protocol", protocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ);
protocolSearch.and("role", protocolSearch.entity().getRole(), SearchCriteria.Op.EQ);
protocolSearch.done();
zoneProtocolSearch = createSearchBuilder();
zoneProtocolSearch.and("dataCenterId", zoneProtocolSearch.entity().getDcId(), SearchCriteria.Op.EQ);
zoneProtocolSearch.and("protocol", zoneProtocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ);
zoneProtocolSearch.and("role", zoneProtocolSearch.entity().getRole(), SearchCriteria.Op.EQ);
zoneProtocolSearch.done();
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
@ -58,6 +75,12 @@ public class ImageStoreDaoImpl extends GenericDaoBase<ImageStoreVO, Long> implem
regionSearch.and("role", regionSearch.entity().getRole(), SearchCriteria.Op.EQ);
regionSearch.done();
storeSearch = createSearchBuilder();
storeSearch.and("providerName", storeSearch.entity().getProviderName(), SearchCriteria.Op.EQ);
storeSearch.and("role", storeSearch.entity().getRole(), SearchCriteria.Op.EQ);
storeSearch.and("dataCenterId", storeSearch.entity().getDcId(), SearchCriteria.Op.EQ);
storeSearch.done();
return true;
}
@ -76,6 +99,15 @@ public class ImageStoreDaoImpl extends GenericDaoBase<ImageStoreVO, Long> implem
return listBy(sc);
}
@Override
public List<ImageStoreVO> listAllStoresInZone(Long zoneId, String provider, DataStoreRole role) {
SearchCriteria<ImageStoreVO> sc = storeSearch.create();
sc.setParameters("providerName", provider);
sc.setParameters("role", role);
sc.setParameters("dataCenterId", zoneId);
return listBy(sc);
}
@Override
public List<ImageStoreVO> findByZone(ZoneScope scope, Boolean readonly) {
SearchCriteria<ImageStoreVO> sc = createSearchCriteria();
@ -140,4 +172,23 @@ public class ImageStoreDaoImpl extends GenericDaoBase<ImageStoreVO, Long> implem
sc.addAnd("dcId", SearchCriteria.Op.EQ, zoneId);
return listBy(sc);
}
@Override
public List<ImageStoreVO> findByProtocol(String protocol) {
SearchCriteria<ImageStoreVO> sc = protocolSearch.create();
sc.setParameters("protocol", protocol);
sc.setParameters("role", DataStoreRole.Image);
return listBy(sc);
}
@Override
public ImageStoreVO findOneByZoneAndProtocol(long dataCenterId, String protocol) {
SearchCriteria<ImageStoreVO> sc = zoneProtocolSearch.create();
sc.setParameters("dataCenterId", dataCenterId);
sc.setParameters("protocol", protocol);
sc.setParameters("role", DataStoreRole.Image);
Filter filter = new Filter(1);
List<ImageStoreVO> results = listBy(sc, filter);
return results.size() == 0 ? null : results.get(0);
}
}
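A usage sketch for the two new lookups; the SystemVM template registration flow would plausibly use them to locate an NFS image store on which to stage the bundled template (zoneId and the "nfs" protocol choice are illustrative assumptions):

    // Illustrative: pick the first NFS-backed image store in a zone, if any.
    ImageStoreVO store = imageStoreDao.findOneByZoneAndProtocol(zoneId, "nfs");
    if (store == null) {
        // fall back to any NFS image store, regardless of zone
        List<ImageStoreVO> candidates = imageStoreDao.findByProtocol("nfs");
    }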

View File

@ -135,6 +135,10 @@ public class ImageStoreVO implements ImageStore {
return this.dcId;
}
public Long getDcId() {
return this.dcId;
}
public ScopeType getScope() {
return this.scope;
}

View File

@ -19,6 +19,19 @@
-- Schema upgrade from 4.15.2.0 to 4.16.0.0
--;
ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "UserVM" COMMENT 'Defines the type of UserVM';
-- This is set, so as to ensure that the controller details from the ovf template are adhered to
UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8;
DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv");
ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint;
ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `control_node` tinyint(1) unsigned NOT NULL DEFAULT 0;
-- Adding dynamic scalable flag for service offering table
ALTER TABLE `cloud`.`service_offering` ADD COLUMN `dynamic_scaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'true(1) if VM needs to be dynamically scalable of cpu or memory';
DROP VIEW IF EXISTS `cloud`.`service_offering_view`;

View File

@ -0,0 +1,82 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
function getTemplateVersion() {
projVersion=$1
version="$(cut -d'-' -f1 <<<"$projVersion")"
subversion1="$(cut -d'.' -f1 <<<"$version")"
subversion2="$(cut -d'.' -f2 <<<"$version")"
minorversion="$(cut -d'.' -f3 <<<"$version")"
export CS_VERSION="${subversion1}"."${subversion2}"
export CS_MINOR_VERSION="${minorversion}"
}
function getGenericName() {
hypervisor=$(echo "$1" | tr "[:upper:]" "[:lower:]")
if [[ "$hypervisor" == "ovm3" ]]; then
echo "ovm"
elif [[ "$hypervisor" == "lxc" ]]; then
echo "kvm"
elif [[ "$hypervisor" == "xenserver" ]]; then
echo "xen"
else
echo "$hypervisor"
fi
}
function getChecksum() {
local fileData="$1"
local hvName=$2
while IFS= read -r line; do
if [[ $line == *"$hvName"* ]]; then
echo "$(cut -d' ' -f1 <<<"$line")"
fi
done <<< "$fileData"
}
function createMetadataFile() {
local fileData
fileData=$(cat "$SOURCEFILE")
for i in "${!templates[@]}"
do
section="$i"
hvName=$(getGenericName "$i")
templatename="systemvm-${i}-${CS_VERSION}"
checksum=$(getChecksum "$fileData" "$hvName")
downloadurl="${templates[$i]}"
filename="${downloadurl##*/}"
echo -e "[$section]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> "$METADATAFILE"
done
}
declare -A templates
getTemplateVersion $1
templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2"
["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova"
["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2"
["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip"
["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2"
["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" )
PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/"
mkdir -p "$PARENTPATH"
METADATAFILE="${PARENTPATH}metadata.ini"
echo > "$METADATAFILE"
SOURCEFILE="${PARENTPATH}md5sum.txt"
createMetadataFile
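The script emits one INI section per hypervisor into metadata.ini, with templatename, checksum, downloadurl and filename keys. A self-contained sketch of how a consumer could read that file in plain Java; the layout follows the echo above, but the parser itself is illustrative, not the code this commit ships:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Map;

    public class MetadataIniReader {
        // Parses [section] headers and "key = value" pairs as written by createMetadataFile.
        public static Map<String, Map<String, String>> parse(String path) throws IOException {
            Map<String, Map<String, String>> sections = new HashMap<>();
            String current = null;
            for (String line : Files.readAllLines(Paths.get(path))) {
                line = line.trim();
                if (line.isEmpty()) {
                    continue;
                }
                if (line.startsWith("[") && line.endsWith("]")) {
                    current = line.substring(1, line.length() - 1);
                    sections.put(current, new HashMap<>());
                } else if (current != null && line.contains("=")) {
                    String[] kv = line.split("=", 2);
                    sections.get(current).put(kv[0].trim(), kv[1].trim());
                }
            }
            return sections;
        }

        public static void main(String[] args) throws IOException {
            // e.g. prints the KVM template name and its checksum
            Map<String, Map<String, String>> meta = parse("metadata.ini");
            System.out.println(meta.get("kvm").get("templatename") + " " + meta.get("kvm").get("checksum"));
        }
    }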

View File

@ -298,6 +298,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir}
touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid
#install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina
# SystemVM template
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt
# UI
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/
@ -551,6 +556,7 @@ pip3 install --upgrade urllib3
%{_datadir}/%{name}-management/conf
%{_datadir}/%{name}-management/lib/*.jar
%{_datadir}/%{name}-management/logs
%{_datadir}/%{name}-management/templates
%attr(0755,root,root) %{_bindir}/%{name}-setup-databases
%attr(0755,root,root) %{_bindir}/%{name}-migrate-databases
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-password

View File

@ -291,6 +291,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir}
touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid
#install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina
# SystemVM template
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt
# UI
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/
@ -539,6 +544,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
%{_datadir}/%{name}-management/conf
%{_datadir}/%{name}-management/lib/*.jar
%{_datadir}/%{name}-management/logs
%{_datadir}/%{name}-management/templates
%attr(0755,root,root) %{_bindir}/%{name}-setup-databases
%attr(0755,root,root) %{_bindir}/%{name}-migrate-databases
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-password

View File

@ -293,6 +293,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir}
touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid
#install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina
# SystemVM template
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm
rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt
# UI
mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui
mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/
@ -533,6 +538,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz
%{_datadir}/%{name}-management/conf
%{_datadir}/%{name}-management/lib/*.jar
%{_datadir}/%{name}-management/logs
%{_datadir}/%{name}-management/templates
%attr(0755,root,root) %{_bindir}/%{name}-setup-databases
%attr(0755,root,root) %{_bindir}/%{name}-migrate-databases
%attr(0755,root,root) %{_bindir}/%{name}-set-guest-password

View File

@ -40,6 +40,7 @@ import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachine;
@ResourceWrapper(handles = StartCommand.class)
@ -88,14 +89,7 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false);
// pass cmdline info to system vms
if (vmSpec.getType() != VirtualMachine.Type.User) {
String controlIp = null;
for (final NicTO nic : vmSpec.getNics()) {
if (nic.getType() == TrafficType.Control) {
controlIp = nic.getIp();
break;
}
}
if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains(UserVmManager.CKS_NODE))) {
// try to patch and SSH into the systemvm for up to 5 minutes
for (int count = 0; count < 10; count++) {
// wait and try passCmdLine for 30 seconds at most for CLOUDSTACK-2823
@ -104,12 +98,22 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
}
}
final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource();
// check if the router is up
for (int count = 0; count < 60; count++) {
final boolean result = virtRouterResource.connect(controlIp, 1, 5000);
if (result) {
break;
if (vmSpec.getType() != VirtualMachine.Type.User) {
String controlIp = null;
for (final NicTO nic : vmSpec.getNics()) {
if (nic.getType() == TrafficType.Control) {
controlIp = nic.getIp();
break;
}
}
final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource();
// check if the router is up
for (int count = 0; count < 60; count++) {
final boolean result = virtRouterResource.connect(controlIp, 1, 5000);
if (result) {
break;
}
}
}
}
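The restructuring above is easier to read as two separate gates: command-line patching now also covers CKS node VMs, while the control-IP health check remains systemvm-only. A condensed sketch of the resulting flow, using only names from the wrapper above:

    // 1. Patch system VMs, and also user VMs whose boot args mark them as CKS nodes.
    if (vmSpec.getType() != VirtualMachine.Type.User
            || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains(UserVmManager.CKS_NODE))) {
        // retry passCmdLine for up to 5 minutes, as before
    }
    // 2. Only genuine system VMs have a Control-traffic NIC to probe.
    if (vmSpec.getType() != VirtualMachine.Type.User) {
        // find the Control NIC's IP, then poll virtRouterResource.connect(controlIp, 1, 5000)
    }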

View File

@ -48,6 +48,7 @@ import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import javax.xml.datatype.XMLGregorianCalendar;
import com.cloud.utils.script.Script;
import com.cloud.hypervisor.vmware.mo.NetworkMO;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.storage.command.CopyCommand;
@ -275,7 +276,6 @@ import com.cloud.utils.mgmt.JmxUtil;
import com.cloud.utils.mgmt.PropertyMapDynamicBean;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion;
import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;

View File

@ -37,6 +37,7 @@ public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm
StopRequested,
DestroyRequested,
RecoveryRequested,
AutoscaleRequested,
ScaleUpRequested,
ScaleDownRequested,
UpgradeRequested,
@ -81,6 +82,7 @@ public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm
s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert);
s_fsm.addTransition(State.Running, Event.AutoscaleRequested, State.Scaling);
s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling);
s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling);
s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running);
@ -131,4 +133,7 @@ public interface KubernetesCluster extends ControlledEntity, com.cloud.utils.fsm
@Override
State getState();
Date getCreated();
boolean getAutoscalingEnabled();
Long getMinSize();
Long getMaxSize();
}
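The new AutoscaleRequested event funnels into the existing Scaling state rather than introducing a dedicated one, so autoscaler-driven resizes share exit paths with manual scale-up/down. Sketched using only the transitions visible in this hunk:

    // Running --AutoscaleRequested--> Scaling       (added above)
    // Scaling --OperationSucceeded--> Running       (pre-existing, shown above)
    // Failure handling from Scaling is assumed to follow the existing FSM and is not part of this hunk.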

View File

@ -21,18 +21,19 @@ import java.net.MalformedURLException;
import java.net.URL;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.UUID;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
@ -273,60 +274,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logTransitStateAndThrow(logLevel, message, null, null, ex);
}
private boolean isKubernetesServiceTemplateConfigured(DataCenter zone) {
// Check Kubernetes VM template for zone
boolean isHyperVAvailable = false;
boolean isKVMAvailable = false;
boolean isVMwareAvailable = false;
boolean isXenserverAvailable = false;
List<ClusterVO> clusters = clusterDao.listByZoneId(zone.getId());
for (ClusterVO clusterVO : clusters) {
if (Hypervisor.HypervisorType.Hyperv.equals(clusterVO.getHypervisorType())) {
isHyperVAvailable = true;
}
if (Hypervisor.HypervisorType.KVM.equals(clusterVO.getHypervisorType())) {
isKVMAvailable = true;
}
if (Hypervisor.HypervisorType.VMware.equals(clusterVO.getHypervisorType())) {
isVMwareAvailable = true;
}
if (Hypervisor.HypervisorType.XenServer.equals(clusterVO.getHypervisorType())) {
isXenserverAvailable = true;
}
}
List<Pair<String, String>> templatePairs = new ArrayList<>();
if (isHyperVAvailable) {
templatePairs.add(new Pair<>(KubernetesClusterHyperVTemplateName.key(), KubernetesClusterHyperVTemplateName.value()));
}
if (isKVMAvailable) {
templatePairs.add(new Pair<>(KubernetesClusterKVMTemplateName.key(), KubernetesClusterKVMTemplateName.value()));
}
if (isVMwareAvailable) {
templatePairs.add(new Pair<>(KubernetesClusterVMwareTemplateName.key(), KubernetesClusterVMwareTemplateName.value()));
}
if (isXenserverAvailable) {
templatePairs.add(new Pair<>(KubernetesClusterXenserverTemplateName.key(), KubernetesClusterXenserverTemplateName.value()));
}
for (Pair<String, String> templatePair : templatePairs) {
String templateKey = templatePair.first();
String templateName = templatePair.second();
if (Strings.isNullOrEmpty(templateName)) {
LOGGER.warn(String.format("Global setting %s is empty. Template name need to be specified for Kubernetes service to function", templateKey));
return false;
}
final VMTemplateVO template = templateDao.findValidByTemplateName(templateName);
if (template == null) {
LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName));
return false;
}
if (CollectionUtils.isEmpty(templateJoinDao.newTemplateView(template, zone.getId(), true))) {
LOGGER.warn(String.format("The template ID: %s, name: %s is not available for use in zone ID: %s provisioning Kubernetes cluster nodes", template.getUuid(), templateName, zone.getUuid()));
return false;
}
}
return true;
}
private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) {
// Check network offering
String networkOfferingName = KubernetesClusterNetworkOffering.value();
@ -374,9 +321,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
private boolean isKubernetesServiceConfigured(DataCenter zone) {
if (!isKubernetesServiceTemplateConfigured(zone)) {
return false;
}
if (!isKubernetesServiceNetworkOfferingConfigured(zone)) {
return false;
}
@ -396,23 +340,12 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
return null;
}
private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) {
String templateName = null;
switch (hypervisorType) {
case Hyperv:
templateName = KubernetesClusterHyperVTemplateName.value();
break;
case KVM:
templateName = KubernetesClusterKVMTemplateName.value();
break;
case VMware:
templateName = KubernetesClusterVMwareTemplateName.value();
break;
case XenServer:
templateName = KubernetesClusterXenserverTemplateName.value();
break;
public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) {
VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType);
if (template == null) {
throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenter.getId());
}
return templateDao.findValidByTemplateName(templateName);
return template;
}
private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) {
@ -482,7 +415,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid()));
}
if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE));
}
if (serviceOffering.getCpu() < version.getMinimumCpu()) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes version ID: %s needs minimum %d vCPUs", serviceOffering.getUuid(), version.getUuid(), version.getMinimumCpu()));
@ -634,6 +567,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
response.setIpAddressId(ipAddresses.get(0).getUuid());
}
}
List<UserVmResponse> vmResponses = new ArrayList<UserVmResponse>();
List<KubernetesClusterVmMapVO> vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
ResponseView respView = ResponseView.Restricted;
@ -655,6 +589,9 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
response.setHasAnnotation(annotationDao.hasAnnotations(kubernetesCluster.getUuid(),
AnnotationService.EntityType.KUBERNETES_CLUSTER.name(), accountService.isRootAdmin(caller.getId())));
response.setVirtualMachines(vmResponses);
response.setAutoscalingEnabled(kubernetesCluster.getAutoscalingEnabled());
response.setMinSize(kubernetesCluster.getMinSize());
response.setMaxSize(kubernetesCluster.getMaxSize());
return response;
}
@ -678,6 +615,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final String sshKeyPair = cmd.getSSHKeyPairName();
final Long controlNodeCount = cmd.getControlNodes();
final Long clusterSize = cmd.getClusterSize();
final long totalNodeCount = controlNodeCount + clusterSize;
final String dockerRegistryUserName = cmd.getDockerRegistryUserName();
final String dockerRegistryPassword = cmd.getDockerRegistryPassword();
final String dockerRegistryUrl = cmd.getDockerRegistryUrl();
@ -689,14 +627,20 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name);
}
if (controlNodeCount < 1 || controlNodeCount > 100) {
if (controlNodeCount < 1) {
throw new InvalidParameterValueException("Invalid cluster control nodes count: " + controlNodeCount);
}
if (clusterSize < 1 || clusterSize > 100) {
if (clusterSize < 1) {
throw new InvalidParameterValueException("Invalid cluster size: " + clusterSize);
}
int maxClusterSize = KubernetesMaxClusterSize.valueIn(owner.getId());
if (totalNodeCount > maxClusterSize) {
throw new InvalidParameterValueException(
String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize));
}
DataCenter zone = dataCenterDao.findById(zoneId);
if (zone == null) {
throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId);
@ -870,29 +814,89 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
final Long kubernetesClusterId = cmd.getId();
final Long serviceOfferingId = cmd.getServiceOfferingId();
final Long clusterSize = cmd.getClusterSize();
final List<Long> nodeIds = cmd.getNodeIds();
final Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled();
final Long minSize = cmd.getMinSize();
final Long maxSize = cmd.getMaxSize();
if (kubernetesClusterId == null || kubernetesClusterId < 1L) {
throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
}
KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId);
if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) {
throw new InvalidParameterValueException("Invalid Kubernetes cluster ID");
}
final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
if (zone == null) {
logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName()));
}
if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster %s cannot be scaled, either service offering or cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getName()));
}
Account caller = CallContext.current().getCallingAccount();
accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster);
if (serviceOfferingId == null && clusterSize == null) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled, either a new service offering or a new cluster size must be passed", kubernetesCluster.getName()));
}
final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
if (clusterVersion == null) {
throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster : %s", kubernetesCluster.getName()));
}
List<KubernetesCluster.State> validClusterStates = Arrays.asList(KubernetesCluster.State.Created, KubernetesCluster.State.Running, KubernetesCluster.State.Stopped);
if (!(validClusterStates.contains(kubernetesCluster.getState()))) {
throw new PermissionDeniedException(String.format("Kubernetes cluster %s is in %s state and can not be scaled", kubernetesCluster.getName(), kubernetesCluster.getState().toString()));
}
int maxClusterSize = KubernetesMaxClusterSize.valueIn(kubernetesCluster.getAccountId());
if (isAutoscalingEnabled != null && isAutoscalingEnabled) {
if (clusterSize != null || serviceOfferingId != null || nodeIds != null) {
throw new InvalidParameterValueException("Autoscaling can not be passed along with nodeids or clustersize or service offering");
}
if (!KubernetesVersionManagerImpl.versionSupportsAutoscaling(clusterVersion)) {
throw new InvalidParameterValueException(String.format("Autoscaling requires Kubernetes Version %s or above",
KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION));
}
validateEndpointUrl();
if (minSize == null || maxSize == null) {
throw new InvalidParameterValueException("Autoscaling requires minsize and maxsize to be passed");
}
if (minSize < 1) {
throw new InvalidParameterValueException("Minsize must be at least than 1");
}
if (maxSize <= minSize) {
throw new InvalidParameterValueException("Maxsize must be greater than minsize");
}
if (maxSize + kubernetesCluster.getControlNodeCount() > maxClusterSize) {
throw new InvalidParameterValueException(
String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize));
}
}
if (nodeIds != null) {
if (clusterSize != null || serviceOfferingId != null) {
throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering");
}
List<KubernetesClusterVmMapVO> nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds);
// Do all the nodes exist ?
if (nodes == null || nodes.size() != nodeIds.size()) {
throw new InvalidParameterValueException("Invalid node ids");
}
// Ensure there's always a control node
long controlNodesToRemove = nodes.stream().filter(x -> x.isControlNode()).count();
if (controlNodesToRemove >= kubernetesCluster.getControlNodeCount()) {
throw new InvalidParameterValueException("Can not remove all control nodes from a cluster");
}
// Ensure there's always a node
long nodesToRemove = nodes.stream().filter(x -> !x.isControlNode()).count();
if (nodesToRemove >= kubernetesCluster.getNodeCount()) {
throw new InvalidParameterValueException("Can not remove all nodes from a cluster");
}
}
ServiceOffering serviceOffering = null;
if (serviceOfferingId != null) {
@ -924,12 +928,6 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
}
if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) ||
kubernetesCluster.getState().equals(KubernetesCluster.State.Running) ||
kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) {
throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), kubernetesCluster.getState().toString()));
}
if (clusterSize != null) {
if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { // Cannot scale stopped cluster currently for cluster size
throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), kubernetesCluster.getState().toString()));
@ -937,6 +935,10 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
if (clusterSize < 1) {
throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled for size, %d", kubernetesCluster.getName(), clusterSize));
}
if (clusterSize + kubernetesCluster.getControlNodeCount() > maxClusterSize) {
throw new InvalidParameterValueException(
String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize));
}
if (clusterSize > kubernetesCluster.getNodeCount()) { // Upscale
VMTemplateVO template = templateDao.findById(kubernetesCluster.getTemplateId());
if (template == null) {
@ -982,8 +984,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
KubernetesSupportedVersionVO clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId());
if (clusterVersion == null || clusterVersion.getRemoved() != null) {
throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster ID: %s",
kubernetesCluster.getUuid()));
throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster : %s",
kubernetesCluster.getName()));
}
final ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(kubernetesCluster.getServiceOfferingId());
if (serviceOffering == null) {
@ -1050,7 +1052,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
}
final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId());
final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType());
final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType());
final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize);
final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize);
@ -1115,7 +1117,7 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName()));
}
KubernetesClusterStartWorker startWorker =
new KubernetesClusterStartWorker(kubernetesCluster, this);
new KubernetesClusterStartWorker(kubernetesCluster, this);
startWorker = ComponentContext.inject(startWorker);
if (onCreate) {
// Start for Kubernetes cluster in 'Created' state
@ -1279,9 +1281,20 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
}
validateKubernetesClusterScaleParameters(cmd);
KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId());
Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId());
String[] keys = getServiceUserKeys(owner);
KubernetesClusterScaleWorker scaleWorker =
new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()),
serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), this);
new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()),
serviceOfferingDao.findById(cmd.getServiceOfferingId()),
cmd.getClusterSize(),
cmd.getNodeIds(),
cmd.isAutoscalingEnabled(),
cmd.getMinSize(),
cmd.getMaxSize(),
this);
scaleWorker.setKeys(keys);
scaleWorker = ComponentContext.inject(scaleWorker);
return scaleWorker.scaleCluster();
}
@ -1291,13 +1304,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
if (!KubernetesServiceEnabled.value()) {
logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled");
}
validateKubernetesClusterUpgradeParameters(cmd);
KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId());
Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId());
String[] keys = getServiceUserKeys(owner);
KubernetesClusterUpgradeWorker upgradeWorker =
new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()),
kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this, keys);
new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()),
kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this, keys);
upgradeWorker = ComponentContext.inject(upgradeWorker);
return upgradeWorker.upgradeCluster();
}
@ -1501,8 +1515,8 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
// check cluster is running at desired capacity include control nodes as well
if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s",
clusterVMs.size(), kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), state.toString()));
LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s",
clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString()));
}
return false;
}
@ -1578,16 +1592,13 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {
KubernetesServiceEnabled,
KubernetesClusterHyperVTemplateName,
KubernetesClusterKVMTemplateName,
KubernetesClusterVMwareTemplateName,
KubernetesClusterXenserverTemplateName,
KubernetesClusterNetworkOffering,
KubernetesClusterStartTimeout,
KubernetesClusterScaleTimeout,
KubernetesClusterUpgradeTimeout,
KubernetesClusterExperimentalFeaturesEnabled
KubernetesServiceEnabled,
KubernetesClusterNetworkOffering,
KubernetesClusterStartTimeout,
KubernetesClusterScaleTimeout,
KubernetesClusterUpgradeTimeout,
KubernetesClusterExperimentalFeaturesEnabled,
KubernetesMaxClusterSize
};
}
}

View File

@ -41,26 +41,6 @@ public interface KubernetesClusterService extends PluggableService, Configurable
"false",
"Indicates whether Kubernetes Service plugin is enabled or not. Management server restart needed on change",
false);
static final ConfigKey<String> KubernetesClusterHyperVTemplateName = new ConfigKey<String>("Advanced", String.class,
"cloud.kubernetes.cluster.template.name.hyperv",
"Kubernetes-Service-Template-HyperV",
"Name of the template to be used for creating Kubernetes cluster nodes on HyperV",
true);
static final ConfigKey<String> KubernetesClusterKVMTemplateName = new ConfigKey<String>("Advanced", String.class,
"cloud.kubernetes.cluster.template.name.kvm",
"Kubernetes-Service-Template-KVM",
"Name of the template to be used for creating Kubernetes cluster nodes on KVM",
true);
static final ConfigKey<String> KubernetesClusterVMwareTemplateName = new ConfigKey<String>("Advanced", String.class,
"cloud.kubernetes.cluster.template.name.vmware",
"Kubernetes-Service-Template-VMware",
"Name of the template to be used for creating Kubernetes cluster nodes on VMware",
true);
static final ConfigKey<String> KubernetesClusterXenserverTemplateName = new ConfigKey<String>("Advanced", String.class,
"cloud.kubernetes.cluster.template.name.xenserver",
"Kubernetes-Service-Template-Xenserver",
"Name of the template to be used for creating Kubernetes cluster nodes on Xenserver",
true);
static final ConfigKey<String> KubernetesClusterNetworkOffering = new ConfigKey<String>("Advanced", String.class,
"cloud.kubernetes.cluster.network.offering",
"DefaultNetworkOfferingforKubernetesService",
@ -86,6 +66,12 @@ public interface KubernetesClusterService extends PluggableService, Configurable
"false",
"Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not",
true);
static final ConfigKey<Integer> KubernetesMaxClusterSize = new ConfigKey<Integer>("Advanced", Integer.class,
"cloud.kubernetes.cluster.max.size",
"10",
"Maximum size of the kubernetes cluster.",
true, ConfigKey.Scope.Account);
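Because the key is Account-scoped, a per-account override takes precedence over the global default of 10. The enforcement sites in the manager read it as below (a usage sketch; owner and totalNodeCount come from the surrounding validation method):

    // Resolves the account-level value if set, otherwise the global/default value.
    int maxClusterSize = KubernetesMaxClusterSize.valueIn(owner.getId());
    if (totalNodeCount > maxClusterSize) {
        // reject the request, as in validateKubernetesClusterCreateParameters above
    }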
KubernetesCluster findById(final Long id);

View File

@ -93,6 +93,15 @@ public class KubernetesClusterVO implements KubernetesCluster {
@Column(name = "endpoint")
private String endpoint;
@Column(name = "autoscaling_enabled")
private boolean autoscalingEnabled;
@Column(name = "minsize")
private Long minSize;
@Column(name = "maxsize")
private Long maxSize;
@Column(name = GenericDao.CREATED_COLUMN)
private Date created;
@ -303,6 +312,33 @@ public class KubernetesClusterVO implements KubernetesCluster {
return created;
}
@Override
public boolean getAutoscalingEnabled() {
return autoscalingEnabled;
}
public void setAutoscalingEnabled(boolean enabled) {
this.autoscalingEnabled = enabled;
}
@Override
public Long getMinSize() {
return minSize;
}
public void setMinSize(Long minSize) {
this.minSize = minSize;
}
@Override
public Long getMaxSize() {
return maxSize;
}
public void setMaxSize(Long maxSize) {
this.maxSize = maxSize;
}
public KubernetesClusterVO() {
this.uuid = UUID.randomUUID().toString();
}
@ -333,6 +369,16 @@ public class KubernetesClusterVO implements KubernetesCluster {
this.checkForGc = false;
}
public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId,
long networkId, long domainId, long accountId, long controlNodeCount, long nodeCount, State state, String keyPair, long cores,
long memory, Long nodeRootDiskSize, String endpoint, boolean autoscalingEnabled, Long minSize, Long maxSize) {
this(name, description, zoneId, kubernetesVersionId, serviceOfferingId, templateId, networkId, domainId, accountId, controlNodeCount,
nodeCount, state, keyPair, cores, memory, nodeRootDiskSize, endpoint);
this.autoscalingEnabled = autoscalingEnabled;
this.minSize = minSize;
this.maxSize = maxSize;
}
@Override
public Class<?> getEntityType() {
return KubernetesCluster.class;

View File

@ -27,4 +27,5 @@ public interface KubernetesClusterVmMap {
long getId();
long getClusterId();
long getVmId();
boolean isControlNode();
}

View File

@ -28,32 +28,6 @@ import javax.persistence.GenerationType;
@Table(name = "kubernetes_cluster_vm_map")
public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
@Override
public long getId() {
return id;
}
@Override
public long getClusterId() {
return clusterId;
}
public void setClusterId(long clusterId) {
this.clusterId = clusterId;
}
@Override
public long getVmId() {
return vmId;
}
public void setVmId(long vmId) {
this.vmId = vmId;
}
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
@ -65,12 +39,48 @@ public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap {
@Column(name = "vm_id")
long vmId;
public KubernetesClusterVmMapVO() {
@Column(name = "control_node")
boolean controlNode;
public KubernetesClusterVmMapVO() {
}
public KubernetesClusterVmMapVO(long clusterId, long vmId) {
public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean controlNode) {
this.vmId = vmId;
this.clusterId = clusterId;
this.controlNode = controlNode;
}
@Override
public long getId() {
return id;
}
@Override
public long getClusterId() {
return clusterId;
}
public void setClusterId(long clusterId) {
this.clusterId = clusterId;
}
@Override
public long getVmId() {
return vmId;
}
public void setVmId(long vmId) {
this.vmId = vmId;
}
@Override
public boolean isControlNode() {
return controlNode;
}
public void setControlNode(boolean controlNode) {
this.controlNode = controlNode;
}
}
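With the new control_node column, callers record a node's role at map-creation time. A usage sketch mirroring the addKubernetesClusterVm change further down in this commit (the IDs are illustrative):

    // Persist the VM-to-cluster mapping, flagging control-plane nodes explicitly.
    KubernetesClusterVmMapVO controlNode = new KubernetesClusterVmMapVO(clusterId, controlVmId, true);
    KubernetesClusterVmMapVO workerNode = new KubernetesClusterVmMapVO(clusterId, workerVmId, false);
    // Scale-down later filters these maps on isControlNode() to keep the control plane intact.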

View File

@ -38,8 +38,10 @@ import org.apache.commons.io.IOUtils;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.VlanDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
@ -59,6 +61,7 @@ import com.cloud.network.dao.NetworkDao;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.Storage;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.LaunchPermissionDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
@ -76,6 +79,7 @@ import com.cloud.utils.fsm.NoTransitionException;
import com.cloud.utils.fsm.StateMachine2;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.UserVmService;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.UserVmDao;
import com.google.common.base.Strings;
@ -119,6 +123,10 @@ public class KubernetesClusterActionWorker {
protected UserVmService userVmService;
@Inject
protected VlanDao vlanDao;
@Inject
protected VirtualMachineManager itMgr;
@Inject
protected LaunchPermissionDao launchPermissionDao;
protected KubernetesClusterDao kubernetesClusterDao;
protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao;
@ -135,9 +143,11 @@ public class KubernetesClusterActionWorker {
protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret";
protected final String deployProviderScriptFilename = "deploy-provider";
protected final String autoscaleScriptFilename = "autoscale-kube-cluster";
protected final String scriptPath = "/opt/bin/";
protected File deploySecretsScriptFile;
protected File deployProviderScriptFile;
protected File autoscaleScriptFile;
protected KubernetesClusterManagerImpl manager;
protected String[] keys;
@ -152,7 +162,12 @@ public class KubernetesClusterActionWorker {
protected void init() {
this.owner = accountDao.findById(kubernetesCluster.getAccountId());
this.clusterTemplate = templateDao.findById(kubernetesCluster.getTemplateId());
long zoneId = this.kubernetesCluster.getZoneId();
long templateId = this.kubernetesCluster.getTemplateId();
DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId);
VMTemplateVO template = templateDao.findById(templateId);
Hypervisor.HypervisorType type = template.getHypervisorType();
this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type);
this.sshKeyFile = getManagementServerSshPublicKeyFile();
}
@ -193,7 +208,7 @@ public class KubernetesClusterActionWorker {
}
protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster,
final List<UserVm> clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
logMessage(logLevel, message, e);
stateTransitTo(kubernetesCluster.getId(), event);
detachIsoKubernetesVMs(clusterVMs);
@ -203,11 +218,19 @@ public class KubernetesClusterActionWorker {
throw new CloudRuntimeException(message, e);
}
protected void deleteTemplateLaunchPermission() {
if (clusterTemplate != null && owner != null) {
LOGGER.info("Revoking launch permission for systemVM template");
launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId()));
}
}
protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException {
logMessage(logLevel, message, e);
if (kubernetesClusterId != null && event != null) {
stateTransitTo(kubernetesClusterId, event);
}
deleteTemplateLaunchPermission();
if (e == null) {
throw new CloudRuntimeException(message);
}
@ -235,11 +258,11 @@ public class KubernetesClusterActionWorker {
return new File(keyFile);
}
protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) {
protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isControlNode) {
return Transaction.execute(new TransactionCallback<KubernetesClusterVmMapVO>() {
@Override
public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) {
KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId);
KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, isControlNode);
kubernetesClusterVmMapDao.persist(newClusterVmMap);
return newClusterVmMap;
}
@ -332,6 +355,7 @@ public class KubernetesClusterActionWorker {
if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) {
logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster : %s. Binaries ISO not active.", kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent);
}
for (UserVm vm : clusterVMs) {
try {
templateService.attachIso(iso.getId(), vm.getId(), true);
@ -368,12 +392,13 @@ public class KubernetesClusterActionWorker {
protected List<KubernetesClusterVmMapVO> getKubernetesClusterVMMaps() {
List<KubernetesClusterVmMapVO> clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId());
if (!CollectionUtils.isEmpty(clusterVMs)) {
clusterVMs.sort((t1, t2) -> Long.compare(t1.getId(), t2.getId())); // ascending by id, i.e. creation order
}
return clusterVMs;
}
protected List<KubernetesClusterVmMapVO> getKubernetesClusterVMMapsForNodes(List<Long> nodeIds) {
return kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds);
}
protected List<UserVm> getKubernetesClusterVMs() {
List<UserVm> vmList = new ArrayList<>();
List<KubernetesClusterVmMapVO> clusterVMs = getKubernetesClusterVMMaps();
@ -433,18 +458,20 @@ public class KubernetesClusterActionWorker {
protected void retrieveScriptFiles() {
deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename);
deployProviderScriptFile = retrieveScriptFile(deployProviderScriptFilename);
autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename);
}
protected void copyScripts(String nodeAddress, final int sshPort) {
copyScriptFile(nodeAddress, sshPort, deploySecretsScriptFile, deploySecretsScriptFilename);
copyScriptFile(nodeAddress, sshPort, deployProviderScriptFile, deployProviderScriptFilename);
copyScriptFile(nodeAddress, sshPort, autoscaleScriptFile, autoscaleScriptFilename);
}
protected void copyScriptFile(String nodeAddress, final int sshPort, File file, String destination) {
try {
SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
"~/", deploySecretsScriptFile.getAbsolutePath(), "0755");
SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
"~/", deployProviderScriptFile.getAbsolutePath(), "0755");
String cmdStr = String.format("sudo mv ~/%s %s/%s", deploySecretsScriptFile.getName(), scriptPath, deploySecretsScriptFilename);
SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
cmdStr, 10000, 10000, 10 * 60 * 1000);
cmdStr = String.format("sudo mv ~/%s %s/%s", deployProviderScriptFile.getName(), scriptPath, deployProviderScriptFilename);
"~/", file.getAbsolutePath(), "0755");
String cmdStr = String.format("sudo mv ~/%s %s/%s", file.getName(), scriptPath, destination);
SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
cmdStr, 10000, 10000, 10 * 60 * 1000);
} catch (Exception e) {
@ -452,6 +479,33 @@ public class KubernetesClusterActionWorker {
}
}
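The copy is a two-step install: scp can only write to the node user's home directory, so a follow-up sudo mv relocates each script into the root-owned /opt/bin. A minimal standalone sketch of the generated command (the script name is hypothetical); since scriptPath already ends in a slash, the %s/%s format yields a double slash, which POSIX paths tolerate:

public class ScriptInstallSketch {
    public static void main(String[] args) {
        String scriptPath = "/opt/bin/";            // matches the worker's scriptPath field
        String fileName = "autoscale-kube-cluster"; // hypothetical script name
        // scp lands in the unprivileged home directory; a sudo'ed mv then
        // moves the script into the root-owned destination
        String cmdStr = String.format("sudo mv ~/%s %s/%s", fileName, scriptPath, fileName);
        System.out.println(cmdStr); // sudo mv ~/autoscale-kube-cluster /opt/bin//autoscale-kube-cluster
    }
}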
protected boolean taintControlNodes() {
StringBuilder commands = new StringBuilder();
List<KubernetesClusterVmMapVO> vmMapVOList = getKubernetesClusterVMMaps();
for (KubernetesClusterVmMapVO vmMap : vmMapVOList) {
if (!vmMap.isControlNode()) {
continue;
}
String name = userVmDao.findById(vmMap.getVmId()).getDisplayName().toLowerCase();
String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name);
commands.append(command);
}
try {
File pkFile = getManagementServerSshPublicKeyFile();
Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
publicIpAddress = publicIpSshPort.first();
sshPort = publicIpSshPort.second();
Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
pkFile, null, commands.toString(), 10000, 10000, 60000);
return result.first();
} catch (Exception e) {
String msg = String.format("Failed to taint control nodes on Kubernetes cluster : %s : %s", kubernetesCluster.getName(), e.getMessage());
logMessage(Level.ERROR, msg, e);
return false;
}
}
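Despite its name, taintControlNodes applies a cluster-autoscaler annotation rather than a Kubernetes taint: cluster-autoscaler.kubernetes.io/scale-down-disabled=true marks the control nodes so the autoscaler never selects them as scale-down candidates. A standalone sketch of the chained command it assembles (node names are hypothetical):

public class AnnotateCommandSketch {
    public static void main(String[] args) {
        // hypothetical control node names; the worker derives them from the VM display names
        String[] controlNodes = {"mycluster-control-17c9a1b2d40", "mycluster-control-17c9a1b2d41"};
        StringBuilder commands = new StringBuilder();
        for (String name : controlNodes) {
            commands.append(String.format(
                    "sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ",
                    name));
        }
        // the whole chain runs in a single SSH session against the cluster's public endpoint
        System.out.println(commands);
    }
}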
protected boolean deployProvider() {
Network network = networkDao.findById(kubernetesCluster.getNetworkId());
// Since the provider creates IP addresses, don't deploy it unless the underlying network supports it

View File

@ -17,6 +17,7 @@
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
@ -47,7 +48,7 @@ import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.exception.ManagementServerException;
import com.cloud.exception.NetworkRuleConflictException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
@ -56,6 +57,7 @@ import com.cloud.hypervisor.Hypervisor;
import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.KubernetesClusterVO;
import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil;
import com.cloud.network.IpAddress;
import com.cloud.network.Network;
@ -72,8 +74,9 @@ import com.cloud.network.rules.dao.PortForwardingRulesDao;
import com.cloud.offering.ServiceOffering;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.LaunchPermissionDao;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.user.SSHKeyPairVO;
@ -82,11 +85,13 @@ import com.cloud.utils.Pair;
import com.cloud.utils.StringUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionCallbackWithException;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.ExecutionException;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.net.Ip;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.Nic;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachine;
@ -124,6 +129,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
@Inject
protected UserVmManager userVmManager;
@Inject
protected LaunchPermissionDao launchPermissionDao;
@Inject
protected VolumeApiService volumeService;
@Inject
protected VolumeDao volumeDao;
@ -150,7 +157,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
if (!Strings.isNullOrEmpty(sshKeyPair)) {
SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
if (sshkp != null) {
pubKey += "\n - \"" + sshkp.getPublicKey() + "\"";
}
}
k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey);
@ -181,7 +188,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) {
// write the /.docker/config.json file through code instead of via k8s-node.yml, as we cannot make a section
// optional or conditionally applied there
String dockerConfigString = "write-files:\n" +
String dockerConfigString = "write_files:\n" +
" - path: /.docker/config.json\n" +
" owner: core:core\n" +
" permissions: '0644'\n" +
@ -194,7 +201,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
" }\n" +
" }\n" +
" }";
k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString);
k8sNodeConfig = k8sNodeConfig.replace("write_files:", dockerConfigString);
final String dockerUrlKey = "{{docker.url}}";
final String dockerAuthKey = "{{docker.secret}}";
final String dockerEmailKey = "{{docker.email}}";
@ -307,12 +314,11 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
Field f = startVm.getClass().getDeclaredField("id");
f.setAccessible(true);
f.set(startVm, vm.getId());
userVmService.startVirtualMachine(startVm);
itMgr.advanceStart(vm.getUuid(), null, null);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName()));
}
} catch (IllegalAccessException | NoSuchFieldException | ExecutionException |
ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) {
} catch (IllegalAccessException | NoSuchFieldException | OperationTimedoutException | ResourceUnavailableException | InsufficientCapacityException ex) {
throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), ex);
}
@ -326,8 +332,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
ResourceUnavailableException, InsufficientCapacityException {
List<UserVm> nodes = new ArrayList<>();
for (int i = offset + 1; i <= nodeCount; i++) {
UserVm vm = createKubernetesNode(publicIpAddress, i);
addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId());
UserVm vm = createKubernetesNode(publicIpAddress);
addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false);
if (kubernetesCluster.getNodeRootDiskSize() > 0) {
resizeNodeVolume(vm);
}
@ -349,7 +355,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress);
}
protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException,
protected UserVm createKubernetesNode(String joinIp) throws ManagementServerException,
ResourceUnavailableException, InsufficientCapacityException {
UserVm nodeVm = null;
DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId());
@ -363,7 +369,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
if (rootDiskSize > 0) {
customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
}
String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance));
String suffix = Long.toHexString(System.currentTimeMillis());
String hostName = String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix);
String k8sNodeConfig = null;
try {
k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType()));
@ -374,7 +381,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
null, addrs, null, null, null, customParameterMap, null, null, null, null, true);
null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName()));
}
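Node names now take a millisecond-timestamp hex suffix instead of the old sequential -node-1, -node-2 counter, which needed a database lookup (getKubernetesClusterNodeAvailableName, removed further down) to stay collision-free. A self-contained sketch of the scheme, with a hypothetical prefix:

public class NodeNameSketch {
    public static void main(String[] args) {
        String kubernetesClusterNodeNamePrefix = "mycluster"; // hypothetical
        String suffix = Long.toHexString(System.currentTimeMillis());
        // e.g. mycluster-node-17c9a1b2d40
        System.out.println(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix));
    }
}

Two nodes created within the same millisecond would still collide, but node provisioning in this worker is sequential, so the window appears theoretical.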
@ -453,7 +460,6 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
final Ip vmIp = new Ip(vmNic.getIPv4Address());
final long vmIdFinal = vmId;
final int srcPortFinal = firewallRuleSourcePortStart + i;
PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException<PortForwardingRuleVO, NetworkRuleConflictException>() {
@Override
public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException {
@ -519,6 +525,17 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
}
}
protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, int startPort, int endPort)
throws ResourceUnavailableException {
List<PortForwardingRuleVO> pfRules = portForwardingRulesDao.listByNetwork(network.getId());
for (PortForwardingRuleVO pfRule : pfRules) {
if (startPort <= pfRule.getSourcePortStart() && pfRule.getSourcePortStart() <= endPort) {
portForwardingRulesDao.remove(pfRule.getId());
}
}
rulesService.applyPortForwardingRules(publicIp.getId(), account);
}
protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network,
final Account account, final int port) throws ResourceUnavailableException {
List<LoadBalancerVO> rules = loadBalancerDao.listByIpAddress(publicIp.getId());
@ -548,13 +565,96 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
return prefix;
}
protected String getKubernetesClusterNodeAvailableName(final String hostName) {
String name = hostName;
int suffix = 1;
while (vmInstanceDao.findVMByHostName(name) != null) {
name = String.format("%s-%d", hostName, suffix);
suffix++;
protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory,
final Long size, final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) {
return Transaction.execute(new TransactionCallback<KubernetesClusterVO>() {
@Override
public KubernetesClusterVO doInTransaction(TransactionStatus status) {
KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId());
if (cores != null) {
updatedCluster.setCores(cores);
}
if (memory != null) {
updatedCluster.setMemory(memory);
}
if (size != null) {
updatedCluster.setNodeCount(size);
}
if (serviceOfferingId != null) {
updatedCluster.setServiceOfferingId(serviceOfferingId);
}
if (autoscaleEnabled != null) {
updatedCluster.setAutoscalingEnabled(autoscaleEnabled.booleanValue());
}
updatedCluster.setMinSize(minSize);
updatedCluster.setMaxSize(maxSize);
return kubernetesClusterDao.persist(updatedCluster);
}
});
}
private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException {
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize);
if (kubernetesClusterVO == null) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster",
kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
return kubernetesClusterVO;
}
protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) {
if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AutoscaleRequested);
}
File pkFile = getManagementServerSshPublicKeyFile();
Pair<String, Integer> publicIpSshPort = getKubernetesClusterServerIpSshPort(null);
publicIpAddress = publicIpSshPort.first();
sshPort = publicIpSshPort.second();
try {
if (enable) {
String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d",
kubernetesCluster.getUuid(), maxSize, minSize);
Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
pkFile, null, command, 10000, 10000, 60000);
// Maybe the file isn't present. Try and copy it
if (!result.first()) {
logMessage(Level.INFO, "Autoscaling files missing. Adding them now", null);
retrieveScriptFiles();
copyScripts(publicIpAddress, sshPort);
if (!createCloudStackSecret(keys)) {
logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s",
kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
// If at first you don't succeed ...
result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
pkFile, null, command, 10000, 10000, 60000);
if (!result.first()) {
throw new CloudRuntimeException(result.second());
}
}
updateKubernetesClusterEntry(true, minSize, maxSize);
} else {
Pair<Boolean, String> result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER,
pkFile, null, "sudo /opt/bin/autoscale-kube-cluster -d",
10000, 10000, 60000);
if (!result.first()) {
throw new CloudRuntimeException(result.second());
}
updateKubernetesClusterEntry(false, null, null);
}
return true;
} catch (Exception e) {
String msg = String.format("Failed to autoscale Kubernetes cluster: %s : %s", kubernetesCluster.getName(), e.getMessage());
logAndThrow(Level.ERROR, msg);
return false;
} finally {
// Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
}
return name;
}
}
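For reference, a standalone sketch of the two command forms autoscaleCluster issues over SSH (the UUID and size bounds are hypothetical). Note the flag polarity: -M carries maxSize and -m carries minSize:

public class AutoscaleCommandSketch {
    public static void main(String[] args) {
        String clusterUuid = "9f2c6a1e-0000-0000-0000-000000000000"; // hypothetical
        long minSize = 1, maxSize = 5;
        String enable = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d",
                clusterUuid, maxSize, minSize);
        String disable = "sudo /opt/bin/autoscale-kube-cluster -d";
        System.out.println(enable);  // deploys/enables the autoscaler for the cluster
        System.out.println(disable); // removes it again
    }
}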

View File

@ -19,8 +19,10 @@ package com.cloud.kubernetes.cluster.actionworkers;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.stream.Collectors;
import javax.inject.Inject;
@ -46,10 +48,9 @@ import com.cloud.network.IpAddress;
import com.cloud.network.Network;
import com.cloud.network.rules.FirewallRule;
import com.cloud.offering.ServiceOffering;
import com.cloud.storage.LaunchPermissionVO;
import com.cloud.uservm.UserVm;
import com.cloud.utils.Pair;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.UserVmVO;
@ -65,18 +66,35 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
private ServiceOffering serviceOffering;
private Long clusterSize;
private List<Long> nodeIds;
private KubernetesCluster.State originalState;
private Network network;
private Long minSize;
private Long maxSize;
private Boolean isAutoscalingEnabled;
private long scaleTimeoutTime;
public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster,
final ServiceOffering serviceOffering,
final Long clusterSize,
final List<Long> nodeIds,
final Boolean isAutoscalingEnabled,
final Long minSize,
final Long maxSize,
final KubernetesClusterManagerImpl clusterManager) {
super(kubernetesCluster, clusterManager);
this.serviceOffering = serviceOffering;
this.clusterSize = clusterSize;
this.nodeIds = nodeIds;
this.isAutoscalingEnabled = isAutoscalingEnabled;
this.minSize = minSize;
this.maxSize = maxSize;
this.originalState = kubernetesCluster.getState();
if (this.nodeIds != null) {
this.clusterSize = kubernetesCluster.getNodeCount() - this.nodeIds.size();
} else {
this.clusterSize = clusterSize;
}
}
protected void init() {
@ -100,13 +118,12 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
/**
* Scale network rules for an existing Kubernetes cluster while scaling it
* Open up the firewall for SSH access on ports CLUSTER_NODES_DEFAULT_START_SSH_PORT to CLUSTER_NODES_DEFAULT_START_SSH_PORT + n.
* Also remove port forwarding rules for removed virtual machines and create port-forwarding rule
* Also remove port forwarding rules for all virtual machines and re-create port-forwarding rule
* to forward public IP traffic to all node VMs' private IP.
* @param clusterVMIds
* @param removedVMIds
* @throws ManagementServerException
*/
private void scaleKubernetesClusterNetworkRules(final List<Long> clusterVMIds, final List<Long> removedVMIds) throws ManagementServerException {
private void scaleKubernetesClusterNetworkRules(final List<Long> clusterVMIds) throws ManagementServerException {
if (!Network.GuestType.Isolated.equals(network.getGuestType())) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName()));
@ -124,48 +141,31 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned");
}
int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd();
final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getControlNodeCount());
int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1;
// Provision new SSH firewall rules
try {
provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1);
provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s",
CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster %s",
CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName()));
}
} catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) {
throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
try {
removePortForwardingRules(publicIp, network, owner, removedVMIds);
removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd);
} catch (ResourceUnavailableException e) {
throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
try {
provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1);
provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT);
} catch (ResourceUnavailableException | NetworkRuleConflictException e) {
throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
}
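The resulting SSH exposure is idempotent: one firewall rule spans the whole contiguous port range, and one port-forwarding rule per node maps public port base + i to that node's SSH port. A sketch of the mapping; the base port value of 2222 and the private port 22 are assumptions here, as CLUSTER_NODES_DEFAULT_START_SSH_PORT and the forwarding target are defined elsewhere:

public class SshPortMapSketch {
    static final int START_SSH_PORT = 2222; // assumed value of CLUSTER_NODES_DEFAULT_START_SSH_PORT

    public static void main(String[] args) {
        int nodeCount = 4; // control + worker VMs, in vm-map order
        int endPort = START_SSH_PORT + nodeCount - 1; // single firewall rule covers [start, end]
        System.out.printf("firewall: %d-%d%n", START_SSH_PORT, endPort);
        for (int i = 0; i < nodeCount; i++) {
            // assumed: each port-forwarding rule targets the node's private SSH port 22
            System.out.printf("node %d: public %d -> private 22%n", i, START_SSH_PORT + i);
        }
    }
}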
private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory,
final Long size, final Long serviceOfferingId) {
return Transaction.execute((TransactionCallback<KubernetesClusterVO>) status -> {
KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId());
updatedCluster.setCores(cores);
updatedCluster.setMemory(memory);
if (size != null) {
updatedCluster.setNodeCount(size);
}
if (serviceOfferingId != null) {
updatedCluster.setServiceOfferingId(serviceOfferingId);
}
kubernetesClusterDao.persist(updatedCluster);
return updatedCluster;
});
}
private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException {
final ServiceOffering serviceOffering = newServiceOffering == null ?
serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering;
@ -173,10 +173,11 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount());
final long cores = serviceOffering.getCpu() * size;
final long memory = serviceOffering.getRamSize() * size;
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId);
KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId,
kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize());
if (kubernetesClusterVO == null) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster",
kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster",
kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
return kubernetesClusterVO;
}
@ -192,13 +193,13 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
retryCounter++;
try {
Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
10000, 10000, 60000);
if (!result.first()) {
LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName()));
} else {
result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER,
pkFile, null, String.format("sudo kubectl delete node %s", hostName),
pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName),
10000, 10000, 30000);
if (result.first()) {
return true;
@ -302,45 +303,51 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering);
}
private void removeNodesFromCluster(List<KubernetesClusterVmMapVO> vmMaps) throws CloudRuntimeException {
for (KubernetesClusterVmMapVO vmMapVO : vmMaps) {
UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
LOGGER.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), kubernetesCluster.getName()));
if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
try {
UserVm vm = userVmService.destroyVm(userVM.getId(), true);
if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to expunge VM '%s'."
, kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
} catch (ResourceUnavailableException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to remove VM ID: %s",
kubernetesCluster.getName() , userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
}
kubernetesClusterVmMapDao.expunge(vmMapVO.getId());
if (System.currentTimeMillis() > scaleTimeoutTime) {
logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
}
// Scale network rules to update firewall rule
try {
List<Long> clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList());
scaleKubernetesClusterNetworkRules(clusterVMIds);
} catch (ManagementServerException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
}
}
private void scaleDownKubernetesClusterSize() throws CloudRuntimeException {
if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) {
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested);
}
final List<KubernetesClusterVmMapVO> originalVmList = getKubernetesClusterVMMaps();
int i = originalVmList.size() - 1;
List<Long> removedVmIds = new ArrayList<>();
while (i >= kubernetesCluster.getControlNodeCount() + clusterSize) {
KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i);
UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId());
if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
// For removing port-forwarding network rules
removedVmIds.add(userVM.getId());
try {
UserVm vm = userVmService.destroyVm(userVM.getId(), true);
if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'."
, kubernetesCluster.getUuid()
, vm.getInstanceName()),
kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
} catch (ResourceUnavailableException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s"
, kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
}
kubernetesClusterVmMapDao.expunge(vmMapVO.getId());
if (System.currentTimeMillis() > scaleTimeoutTime) {
logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
i--;
}
// Scale network rules to update firewall rule
try {
scaleKubernetesClusterNetworkRules(null, removedVmIds);
} catch (ManagementServerException e) {
logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e);
List<KubernetesClusterVmMapVO> vmList;
if (this.nodeIds != null) {
vmList = getKubernetesClusterVMMapsForNodes(this.nodeIds);
} else {
vmList = getKubernetesClusterVMMaps();
vmList = vmList.subList((int) (kubernetesCluster.getControlNodeCount() + clusterSize), vmList.size());
}
Collections.reverse(vmList);
removeNodesFromCluster(vmList);
}
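The scale-down selection relies on the DAO now returning vm maps ordered by ascending id (creation order, control nodes first): when no explicit nodeids are given, everything past controlNodeCount + clusterSize is surplus, and reversing that slice removes the newest nodes first. A self-contained sketch of the selection, with hypothetical ids:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ScaleDownOrderSketch {
    public static void main(String[] args) {
        // vm-map ids ascending = creation order; the control node (id 10) was persisted first
        List<Long> vmMapIds = new ArrayList<>(List.of(10L, 11L, 12L, 13L, 14L));
        long controlNodeCount = 1, targetClusterSize = 2;
        // everything past controlNodeCount + clusterSize is surplus
        List<Long> toRemove = vmMapIds.subList((int) (controlNodeCount + targetClusterSize), vmMapIds.size());
        Collections.reverse(toRemove); // drain and expunge the newest nodes first
        System.out.println(toRemove);  // [14, 13]
    }
}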
private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException {
@ -348,26 +355,26 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested);
}
List<UserVm> clusterVMs = new ArrayList<>();
List<Long> clusterVMIds = new ArrayList<>();
LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId());
launchPermissionDao.persist(launchPermission);
try {
clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress);
} catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) {
logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e);
}
attachIsoKubernetesVMs(clusterVMs);
for (UserVm vm : clusterVMs) {
clusterVMIds.add(vm.getId());
}
try {
scaleKubernetesClusterNetworkRules(clusterVMIds, null);
List<Long> clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList());
scaleKubernetesClusterNetworkRules(clusterVMIds);
} catch (ManagementServerException e) {
logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), e);
}
attachIsoKubernetesVMs(clusterVMs);
KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId());
kubernetesClusterVO.setNodeCount(clusterSize);
boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort,
CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000);
detachIsoKubernetesVMs(clusterVMs);
deleteTemplateLaunchPermission();
if (!readyNodesCountValid) { // Scaling failed
logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster : %s as it does not have desired number of nodes in ready state", kubernetesCluster.getName()));
}
@ -409,6 +416,10 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif
if (existingServiceOffering == null) {
logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName()));
}
if (this.isAutoscalingEnabled != null) {
return autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize);
}
final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId();
final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize;
final long newVMRequired = clusterSize == null ? 0 : clusterSize - originalClusterSize;

View File

@ -60,6 +60,7 @@ import com.cloud.network.Network;
import com.cloud.network.addr.PublicIp;
import com.cloud.network.rules.LoadBalancer;
import com.cloud.offering.ServiceOffering;
import com.cloud.storage.LaunchPermissionVO;
import com.cloud.user.Account;
import com.cloud.user.SSHKeyPairVO;
import com.cloud.uservm.UserVm;
@ -71,6 +72,7 @@ import com.cloud.utils.net.NetUtils;
import com.cloud.vm.Nic;
import com.cloud.vm.ReservationContext;
import com.cloud.vm.ReservationContextImpl;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachine;
import com.google.common.base.Strings;
@ -157,7 +159,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (!Strings.isNullOrEmpty(sshKeyPair)) {
SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
if (sshkp != null) {
pubKey += "\n - \"" + sshkp.getPublicKey() + "\"";
}
}
k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey);
@ -195,11 +197,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (rootDiskSize > 0) {
customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
}
String hostName = kubernetesClusterNodeNamePrefix + "-control";
if (kubernetesCluster.getControlNodeCount() > 1) {
hostName += "-1";
}
hostName = getKubernetesClusterNodeAvailableName(hostName);
String suffix = Long.toHexString(System.currentTimeMillis());
String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix);
boolean haSupported = isKubernetesVersionSupportsHA();
String k8sControlNodeConfig = null;
try {
@ -211,7 +210,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true);
requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName()));
}
@ -230,7 +229,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (!Strings.isNullOrEmpty(sshKeyPair)) {
SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair);
if (sshkp != null) {
pubKey += "\n - \"" + sshkp.getPublicKey() + "\"";
}
}
k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey);
@ -254,7 +253,8 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (rootDiskSize > 0) {
customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize));
}
String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-control-%d", kubernetesClusterNodeNamePrefix, additionalControlNodeInstance + 1));
String suffix = Long.toHexString(System.currentTimeMillis());
String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix);
String k8sControlNodeConfig = null;
try {
k8sControlNodeConfig = getKubernetesAdditionalControlNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType()));
@ -265,7 +265,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner,
hostName, hostName, null, null, null,
Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(),
null, addrs, null, null, null, customParameterMap, null, null, null, null, true);
null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName()));
}
@ -276,7 +276,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
ManagementServerException, InsufficientCapacityException, ResourceUnavailableException {
UserVm k8sControlVM = null;
k8sControlVM = createKubernetesControlNode(network, publicIpAddress);
addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId());
addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId(), true);
if (kubernetesCluster.getNodeRootDiskSize() > 0) {
resizeNodeVolume(k8sControlVM);
}
@ -298,7 +298,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
for (int i = 1; i < kubernetesCluster.getControlNodeCount(); i++) {
UserVm vm = null;
vm = createKubernetesAdditionalControlNode(publicIpAddress, i);
addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId());
addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true);
if (kubernetesCluster.getNodeRootDiskSize() > 0) {
resizeNodeVolume(vm);
}
@ -385,25 +385,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
network.getName(), kubernetesCluster.getName()));
}
try {
provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s",
CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid()));
}
} catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
try {
int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1;
provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName()));
}
} catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
createFirewallRules(publicIp, clusterVMIds);
// Load balancer rule for API access for control node VMs
try {
@ -420,6 +402,30 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
}
}
private void createFirewallRules(IpAddress publicIp, List<Long> clusterVMIds) throws ManagementServerException {
// Firewall rule for API access for control node VMs
try {
provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s",
CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName()));
}
} catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
// Firewall rule for SSH access on each node VM
try {
int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1;
provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort);
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName()));
}
} catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) {
throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e);
}
}
private void startKubernetesClusterVMs() {
List <UserVm> clusterVms = getKubernetesClusterVMs();
for (final UserVm vm : clusterVms) {
@ -427,6 +433,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed);
}
try {
resizeNodeVolume(vm);
startKubernetesVM(vm);
} catch (ManagementServerException ex) {
LOGGER.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex);
@ -506,6 +513,10 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
(Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getControlNodeCount() > 1)) { // Shared network, single-control node cluster won't have an IP yet
logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed);
}
// Allow account creating the kubernetes cluster to access systemVM template
LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId());
launchPermissionDao.persist(launchPermission);
List<UserVm> clusterVMs = new ArrayList<>();
UserVm k8sControlVM = null;
try {
@ -571,6 +582,7 @@ public class KubernetesClusterStartWorker extends KubernetesClusterResourceModif
if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) {
logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed);
}
taintControlNodes();
deployProvider();
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded);
return true;

View File

@ -17,10 +17,7 @@
package com.cloud.kubernetes.cluster.actionworkers;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@ -45,6 +42,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
private List<UserVm> clusterVMs = new ArrayList<>();
private KubernetesSupportedVersion upgradeVersion;
private final String upgradeScriptFilename = "upgrade-kubernetes.sh";
private File upgradeScriptFile;
private long upgradeTimeoutTime;
@ -57,16 +55,9 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
this.keys = keys;
}
private void retrieveUpgradeScriptFile() {
try {
String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh");
upgradeScriptFile = File.createTempFile("upgrade-kuberntes", ".sh");
BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile));
upgradeScriptFileWriter.write(upgradeScriptData);
upgradeScriptFileWriter.close();
} catch (IOException e) {
logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to prepare upgrade script", kubernetesCluster.getName()), e);
}
protected void retrieveScriptFiles() {
super.retrieveScriptFiles();
upgradeScriptFile = retrieveScriptFile(upgradeScriptFilename);
}
private Pair<Boolean, String> runInstallScriptOnVM(final UserVm vm, final int index) throws Exception {
@ -95,12 +86,12 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
}
result = null;
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
LOGGER.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s",
vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
}
try {
result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null,
String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName),
10000, 10000, 60000);
} catch (Exception e) {
logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e);
@ -112,7 +103,6 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null);
}
try {
int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? sshPort + i : sshPort;
deployProvider();
result = runInstallScriptOnVM(vm, i);
} catch (Exception e) {
@ -133,8 +123,8 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
}
}
if (LOGGER.isInfoEnabled()) {
LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s",
vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
LOGGER.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s",
vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid()));
}
}
}
@ -155,7 +145,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke
if (CollectionUtils.isEmpty(clusterVMs)) {
logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve VMs for cluster", kubernetesCluster.getName()));
}
retrieveUpgradeScriptFile();
retrieveScriptFiles();
stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested);
attachIsoKubernetesVMs(clusterVMs, upgradeVersion);
upgradeKubernetesClusterNodes();

View File

@ -23,4 +23,5 @@ import java.util.List;
public interface KubernetesClusterVmMapDao extends GenericDao<KubernetesClusterVmMapVO, Long> {
public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId);
public List<KubernetesClusterVmMapVO> listByClusterIdAndVmIdsIn(long clusterId, List<Long> vmIds);
}

View File

@ -21,6 +21,7 @@ import java.util.List;
import org.springframework.stereotype.Component;
import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@ -34,6 +35,7 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClus
public KubernetesClusterVmMapDaoImpl() {
clusterIdSearch = createSearchBuilder();
clusterIdSearch.and("clusterId", clusterIdSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
clusterIdSearch.and("vmIdsIN", clusterIdSearch.entity().getVmId(), SearchCriteria.Op.IN);
clusterIdSearch.done();
}
@ -41,6 +43,15 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase<KubernetesClus
public List<KubernetesClusterVmMapVO> listByClusterId(long clusterId) {
SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create();
sc.setParameters("clusterId", clusterId);
return listBy(sc, null);
Filter filter = new Filter(KubernetesClusterVmMapVO.class, "id", Boolean.TRUE, null, null);
return listBy(sc, filter);
}
@Override
public List<KubernetesClusterVmMapVO> listByClusterIdAndVmIdsIn(long clusterId, List<Long> vmIds) {
SearchCriteria<KubernetesClusterVmMapVO> sc = clusterIdSearch.create();
sc.setParameters("clusterId", clusterId);
sc.setParameters("vmIdsIN", vmIds.toArray());
return listBy(sc);
}
}

View File

@ -49,7 +49,7 @@ public class KubernetesClusterUtil {
String user, File sshKeyFile, String nodeName) throws Exception {
Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
user, sshKeyFile, null,
String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()),
String.format("sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()),
10000, 10000, 20000);
if (result.first() && nodeName.equals(result.second().trim())) {
return true;
@ -110,7 +110,7 @@ public class KubernetesClusterUtil {
Pair<Boolean, String> result = null;
try {
result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null,
String.format("sudo kubectl uncordon %s", hostName),
String.format("sudo /opt/bin/kubectl uncordon %s", hostName),
10000, 10000, 30000);
if (result.first()) {
return true;
@ -133,9 +133,9 @@ public class KubernetesClusterUtil {
final int port, final String user, final File sshKeyFile,
final String namespace, String serviceName) {
try {
String cmd = "sudo kubectl get pods --all-namespaces";
String cmd = "sudo /opt/bin/kubectl get pods --all-namespaces";
if (!Strings.isNullOrEmpty(namespace)) {
cmd = String.format("sudo kubectl get pods --namespace=%s", namespace);
cmd = String.format("sudo /opt/bin/kubectl get pods --namespace=%s", namespace);
}
Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port, user,
sshKeyFile, null, cmd,
@ -211,7 +211,7 @@ public class KubernetesClusterUtil {
final int port, final String user, final File sshKeyFile) throws Exception {
Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, port,
user, sshKeyFile, null,
"sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l",
"sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l",
10000, 10000, 20000);
if (result.first()) {
return Integer.parseInt(result.second().trim().replace("\"", ""));

View File

@ -78,6 +78,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
@Inject
private TemplateApiService templateService;
public static final String MINIMUM_AUTOSCALER_SUPPORTED_VERSION = "1.15.0";
private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) {
KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse();
response.setObjectName("kubernetessupportedversion");
@ -94,12 +96,9 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
response.setZoneId(zone.getUuid());
response.setZoneName(zone.getName());
}
if (compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(),
KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0) {
response.setSupportsHA(true);
} else {
response.setSupportsHA(false);
}
response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(),
KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0);
response.setSupportsAutoscaling(versionSupportsAutoscaling(kubernetesSupportedVersion));
TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId());
if (template != null) {
response.setIsoId(template.getUuid());
@ -202,6 +201,10 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
return 0;
}
public static boolean versionSupportsAutoscaling(KubernetesSupportedVersion clusterVersion) {
return compareSemanticVersions(clusterVersion.getSemanticVersion(), MINIMUM_AUTOSCALER_SUPPORTED_VERSION) >= 0;
}
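Version strings must be compared component-wise as numbers, not lexicographically: a plain String.compareTo would rank "1.9.0" above "1.15.0", which is why the class's compareSemanticVersions helper is used above. A self-contained sketch of the comparison it has to perform (the helper name compareSemVer is illustrative):

    // Compare dotted version strings numerically, component by component.
    static int compareSemVer(String a, String b) {
        String[] pa = a.split("\\."), pb = b.split("\\.");
        for (int i = 0; i < Math.max(pa.length, pb.length); i++) {
            int x = i < pa.length ? Integer.parseInt(pa[i]) : 0;
            int y = i < pb.length ? Integer.parseInt(pb[i]) : 0;
            if (x != y) {
                return Integer.compare(x, y);
            }
        }
        return 0;
    }
    // compareSemVer("1.9.0", "1.15.0") < 0  -- numerically correct
    // "1.9.0".compareTo("1.15.0") > 0       -- lexicographic, hence the helper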
/**
* Returns whether a Kubernetes cluster upgrade can be carried out from a given currentVersion to upgradeVersion.
* Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR.
@ -214,9 +217,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
*/
public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException {
int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion);
if (versionDiff == 0) {
throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
} else if (versionDiff < 0) {
if (versionDiff < 0) {
throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion));
}
String[] thisParts = currentVersion.split("\\.");

View File

@ -16,6 +16,8 @@
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import java.security.InvalidParameterException;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
@ -55,6 +57,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName());
public static final String APINAME = "createKubernetesCluster";
private static final Long DEFAULT_NODE_ROOT_DISK_SIZE = 8L;
@Inject
public KubernetesClusterService kubernetesClusterService;
@ -142,7 +145,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
private String dockerRegistryEmail;
@Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG,
description = "root disk size of root disk for each node")
description = "root disk size in GB for each node")
private Long nodeRootDiskSize;
/////////////////////////////////////////////////////
@ -228,7 +231,14 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd {
}
public Long getNodeRootDiskSize() {
return nodeRootDiskSize;
if (nodeRootDiskSize != null) {
if (nodeRootDiskSize < DEFAULT_NODE_ROOT_DISK_SIZE) {
throw new InvalidParameterException("Provided node root disk size is less than the default size of " + DEFAULT_NODE_ROOT_DISK_SIZE + "GB");
}
return nodeRootDiskSize;
} else {
return DEFAULT_NODE_ROOT_DISK_SIZE;
}
}
/////////////////////////////////////////////////////

View File

@ -16,6 +16,8 @@
// under the License.
package org.apache.cloudstack.api.command.user.kubernetes.cluster;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.acl.RoleType;
@ -30,6 +32,7 @@ import org.apache.cloudstack.api.ResponseObject;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.KubernetesClusterResponse;
import org.apache.cloudstack.api.response.ServiceOfferingResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.log4j.Logger;
@ -58,19 +61,38 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
//////////////// API parameters /////////////////////
/////////////////////////////////////////////////////
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, required = true,
entityType = KubernetesClusterResponse.class,
description = "the ID of the Kubernetes cluster")
entityType = KubernetesClusterResponse.class,
description = "the ID of the Kubernetes cluster")
private Long id;
@ACL(accessType = SecurityChecker.AccessType.UseEntry)
@Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class,
description = "the ID of the service offering for the virtual machines in the cluster.")
description = "the ID of the service offering for the virtual machines in the cluster.")
private Long serviceOfferingId;
@Parameter(name=ApiConstants.SIZE, type = CommandType.LONG,
description = "number of Kubernetes cluster nodes")
description = "number of Kubernetes cluster nodes")
private Long clusterSize;
@Parameter(name = ApiConstants.NODE_IDS,
type = CommandType.LIST,
collectionType = CommandType.UUID,
entityType = UserVmResponse.class,
description = "the IDs of the nodes to be removed")
private List<Long> nodeIds;
@Parameter(name=ApiConstants.AUTOSCALING_ENABLED, type = CommandType.BOOLEAN,
description = "Whether autoscaling is enabled for the cluster")
private Boolean isAutoscalingEnabled;
@Parameter(name=ApiConstants.MIN_SIZE, type = CommandType.LONG,
description = "Minimum number of worker nodes in the cluster")
private Long minSize;
@Parameter(name=ApiConstants.MAX_SIZE, type = CommandType.LONG,
description = "Maximum number of worker nodes in the cluster")
private Long maxSize;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -87,6 +109,22 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd {
return clusterSize;
}
public List<Long> getNodeIds() {
return nodeIds;
}
public Boolean isAutoscalingEnabled() {
return isAutoscalingEnabled;
}
public Long getMinSize() {
return minSize;
}
public Long getMaxSize() {
return maxSize;
}
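The command only carries the new autoscaling values; bounds checking belongs in the service layer. A hedged sketch of the kind of validation the implementation would need (the method name and messages are assumptions, not code from this change):

    // Reject inconsistent autoscaling bounds before scaling the cluster.
    private void validateAutoscalingBounds(Boolean autoscalingEnabled, Long minSize, Long maxSize) {
        if (!Boolean.TRUE.equals(autoscalingEnabled)) {
            return; // bounds only matter when autoscaling is being enabled
        }
        if (minSize == null || maxSize == null) {
            throw new InvalidParameterValueException("Both minsize and maxsize must be passed when enabling autoscaling");
        }
        if (minSize < 1 || maxSize < minSize) {
            throw new InvalidParameterValueException("minsize must be at least 1 and no greater than maxsize");
        }
    }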
@Override
public String getEventType() {
return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE;

View File

@ -146,6 +146,18 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple
@Param(description = "Public IP Address ID of the cluster")
private String ipAddressId;
@SerializedName(ApiConstants.AUTOSCALING_ENABLED)
@Param(description = "Whether autoscaling is enabled for the cluster")
private boolean isAutoscalingEnabled;
@SerializedName(ApiConstants.MIN_SIZE)
@Param(description = "Minimum size of the cluster")
private Long minSize;
@SerializedName(ApiConstants.MAX_SIZE)
@Param(description = "Maximum size of the cluster")
private Long maxSize;
public KubernetesClusterResponse() {
}
@ -353,4 +365,16 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple
public void setIpAddressId(String ipAddressId) {
this.ipAddressId = ipAddressId;
}
public void setAutoscalingEnabled(boolean isAutoscalingEnabled) {
this.isAutoscalingEnabled = isAutoscalingEnabled;
}
public void setMinSize(Long minSize) {
this.minSize = minSize;
}
public void setMaxSize(Long maxSize) {
this.maxSize = maxSize;
}
}

View File

@ -64,6 +64,10 @@ public class KubernetesSupportedVersionResponse extends BaseResponse {
@Param(description = "whether Kubernetes supported version supports HA, multi-control nodes")
private Boolean supportsHA;
@SerializedName(ApiConstants.SUPPORTS_AUTOSCALING)
@Param(description = "whether Kubernetes supported version supports Autoscaling")
private Boolean supportsAutoscaling;
@SerializedName(ApiConstants.STATE)
@Param(description = "the enabled or disabled state of the Kubernetes supported version")
private String state;
@ -171,4 +175,12 @@ public class KubernetesSupportedVersionResponse extends BaseResponse {
public void setMinimumRamSize(Integer minimumRamSize) {
this.minimumRamSize = minimumRamSize;
}
public Boolean getSupportsAutoscaling() {
return supportsAutoscaling;
}
public void setSupportsAutoscaling(Boolean supportsAutoscaling) {
this.supportsAutoscaling = supportsAutoscaling;
}
}

View File

@ -17,12 +17,16 @@
# under the License.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
users:
- name: core
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
write_files:
- path: /opt/bin/setup-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
@ -96,7 +100,7 @@ write-files:
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} .
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
@ -125,6 +129,10 @@ write-files:
done <<< "$output"
setup_complete=true
fi
if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then
mkdir -p /opt/autoscaler
cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml
fi
if [ -e "${BINARIES_DIR}/provider.yaml" ]; then
mkdir -p /opt/provider
cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml
@ -179,14 +187,14 @@ write-files:
fi
- path: /opt/bin/deploy-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
if [[ -f "/home/core/success" ]]; then
echo "Already provisioned!"
exit 0
echo "Already provisioned!"
exit 0
fi
if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then
@ -196,7 +204,7 @@ write-files:
modprobe ip_vs
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
modprobe nf_conntrack
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
@ -205,37 +213,34 @@ write-files:
sudo touch /home/core/success
echo "true" > /home/core/success
coreos:
units:
- name: docker.service
command: start
enable: true
- path: /etc/systemd/system/setup-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
Requires=docker.service
After=docker.service
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- path: /etc/systemd/system/deploy-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
After=setup-kube-system.service
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
runcmd:
- [ systemctl, start, setup-kube-system ]
- [ systemctl, start, deploy-kube-system ]
update:
group: stable
reboot-strategy: off

View File

@ -17,10 +17,14 @@
# under the License.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
users:
- name: core
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
write_files:
- path: /etc/conf.d/nfs
permissions: '0644'
content: |
@ -42,7 +46,7 @@ write-files:
{{ k8s_control_node.apiserver.key }}
- path: /opt/bin/setup-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
@ -116,7 +120,7 @@ write-files:
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} .
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
@ -147,6 +151,10 @@ write-files:
fi
mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then
mkdir -p /opt/autoscaler
cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml
fi
if [ -e "${BINARIES_DIR}/provider.yaml" ]; then
mkdir -p /opt/provider
cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml
@ -218,7 +226,7 @@ write-files:
done
- path: /opt/bin/deploy-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
@ -247,52 +255,49 @@ write-files:
if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then
### Network, dashboard configs available offline ###
echo "Offline configs are available!"
kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml
kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml
/opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml
/opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml
rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}"
else
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
/opt/bin/kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(/opt/bin/kubectl version | base64 | tr -d '\n')"
/opt/bin/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml
fi
kubectl create rolebinding admin-binding --role=admin --user=admin || true
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true
kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true
/opt/bin/kubectl create rolebinding admin-binding --role=admin --user=admin || true
/opt/bin/kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true
/opt/bin/kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true
sudo touch /home/core/success
echo "true" > /home/core/success
coreos:
units:
- name: docker.service
command: start
enable: true
- path: /etc/systemd/system/setup-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
Requires=docker.service
After=docker.service
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- path: /etc/systemd/system/deploy-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
After=setup-kube-system.service
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version
ExecStart=/opt/bin/deploy-kube-system
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version
ExecStart=/opt/bin/deploy-kube-system
runcmd:
- [ systemctl, start, setup-kube-system ]
- [ systemctl, start, deploy-kube-system ]
update:
group: stable
reboot-strategy: off

View File

@ -17,12 +17,16 @@
# under the License.
---
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
users:
- name: core
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
ssh_authorized_keys:
{{ k8s.ssh.pub.key }}
write-files:
write_files:
- path: /opt/bin/setup-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
@ -96,7 +100,7 @@ write-files:
mkdir -p /opt/bin
cd /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin
cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} .
chmod +x {kubeadm,kubelet,kubectl}
sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service
@ -125,6 +129,10 @@ write-files:
done <<< "$output"
setup_complete=true
fi
if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then
mkdir -p /opt/autoscaler
cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml
fi
if [ -e "${BINARIES_DIR}/provider.yaml" ]; then
mkdir -p /opt/provider
cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml
@ -179,7 +187,7 @@ write-files:
fi
- path: /opt/bin/deploy-kube-system
permissions: 0700
permissions: '0700'
owner: root:root
content: |
#!/bin/bash -e
@ -196,7 +204,7 @@ write-files:
modprobe ip_vs
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
modprobe nf_conntrack
if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then
export PATH=$PATH:/opt/bin
fi
@ -205,37 +213,33 @@ write-files:
sudo touch /home/core/success
echo "true" > /home/core/success
coreos:
units:
- name: docker.service
command: start
enable: true
- path: /etc/systemd/system/setup-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
Requires=docker.service
After=docker.service
- name: setup-kube-system.service
command: start
content: |
[Unit]
Requires=docker.service
After=docker.service
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
[Service]
Type=simple
StartLimitInterval=0
ExecStart=/opt/bin/setup-kube-system
- path: /etc/systemd/system/deploy-kube-system.service
permissions: '0755'
owner: root:root
content: |
[Unit]
After=setup-kube-system.service
- name: deploy-kube-system.service
command: start
content: |
[Unit]
After=setup-kube-system.service
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
[Service]
Type=simple
StartLimitInterval=0
Restart=on-failure
ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version
ExecStart=/opt/bin/deploy-kube-system
update:
group: stable
reboot-strategy: off
runcmd:
- [ systemctl, start, setup-kube-system ]
- [ systemctl, start, deploy-kube-system ]

View File

@ -0,0 +1,93 @@
#!/bin/bash -e
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
function usage() {
cat << USAGE
Usage: ./autoscale-kube-cluster [OPTIONS]...
Enables autoscaling for the kubernetes cluster.
Arguments:
-i, --id string ID of the cluster
-e, --enable Enables autoscaling
-d, --disable Disables autoscaling
-M, --maxsize number Maximum size of the cluster
-m, --minsize number Minimum size of the cluster
Other arguments:
-h, --help Display this help message and exit
Examples:
./autoscale-kube-cluster -i <cluster id> -e -M 3 -m 1
./autoscale-kube-cluster -d
USAGE
exit 0
}
ID=""
ENABLE=""
MINSIZE=""
MAXSIZE=""
while [ -n "$1" ]; do
case "$1" in
-h | --help)
usage
;;
-i | --id)
ID=$2
shift 2
;;
-e | --enable)
ENABLE="true"
shift 1
;;
-d | --disable)
ENABLE="false"
shift 1
;;
-M | --maxsize)
MAXSIZE=$2
shift 2
;;
-m | --minsize)
MINSIZE=$2
shift 2
;;
-*|*)
echo "ERROR: no such option $1. -h or --help for help"
exit 1
;;
esac
done
if [ "$ENABLE" == "true" ]; then
if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then
sed -e "s/<cluster-id>/$ID/g" -e "s/<min>/$MINSIZE/g" -e "s/<max>/$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml
/opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml
exit 0
else
mkdir -p /opt/autoscaler
AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml"
autoscaler_conf_file="/opt/autoscaler/autoscaler_tmpl.yaml"
if ! curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file}; then
echo "Unable to connect to the internet to download the autoscaler deployment and image"
exit 1
else
sed -e "s/<cluster-id>/$ID/g" -e "s/<min>/$MINSIZE/g" -e "s/<max>/$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml
/opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml
exit 0
fi
fi
else
/opt/bin/kubectl delete deployment -n kube-system cluster-autoscaler
fi
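The management server is expected to invoke this script on the control node over SSH; a hedged Java sketch mirroring the SshHelper.sshExecute usage visible in KubernetesClusterUtil earlier (the /opt/bin install path, user, port and timeout values are assumptions):

    // Enable autoscaling between minSize and maxSize nodes on a cluster.
    Pair<Boolean, String> result = SshHelper.sshExecute(ipAddress, 22, "core", sshKeyFile, null,
            String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -m %d -M %d",
                    clusterUuid, minSize, maxSize),
            10000, 10000, 30000);
    if (!result.first()) {
        throw new CloudRuntimeException("Failed to enable autoscaling: " + result.second());
    }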

View File

@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.
(kubectl get pods -A | grep cloud-controller-manager) && exit 0
(/opt/bin/kubectl get pods -A | grep cloud-controller-manager) && exit 0
if [ -e /opt/provider/provider.yaml ]; then
/opt/bin/kubectl apply -f /opt/provider/provider.yaml

View File

@ -101,22 +101,28 @@ if [ -d "$BINARIES_DIR" ]; then
cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml
fi
# Fetch the autoscaler if present
if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then
mkdir -p /opt/autoscaler
cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml
fi
tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz
tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz
if [ "${IS_MAIN_CONTROL}" == 'true' ]; then
set +e
kubeadm upgrade apply ${UPGRADE_VERSION} -y
kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} -y
retval=$?
set -e
if [ $retval -ne 0 ]; then
kubeadm upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y
kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y
fi
else
if [ "${IS_OLD_VERSION}" == 'true' ]; then
kubeadm upgrade node config --kubelet-version ${UPGRADE_VERSION}
kubeadm --v=5 upgrade node config --kubelet-version ${UPGRADE_VERSION}
else
kubeadm upgrade node
kubeadm --v=5 upgrade node
fi
fi
@ -126,8 +132,8 @@ if [ -d "$BINARIES_DIR" ]; then
systemctl restart kubelet
if [ "${IS_MAIN_CONTROL}" == 'true' ]; then
kubectl apply -f ${BINARIES_DIR}/network.yaml
kubectl apply -f ${BINARIES_DIR}/dashboard.yaml
/opt/bin/kubectl apply -f ${BINARIES_DIR}/network.yaml
/opt/bin/kubectl apply -f ${BINARIES_DIR}/dashboard.yaml
fi
umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}"

View File

@ -476,7 +476,8 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast
if (defaultDns2 != null) {
buf.append(" dns2=").append(defaultDns2);
}
String msPublicKey = _configDao.getValue("ssh.publickey");
buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey));
if (s_logger.isDebugEnabled()) {
s_logger.debug("Boot Args for " + profile + ": " + buf.toString());
}

View File

@ -218,6 +218,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
buf.append(" localgw=").append(dest.getPod().getGateway());
}
}
String msPublicKey = _configDao.getValue("ssh.publickey");
buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey));
}
if (controlNic == null) {

View File

@ -457,7 +457,6 @@ public class MockAccountManager extends ManagerBase implements AccountManager {
return null;
}
@Override
public Map<String, String> getKeys(Long userId) {
return null;

12
pom.xml
View File

@ -17,7 +17,7 @@
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
@ -173,6 +173,8 @@
<cs.xmlrpc.version>3.1.3</cs.xmlrpc.version>
<cs.xstream.version>1.4.15</cs.xstream.version>
<org.springframework.version>5.3.3</org.springframework.version>
<cs.ini.version>0.5.4</cs.ini.version>
<cs.gmaven.version>1.12.0</cs.gmaven.version>
</properties>
<distributionManagement>
@ -1036,10 +1038,10 @@
<meminitial>128m</meminitial>
<maxmem>512m</maxmem>
<compilerArgs>
<arg>-XDignore.symbol.file=true</arg>
<arg>--add-opens=java.base/java.lang=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.x509=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.provider=ALL-UNNAMED</arg>
<arg>-XDignore.symbol.file=true</arg>
<arg>--add-opens=java.base/java.lang=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.x509=ALL-UNNAMED</arg>
<arg>--add-exports=java.base/sun.security.provider=ALL-UNNAMED</arg>
</compilerArgs>
</configuration>
</plugin>

View File

@ -15,7 +15,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# $Id: createtmplt.sh 9132 2010-06-04 20:17:43Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/secondary/createtmplt.sh $
@ -110,6 +109,16 @@ create_from_file() {
}
create_from_file_user() {
local tmpltfs=$1
local tmpltimg=$2
local tmpltname=$3
[ -n "$verbose" ] && echo "Copying to $tmpltfs/$tmpltname...could take a while" >&2
sudo cp $tmpltimg /$tmpltfs/$tmpltname
}
tflag=
nflag=
fflag=
@ -118,8 +127,9 @@ hflag=
hvm=false
cleanup=false
dflag=
cloud=false
while getopts 'vuht:n:f:s:d:S:' OPTION
while getopts 'vcuht:n:f:s:d:S:' OPTION
do
case $OPTION in
t) tflag=1
@ -144,6 +154,8 @@ do
h) hflag=1
hvm="true"
;;
c) cloud="true"
;;
u) cleanup="true"
;;
v) verbose="true"
@ -199,7 +211,14 @@ fi
imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}')
create_from_file $tmpltfs $tmpltimg2 $tmpltname
if [ "$cloud" == "true" ]
then
create_from_file_user $tmpltfs $tmpltimg2 $tmpltname
tmpltfs=/tmp/cloud/templates/
else
create_from_file $tmpltfs $tmpltimg2 $tmpltname
fi
touch /$tmpltfs/template.properties
rollback_if_needed $tmpltfs $? "Failed to create template.properties file"
@ -213,7 +232,7 @@ echo "description=$descr" >> /$tmpltfs/template.properties
echo "hvm=$hvm" >> /$tmpltfs/template.properties
echo "size=$imgsize" >> /$tmpltfs/template.properties
if [ "$cleanup" == "true" ]
if [[ "$cleanup" == "true" ]] && [[ $cloud != "true" ]]
then
rm -f $tmpltimg
fi

View File

@ -0,0 +1,172 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Usage: e.g. failed $? "this is an error"
set -x
failed() {
local returnval=$1
local returnmsg=$2
# check for a message; if there is none, don't print anything
if [[ -z $returnmsg ]]; then
:
else
echo -e $returnmsg
fi
if [[ $returnval -eq 0 ]]; then
return 0
else
echo "Installation failed"
exit $returnval
fi
}
# if the first argument does not start with a dash (-), print the usage block and exit
if [[ ! $@ =~ ^\-.+ ]]; then
usage
exit 0
fi
OPTERR=0
while getopts 'h:f:d:u:' OPTION
do
case $OPTION in
u) uflag=1
uuid="$OPTARG"
;;
f) fflag=1
tmpltimg="$OPTARG"
;;
h) hyper="$OPTARG"
;;
d) destdir="$OPTARG"
;;
?) usage
exit 0
;;
*) usage
exit 0
;;
esac
done
if [[ "$hyper" == "kvm" ]]; then
ext="qcow2"
qemuimgcmd=$(which qemu-img)
elif [[ "$hyper" == "xenserver" ]]; then
ext="vhd"
elif [[ "$hyper" == "vmware" ]]; then
ext="ova"
elif [[ "$hyper" == "lxc" ]]; then
ext="qcow2"
elif [[ "$hyper" == "hyperv" ]]; then
ext="vhd"
elif [[ "$hyper" == "ovm3" ]]; then
ext="raw"
else
failed 2 "Please add a correct hypervisor name like: kvm|vmware|xenserver|hyperv|ovm3"
fi
localfile=$uuid.$ext
sudo mkdir -p $destdir
if [[ $? -ne 0 ]]; then
failed 2 "Failed to write to destdir $destdir -- is it mounted?\n"
fi
if [[ -f $destdir/template.properties ]]; then
failed 2 "Data already exists at destination $destdir"
fi
destfiles=$(find $destdir -name \*.$ext)
if [[ "$destfiles" != "" ]]; then
failed 2 "Data already exists at destination $destdir"
fi
tmpfolder=/tmp/cloud/templates/
mkdir -p $tmpfolder
tmplfile=$tmpfolder/$localfile
sudo touch $tmplfile
if [[ $? -ne 0 ]]; then
failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n"
fi
destcap=$(df -P $destdir | awk '{print $4}' | tail -1 )
[ $destcap -lt $DISKSPACE ] && echo "Insufficient free disk space for target folder $destdir: avail=${destcap}k req=${DISKSPACE}k" && failed 4
localcap=$(df -P $tmpfolder | awk '{print $4}' | tail -1 )
[ $localcap -lt $DISKSPACE ] && echo "Insufficient free disk space for local temporary folder $tmpfolder: avail=${localcap}k req=${DISKSPACE}k" && failed 4
if [[ "$fflag" == "1" ]]; then
sudo cp $tmpltimg $tmplfile
if [[ $? -ne 0 ]]; then
failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n"
fi
fi
installrslt=$($(dirname $0)/createtmplt.sh -s 2 -d "SystemVM Template ( $hyper )" -n $localfile -t $destdir/ -f $tmplfile -u -v -c)
if [[ $? -ne 0 ]]; then
failed 2 "Failed to install system vm template $tmpltimg to $destdir: $installrslt"
fi
tmpdestdir=$tmpfolder
if [ "$ext" == "ova" ]
then
tar xvf $tmpdestdir/$localfile -C $tmpdestdir &> /dev/null
sudo cp $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $destdir/
rm -rf $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $tmpdestdir/*.ova
else
rm -rf $tmpdestdir/*.tmp
fi
tmpltfile=$destdir/$localfile
tmpltsize=$(ls -l $tmpltfile | awk -F" " '{print $5}')
if [[ "$ext" == "qcow2" ]]; then
vrtmpltsize=$($qemuimgcmd info $tmpltfile | grep -i 'virtual size' | sed -ne 's/.*(\([0-9]*\).*/\1/p' | xargs)
else
vrtmpltsize=$tmpltsize
fi
templateId=${destdir##*/}
sudo touch $destdir/template.properties
echo "$ext=true" >> $tmpdestdir/template.properties
echo "id=$templateId" >> $tmpdestdir/template.properties
echo "public=true" >> $tmpdestdir/template.properties
echo "$ext.filename=$localfile" >> $tmpdestdir/template.properties
echo "uniquename=routing-$templateId" >> $tmpdestdir/template.properties
echo "$ext.virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties
echo "virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties
echo "$ext.size=$tmpltsize" >> $tmpdestdir/template.properties
sudo cp $tmpdestdir/template.properties $destdir/template.properties
if [ -f "$tmpdestdir/template.properties" ]
then
rm -rf $tmpdestdir/template.properties
fi
echo "Successfully installed system VM template $tmpltimg and template.properties to $destdir"
exit 0

View File

@ -25,6 +25,7 @@ if [ $# -lt 6 ]; then
fi
RELEASE="v${2}"
VAL="1.18.0"
output_dir="${1}"
start_dir="$PWD"
iso_dir="/tmp/iso"
@ -60,12 +61,20 @@ echo "Downloading kubelet.service ${RELEASE}..."
cd "${start_dir}"
kubelet_service_file="${working_dir}/kubelet.service"
touch "${kubelet_service_file}"
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file}
if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file}
else
curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file}
fi
echo "Downloading 10-kubeadm.conf ${RELEASE}..."
kubeadm_conf_file="${working_dir}/10-kubeadm.conf"
touch "${kubeadm_conf_file}"
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file}
if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file}
else
curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file}
fi
NETWORK_CONFIG_URL="${5}"
echo "Downloading network config ${NETWORK_CONFIG_URL}"
@ -77,6 +86,12 @@ echo "Downloading dashboard config ${DASHBORAD_CONFIG_URL}"
dashboard_conf_file="${working_dir}/dashboard.yaml"
curl -sSL ${DASHBORAD_CONFIG_URL} -o ${dashboard_conf_file}
# TODO : Change the url once merged
AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml"
echo "Downloading kubernetes cluster autoscaler ${AUTOSCALER_URL}"
autoscaler_conf_file="${working_dir}/autoscaler.yaml"
curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file}
PROVIDER_URL="https://raw.githubusercontent.com/apache/cloudstack-kubernetes-provider/main/deployment.yaml"
echo "Downloading kubernetes cluster provider ${PROVIDER_URL}"
provider_conf_file="${working_dir}/provider.yaml"
@ -107,6 +122,10 @@ do
output=`printf "%s\n" ${output} ${images}`
done
# Also pull the autoscaler and provider images referenced by their configs
autoscaler_image=`grep "image:" ${autoscaler_conf_file} | cut -d ':' -f2- | tr -d ' '`
output=`printf "%s\n" ${output} ${autoscaler_image}`
provider_image=`grep "image:" ${provider_conf_file} | cut -d ':' -f2- | tr -d ' '`
output=`printf "%s\n" ${output} ${provider_image}`

View File

@ -25,6 +25,7 @@ while getopts "n:c:h" opt; do
name=$OPTARG
;;
c )
bootargs=$OPTARG
cmdline=$(echo $OPTARG | base64 -w 0)
;;
h )
@ -70,11 +71,5 @@ do
sleep 0.1
done
# Write ssh public key
send_file $name "/root/.ssh/authorized_keys" $sshkey
# Fix ssh public key permission
virsh qemu-agent-command $name '{"execute":"guest-exec","arguments":{"path":"chmod","arg":["go-rwx","/root/.ssh/authorized_keys"]}}' > /dev/null
# Write cmdline payload
send_file $name "/var/cache/cloud/cmdline" $cmdline

View File

@ -18,8 +18,7 @@
# Copies keys that enable SSH communication with system vms
# $1 = new public key
# $2 = new private key
# $1 = new private key
#set -x
set -e
@ -33,34 +32,6 @@ clean_up() {
$SUDO umount $MOUNTPATH
}
inject_into_iso() {
local isofile=${systemvmpath}
local newpubkey=$2
local backup=${isofile}.bak
local tmpiso=${TMP}/$1
mkdir -p $MOUNTPATH
[ ! -f $isofile ] && echo "$(basename $0): Could not find systemvm iso patch file $isofile" && return 1
$SUDO mount -o loop $isofile $MOUNTPATH
[ $? -ne 0 ] && echo "$(basename $0): Failed to mount original iso $isofile" && clean_up && return 1
diff -q $MOUNTPATH/authorized_keys $newpubkey &> /dev/null && echo "New public key is the same as the one in the systemvm.iso, not injecting it, not modifying systemvm.iso" && clean_up && return 0
$SUDO cp -b $isofile $backup
[ $? -ne 0 ] && echo "$(basename $0): Failed to backup original iso $isofile" && clean_up && return 1
rm -rf $TMPDIR
mkdir -p $TMPDIR
[ ! -d $TMPDIR ] && echo "$(basename $0): Could not find/create temporary dir $TMPDIR" && clean_up && return 1
$SUDO cp -fr $MOUNTPATH/* $TMPDIR/
[ $? -ne 0 ] && echo "$(basename $0): Failed to copy from original iso $isofile" && clean_up && return 1
$SUDO cp $newpubkey $TMPDIR/authorized_keys
[ $? -ne 0 ] && echo "$(basename $0): Failed to copy key $newpubkey from original iso to new iso " && clean_up && return 1
mkisofs -quiet -r -o $tmpiso $TMPDIR
[ $? -ne 0 ] && echo "$(basename $0): Failed to create new iso $tmpiso from $TMPDIR" && clean_up && return 1
$SUDO umount $MOUNTPATH
[ $? -ne 0 ] && echo "$(basename $0): Failed to unmount old iso from $MOUNTPATH" && return 1
$SUDO cp -f $tmpiso $isofile
[ $? -ne 0 ] && echo "$(basename $0): Failed to overwrite old iso $isofile with $tmpiso" && return 1
rm -rf $TMPDIR
}
copy_priv_key() {
local newprivkey=$1
diff -q $newprivkey $(dirname $0)/id_rsa.cloud && return 0
@ -76,28 +47,19 @@ fi
$SUDO mkdir -p $MOUNTPATH
[ $# -ne 3 ] && echo "Usage: $(basename $0) <new public key file> <new private key file> <systemvm iso path>" && exit 3
newpubkey=$1
newprivkey=$2
systemvmpath=$3
[ ! -f $newpubkey ] && echo "$(basename $0): Could not open $newpubkey" && exit 3
[ $# -ne 1 ] && echo "Usage: $(basename $0) <new private key file>" && exit 3
newprivkey=$1
[ ! -f $newprivkey ] && echo "$(basename $0): Could not open $newprivkey" && exit 3
command -v mkisofs > /dev/null || (echo "$(basename $0): mkisofs not found, please install or ensure PATH is accurate" ; exit 4)
# if running inside Docker unprivileged, skip the ssh key handling as the iso cannot be mounted due to a missing loop device.
if [ -f /.dockerenv ]; then
if [ -e /dev/loop0 ]; then
# it's a docker instance with privileges.
inject_into_iso systemvm.iso $newpubkey
[ $? -ne 0 ] && exit 5
copy_priv_key $newprivkey
else
# this mean it's a docker instance, ssh key cannot be verify.
echo "We run inside Docker, skipping ssh key insertion in systemvm.iso"
# this means it's a docker instance; the ssh key cannot be verified.
echo "Running inside Docker, skipping the private key copy"
fi
else
inject_into_iso systemvm.iso $newpubkey
[ $? -ne 0 ] && exit 5
copy_priv_key $newprivkey
fi

View File

@ -18,7 +18,7 @@
# The CloudStack management server needs sudo permissions
# without a password.
Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool
Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch
Defaults:@MSUSER@ !requiretty

View File

@ -168,7 +168,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
List<ImageStoreVO> storesInZone = dataStoreDao.listStoresByZoneId(template.getDataCenterId());
Long[] storeIds = storesInZone.stream().map(ImageStoreVO::getId).toArray(Long[]::new);
List<TemplateDataStoreVO> templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId(), storeIds);
List<Map<String, String>> downloadProgressDetails = new ArrayList();
List<Map<String, String>> downloadProgressDetails = new ArrayList<>();
HashMap<String, String> downloadDetailInImageStores = null;
for (TemplateDataStoreVO templateInStore : templatesInStore) {
downloadDetailInImageStores = new HashMap<>();

View File

@ -1225,6 +1225,9 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
buf.append(" disable_rp_filter=true");
}
String msPublicKey = configurationDao.getValue("ssh.publickey");
buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey));
boolean externalDhcp = false;
String externalDhcpStr = configurationDao.getValue("direct.attach.network.externalIpAllocator.enabled");
if (externalDhcpStr != null && externalDhcpStr.equalsIgnoreCase("true")) {
@ -1326,7 +1329,6 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
if(profile.getHypervisorType() == HypervisorType.Hyperv) {
controlNic = managementNic;
}
CheckSshCommand check = new CheckSshCommand(profile.getInstanceName(), controlNic.getIPv4Address(), 3922);
cmds.addCommand("checkSsh", check);

View File

@ -1336,7 +1336,7 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
} else {
vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" +
getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, true);
null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, true, null);
}
}

View File

@ -1939,6 +1939,8 @@ Configurable, StateListener<VirtualMachine.State, VirtualMachine.Event, VirtualM
if (Boolean.valueOf(_configDao.getValue("system.vm.random.password"))) {
buf.append(" vmpassword=").append(_configDao.getValue("system.vm.password"));
}
String msPublicKey = _configDao.getValue("ssh.publickey");
buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey));
NicProfile controlNic = null;
String defaultDns1 = null;

View File

@ -669,7 +669,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
}
s_logger.info("Going to update systemvm iso with generated keypairs if needed");
try {
injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath());
copyPrivateKeyToHosts(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath());
} catch (CloudRuntimeException e) {
if (!devel) {
throw new CloudRuntimeException(e.getMessage());
@ -738,8 +738,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
}
}
protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) {
s_logger.info("Trying to inject public and private keys into systemvm iso");
protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) {
s_logger.info("Trying to copy private keys to hosts");
String injectScript = getInjectScript();
String scriptPath = Script.findScript("", injectScript);
String systemVmIsoPath = Script.findScript("", "vms/systemvm.iso");
@ -757,15 +757,11 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
}
if (isOnWindows()) {
scriptPath = scriptPath.replaceAll("\\\\" ,"/" );
systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" );
publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" );
privKeyPath = privKeyPath.replaceAll("\\\\" ,"/" );
}
command.add(scriptPath);
command.add(publicKeyPath);
command.add(privKeyPath);
command.add(systemVmIsoPath);
command.add(scriptPath);
command.add(privKeyPath);
final String result = command.execute();
s_logger.info("The script injectkeys.sh was run with result : " + result);
if (result != null) {

View File

@ -22,6 +22,7 @@ import java.math.BigDecimal;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
@ -40,11 +41,14 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer;
import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand;
import com.cloud.network.router.VirtualNetworkApplianceManager;
import com.cloud.upgrade.SystemVmTemplateRegistration;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
@ -2637,6 +2641,29 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return null;
}
private String getValidTemplateName(Long zoneId, HypervisorType hType) {
String templateName = null;
switch (hType) {
case XenServer:
templateName = VirtualNetworkApplianceManager.RouterTemplateXen.valueIn(zoneId);
break;
case KVM:
templateName = VirtualNetworkApplianceManager.RouterTemplateKvm.valueIn(zoneId);
break;
case VMware:
templateName = VirtualNetworkApplianceManager.RouterTemplateVmware.valueIn(zoneId);
break;
case Hyperv:
templateName = VirtualNetworkApplianceManager.RouterTemplateHyperV.valueIn(zoneId);
break;
case LXC:
templateName = VirtualNetworkApplianceManager.RouterTemplateLxc.valueIn(zoneId);
break;
default:
break;
}
return templateName;
}
@Override
public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException {
DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
@ -2721,6 +2748,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// populate template_store_ref table
_imageSrv.addSystemVMTemplatesToSecondary(store);
_imageSrv.handleTemplateSync(store);
registerSystemVmTemplateOnFirstNfsStore(zoneId, providerName, url, store);
}
// associate builtin template with zones associated with this image store
@ -2734,6 +2762,69 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Image);
}
private void registerSystemVmTemplateOnFirstNfsStore(Long zoneId, String providerName, String url, DataStore store) {
if (DataStoreProvider.NFS_IMAGE.equals(providerName) && zoneId != null) {
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
List<ImageStoreVO> stores = _imageStoreDao.listAllStoresInZone(zoneId, providerName, DataStoreRole.Image);
stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList());
// Check if it's the only/first store in the zone
if (stores.size() == 0) {
List<HypervisorType> hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId);
Set<HypervisorType> hypSet = new HashSet<HypervisorType>(hypervisorTypes);
TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister");
SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration();
String filePath = null;
try {
filePath = Files.createTempDirectory(SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE).toString();
if (filePath == null) {
throw new CloudRuntimeException("Failed to create temporary file path to mount the store");
}
Pair<String, Long> storeUrlAndId = new Pair<>(url, store.getId());
for (HypervisorType hypervisorType : hypSet) {
try {
String templateName = getValidTemplateName(zoneId, hypervisorType);
Pair<Hypervisor.HypervisorType, String> hypervisorAndTemplateName =
new Pair<>(hypervisorType, templateName);
Long templateId = systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName);
VMTemplateVO vmTemplateVO = null;
TemplateDataStoreVO templateVO = null;
if (templateId != null) {
vmTemplateVO = _templateDao.findById(templateId);
templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image);
if (templateVO != null) {
try {
if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) {
continue;
}
} catch (Exception e) {
s_logger.error("Failed to validated if template is seeded", e);
}
}
}
SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath);
if (templateVO != null && vmTemplateVO != null) {
systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, filePath);
} else {
systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath);
}
} catch (CloudRuntimeException e) {
SystemVmTemplateRegistration.unmountStore(filePath);
s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e);
}
}
} catch (Exception e) {
s_logger.error("Failed to register systemVM template(s)");
} finally {
SystemVmTemplateRegistration.unmountStore(filePath);
txn.close();
}
}
}
});
}
}
@Override
public ImageStore migrateToObjectStore(String name, String url, String providerName, Map<String, String> details) throws DiscoveryException, InvalidParameterValueException {
// check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages

View File

@ -2789,6 +2789,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
return volService.takeSnapshot(volume);
}
private boolean isOperationSupported(VMTemplateVO template, UserVmVO userVm) {
if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM &&
(userVm == null || !UserVmManager.CKS_NODE.equals(userVm.getUserVmType()))) {
return false;
}
return true;
}
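The guard reads as: volumes backed by a SYSTEM template may only be snapshotted when they belong to a CKS node VM. A sketch of the expected outcomes (the fixture variables are assumptions; UserVmManager.CKS_NODE is "cksnode", declared later in this change):

    // systemTemplate.getTemplateType() == Storage.TemplateType.SYSTEM
    // cksNodeVm.getUserVmType() returns UserVmManager.CKS_NODE
    assert !isOperationSupported(systemTemplate, null);        // system volume, no VM
    assert !isOperationSupported(systemTemplate, ordinaryVm);  // system volume, regular VM
    assert isOperationSupported(systemTemplate, cksNodeVm);    // system volume, CKS node
    assert isOperationSupported(userTemplate, ordinaryVm);     // non-system volume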
@Override
@ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "allocating snapshot", create = true)
public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException {
@ -2817,7 +2825,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (volume.getTemplateId() != null) {
VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) {
Long instanceId = volume.getInstanceId();
UserVmVO userVmVO = null;
if (instanceId != null) {
userVmVO = _userVmDao.findById(instanceId);
}
if (!isOperationSupported(template, userVmVO)) {
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported");
}
}
@ -2874,7 +2887,12 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
if (volume.getTemplateId() != null) {
VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) {
Long instanceId = volume.getInstanceId();
UserVmVO userVmVO = null;
if (instanceId != null) {
userVmVO = _userVmDao.findById(instanceId);
}
if (!isOperationSupported(template, userVmVO)) {
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported");
}
}

View File

@ -134,6 +134,7 @@ import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
@ -844,7 +845,12 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
if (volume.getTemplateId() != null) {
VMTemplateVO template = _templateDao.findById(volume.getTemplateId());
if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) {
Long instanceId = volume.getInstanceId();
UserVmVO userVmVO = null;
if (instanceId != null) {
userVmVO = _vmDao.findById(instanceId);
}
if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) {
throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported");
}
}

View File

@ -57,6 +57,8 @@ public interface UserVmManager extends UserVmService {
static final int MAX_USER_DATA_LENGTH_BYTES = 2048;
public static final String CKS_NODE = "cksnode";
/**
* @param hostId get all of the virtual machines that belong to one host.
* @return collection of VirtualMachine.

View File

@ -3461,7 +3461,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap,
dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled);
dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null);
}
@ -3572,7 +3572,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap,
userVmOVFProperties, dynamicScalingEnabled);
userVmOVFProperties, dynamicScalingEnabled, null);
}
@Override
@ -3581,7 +3581,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List<Long> affinityGroupIdList,
Map<String, String> customParametrs, String customId, Map<String, Map<Integer, String>> dhcpOptionsMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap,
Map<String, String> userVmOVFPropertiesMap, boolean dynamicScalingEnabled) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
Map<String, String> userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
StorageUnavailableException, ResourceAllocationException {
Account caller = CallContext.current().getCallingAccount();
@ -3633,7 +3633,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData,
sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap,
dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, dynamicScalingEnabled);
dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, dynamicScalingEnabled, type);
}
private NetworkVO getNetworkToAddToNetworkList(VirtualMachineTemplate template, Account owner, HypervisorType hypervisor,
@ -3752,7 +3752,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
String sshKeyPair, HypervisorType hypervisor, Account caller, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard,
List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap,
Map<Long, DiskOffering> datadiskTemplateToDiskOfferringMap,
Map<String, String> userVmOVFPropertiesMap, boolean dynamicScalingEnabled) throws InsufficientCapacityException, ResourceUnavailableException,
Map<String, String> userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException, ResourceUnavailableException,
ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException {
_accountMgr.checkAccess(caller, null, true, owner);
@ -3927,7 +3927,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
if (template.getTemplateType().equals(TemplateType.SYSTEM)) {
if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(type)) {
throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm");
}
List<VMTemplateZoneVO> listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId());
@ -4116,7 +4116,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering,
isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap,
datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled);
datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, type);
// Assign instance to the group
try {
@ -4228,7 +4228,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
final long accountId, final long userId, final ServiceOffering offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap<String, List<NicProfile>> networkNicMap,
final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters,
final Map<String, Map<Integer, String>> extraDhcpOptionMap, final Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap,
final Map<String, String> userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled) throws InsufficientCapacityException {
final Map<String, String> userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException {
return Transaction.execute(new TransactionCallbackWithException<UserVmVO, InsufficientCapacityException>() {
@Override
public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException {
@ -4315,6 +4315,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
vm.setUserVmType(type);
_vmDao.persist(vm);
for (String key : customParameters.keySet()) {
if (key.equalsIgnoreCase(VmDetailConstants.CPU_NUMBER) ||
@ -4420,13 +4421,13 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap<String, List<NicProfile>> networkNicMap,
final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final Map<String,
Map<Integer, String>> extraDhcpOptionMap, final Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap,
Map<String, String> userVmOVFPropertiesMap, final boolean dynamicScalingEnabled) throws InsufficientCapacityException {
Map<String, String> userVmOVFPropertiesMap, final boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException {
return commitUserVm(false, zone, null, null, template, hostName, displayName, owner,
diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard,
accountId, userId, offering, isIso, sshPublicKey, networkNicMap,
id, instanceName, uuidName, hypervisorType, customParameters,
extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap,
userVmOVFPropertiesMap, null, dynamicScalingEnabled);
userVmOVFPropertiesMap, null, dynamicScalingEnabled, type);
}
public void validateRootDiskResize(final HypervisorType hypervisorType, Long rootDiskSize, VMTemplateVO templateVO, UserVmVO vm, final Map<String, String> customParameters) throws InvalidParameterValueException
@ -4737,12 +4738,54 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return vm;
}
private void addUserVMCmdlineArgs(Long vmId, VirtualMachineProfile profile, DeployDestination dest, StringBuilder buf) {
UserVmVO k8sVM = _vmDao.findById(vmId);
buf.append(" template=domP");
buf.append(" name=").append(profile.getHostName());
buf.append(" type=").append(k8sVM.getUserVmType());
for (NicProfile nic : profile.getNics()) {
int deviceId = nic.getDeviceId();
if (nic.getIPv4Address() == null) {
buf.append(" eth").append(deviceId).append("ip=").append("0.0.0.0");
buf.append(" eth").append(deviceId).append("mask=").append("0.0.0.0");
} else {
buf.append(" eth").append(deviceId).append("ip=").append(nic.getIPv4Address());
buf.append(" eth").append(deviceId).append("mask=").append(nic.getIPv4Netmask());
}
if (nic.isDefaultNic()) {
buf.append(" gateway=").append(nic.getIPv4Gateway());
}
if (nic.getTrafficType() == TrafficType.Management) {
String mgmt_cidr = _configDao.getValue(Config.ManagementNetwork.key());
if (NetUtils.isValidIp4Cidr(mgmt_cidr)) {
buf.append(" mgmtcidr=").append(mgmt_cidr);
}
buf.append(" localgw=").append(dest.getPod().getGateway());
}
}
DataCenterVO dc = _dcDao.findById(profile.getVirtualMachine().getDataCenterId());
buf.append(" internaldns1=").append(dc.getInternalDns1());
if (dc.getInternalDns2() != null) {
buf.append(" internaldns2=").append(dc.getInternalDns2());
}
buf.append(" dns1=").append(dc.getDns1());
if (dc.getDns2() != null) {
buf.append(" dns2=").append(dc.getDns2());
}
s_logger.info("cmdline details: " + buf.toString());
}
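For illustration, a CKS node built by addUserVMCmdlineArgs would boot with a kernel command line along these lines (all values below are hypothetical, not from the commit):
 template=domP name=testcluster-node-1 type=cksnode eth0ip=10.1.1.10 eth0mask=255.255.255.0 gateway=10.1.1.1 internaldns1=10.1.1.2 dns1=8.8.8.8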
@Override
public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) {
UserVmVO vm = _vmDao.findById(profile.getId());
Map<String, String> details = userVmDetailsDao.listDetailsKeyPairs(vm.getId());
vm.setDetails(details);
StringBuilder buf = profile.getBootArgsBuilder();
if (CKS_NODE.equals(vm.getUserVmType())) {
addUserVMCmdlineArgs(vm.getId(), profile, dest, buf);
}
// add userdata info into vm profile
Nic defaultNic = _networkModel.getDefaultNic(vm.getId());
if(defaultNic != null) {
@ -5618,7 +5661,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
vm = createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, name, displayName, diskOfferingId, size, group,
cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(),
cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled);
cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null);
}
}
// check if this templateId has a child ISO
@ -7752,7 +7795,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
null, null, userData, caller, isDisplayVm, keyboard,
accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKey, null,
id, instanceName, uuidName, hypervisorType, customParameters,
null, null, null, powerState, dynamicScalingEnabled);
null, null, null, powerState, dynamicScalingEnabled, null);
}
@Override

View File

@ -339,6 +339,7 @@ public class DiagnosticsServiceImpl extends ManagerBase implements PluggableServ
File dataDirectory = new File(dataDirectoryInSecondaryStore);
boolean existsInSecondaryStore = dataDirectory.exists() || dataDirectory.mkdir();
if (existsInSecondaryStore) {
// scp from system VM to mounted sec storage directory
String homeDir = System.getProperty("user.home");
File permKey = new File(homeDir + "/.ssh/id_rsa");
SshHelper.scpFrom(vmSshIp, 3922, "root", permKey, dataDirectoryInSecondaryStore, diagnosticsFile);

View File

@ -1084,6 +1084,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
buf.append(" guid=").append(profile.getVirtualMachine().getHostName());
buf.append(" workers=").append(_configDao.getValue("workers"));
String msPublicKey = _configDao.getValue("ssh.publickey");
buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey));
if (_configDao.isPremium()) {
s_logger.debug("VMWare hypervisor was configured, informing secondary storage VM to load the PremiumSecondaryStorageResource.");

View File

@ -173,6 +173,7 @@ patch_systemvm() {
patch() {
local PATCH_MOUNT=/media/cdrom
local logfile="/var/log/patchsystemvm.log"
if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f ${PATCH_MOUNT}/agent.zip ] && [ -f /var/cache/cloud/patch.required ]
then
echo "Patching systemvm for cloud service with mount=$PATCH_MOUNT for type=$TYPE" >> $logfile

View File

@ -0,0 +1,76 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
. /opt/cloud/bin/setup/common.sh
setup_k8s_node() {
log_it "Setting up k8s node"
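# Kubernetes components (kube-proxy/kubeadm) still expect the legacy iptables backend on Debian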
update-alternatives --set iptables /usr/sbin/iptables-legacy
update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
update-alternatives --set arptables /usr/sbin/arptables-legacy
update-alternatives --set ebtables /usr/sbin/ebtables-legacy
# set ssh back to the default port 22
sed -i 's/3922/22/g' /etc/ssh/sshd_config
# Prevent root login
> /root/.ssh/authorized_keys
passwd -l root
#sed -i 's#root:x:0:0:root:/root:/bin/bash#root:x:0:0:root:/root:/sbin/nologin#' /etc/passwd
swapoff -a
sudo sed -i '/ swap / s/^/#/' /etc/fstab
log_it "Swap disabled"
log_it "Setting up interfaces"
setup_common eth0
setup_system_rfc1918_internal
log_it "Setting up entry in hosts"
sed -i /$NAME/d /etc/hosts
echo "$ETH0_IP $NAME" >> /etc/hosts
public_ip=`getPublicIp`
echo "$public_ip $NAME" >> /etc/hosts
echo "export PATH='$PATH:/opt/bin/'">> ~/.bashrc
disable_rpfilter
enable_fwding 1
enable_irqbalance 0
setup_ntp
dhclient -1
rm -f /etc/logrotate.d/cloud
log_it "Starting cloud-init services"
systemctl enable --now --no-block containerd
systemctl enable --now --no-block docker.socket
systemctl enable --now --no-block docker.service
if [ -f /home/core/success ]; then
systemctl stop cloud-init cloud-config cloud-final
systemctl disable cloud-init cloud-config cloud-final
else
systemctl start --no-block cloud-init
systemctl start --no-block cloud-config
systemctl start --no-block cloud-final
fi
}
setup_k8s_node

View File

@ -61,7 +61,6 @@ patch() {
[ -f ${md5file} ] && oldmd5=$(cat ${md5file})
local newmd5=
[ -f ${patchfile} ] && newmd5=$(md5sum ${patchfile} | awk '{print $1}')
log_it "Scripts checksum detected: oldmd5=$oldmd5 newmd5=$newmd5"
if [ "$oldmd5" != "$newmd5" ] && [ -f ${patchfile} ] && [ "$newmd5" != "" ]
then
@ -89,6 +88,14 @@ start() {
rm -f /root/.rnd
echo "" > /root/.ssh/known_hosts
if which growpart > /dev/null; then
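# expand the root partition to fill a grown disk; partitions 2 and 6 cover the different template layouts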
ROOT_MOUNT_POINT=$(df -h / | tail -n 1 | cut -d' ' -f1)
ROOT_DISK=$(echo $ROOT_MOUNT_POINT | sed 's/[0-9]*$//g')
growpart $ROOT_DISK 2
growpart $ROOT_DISK 6
resize2fs $ROOT_MOUNT_POINT
fi
patch
sync
/opt/cloud/bin/setup/bootstrap.sh

View File

@ -543,7 +543,7 @@ setup_system_rfc1918_internal() {
public_ip=`getPublicIp`
echo "$public_ip" | grep -E "^((127\.)|(10\.)|(172\.1[6-9]\.)|(172\.2[0-9]\.)|(172\.3[0-1]\.)|(192\.168\.))"
if [ "$?" == "0" ]; then
log_it "Not setting up route of RFC1918 space to $LOCAL_GW befause $public_ip is RFC1918."
log_it "Not setting up route of RFC1918 space to $LOCAL_GW because $public_ip is RFC1918."
else
log_it "Setting up route of RFC1918 space to $LOCAL_GW"
# Setup general route for RFC 1918 space, as otherwise it will be sent to
@ -759,6 +759,9 @@ parse_cmd_line() {
ntpserverlist)
export NTP_SERVER_LIST=$VALUE
;;
authorized_key)
export AUTHORIZED_KEYS=$VALUE
;;
esac
done
echo -e "\n\t}\n}" >> ${CHEF_TMP_FILE}
@ -767,6 +770,17 @@ parse_cmd_line() {
mv ${CHEF_TMP_FILE} /var/cache/cloud/cmd_line.json
fi
TMP_KEY_PATH=/tmp/.auth_key
AUTHORIZED_KEYS_PATH=/root/.ssh/authorized_keys
if [ ! -z "$AUTHORIZED_KEYS" ]
then
echo "$AUTHORIZED_KEYS" > $TMP_KEY_PATH
base64Val=$(base64 -d $TMP_KEY_PATH)
echo "$base64Val" > $AUTHORIZED_KEYS_PATH
chmod go-rwx $AUTHORIZED_KEYS_PATH
rm -rf $TMP_KEY_PATH
fi
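A minimal sketch of the authorized_key round-trip in Python (illustrative only, not CloudStack code): the management server places a base64-encoded copy of the ssh.publickey value on the kernel command line, and the shell above decodes it into root's authorized_keys.

import base64

ms_public_key = "ssh-rsa AAAAB3Nza... cloudstack@mgmt"  # hypothetical key value
cmdline_value = base64.b64encode(ms_public_key.encode()).decode()  # passed as authorized_key=...
restored = base64.b64decode(cmdline_value).decode()  # what lands in /root/.ssh/authorized_keys
assert restored == ms_public_key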
[ $ETH0_IP ] && export LOCAL_ADDRS=$ETH0_IP
[ $ETH0_IP6 ] && export LOCAL_ADDRS=$ETH0_IP6
[ $ETH0_IP ] && [ $ETH0_IP6 ] && export LOCAL_ADDRS="$ETH0_IP,$ETH0_IP6"

View File

@ -18,8 +18,17 @@
#
# This script runs before ssh.service but after cloud-early-config
log_it() {
echo "$(date) $@" >> /var/log/cloud.log
log_action_msg "$@"
}
# Eject cdrom if any
eject || true
CMDLINE=/var/cache/cloud/cmdline
export TYPE=$(grep -Po 'type=\K[a-zA-Z]*' $CMDLINE)
if [ "$TYPE" != "cksnode" ]; then
eject || true
fi
# Restart journald for setting changes to apply
systemctl restart systemd-journald
@ -33,6 +42,10 @@ then
fi
fi
if [ "$TYPE" == "cksnode" ]; then
pkill -9 dhclient
fi
[ ! -f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs
for svc in $(cat /var/cache/cloud/enabled_svcs)
do

View File

@ -31,6 +31,7 @@ from marvin.cloudstackAPI import (listInfrastructure,
deleteKubernetesCluster,
upgradeKubernetesCluster,
scaleKubernetesCluster,
getKubernetesClusterConfig,
destroyVirtualMachine,
deleteNetwork)
from marvin.cloudstackException import CloudstackAPIException
@ -49,7 +50,8 @@ from marvin.sshClient import SshClient
from nose.plugins.attrib import attr
from marvin.lib.decoratorGenerators import skipTestIf
import time
from kubernetes import client, config
import time, io, yaml
_multiprocess_shared_ = True
@ -59,13 +61,12 @@ class TestKubernetesCluster(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
testClient = super(TestKubernetesCluster, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
cls.services = testClient.getParsedTestDataConfig()
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
cls.hypervisor = testClient.getHypervisorInfo()
cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__
cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower()
cls.hypervisorNotSupported = False
if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
@ -88,59 +89,26 @@ class TestKubernetesCluster(cloudstackTestCase):
"true")
cls.restartServer()
cls.updateVmwareSettings(False)
cls.cks_template = None
cls.initial_configuration_cks_template_name = None
cls.cks_service_offering = None
if cls.setup_failed == False:
try:
cls.kubernetes_version_1 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.14.9"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_1.id)
cls.kubernetes_version_1_20_9 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.20.9"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_1_20_9.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
(cls.services["cks_kubernetes_versions"]["1.14.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.14.9"]["url"], e))
(cls.services["cks_kubernetes_versions"]["1.20.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.20.9"]["url"], e))
if cls.setup_failed == False:
try:
cls.kubernetes_version_2 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_2.id)
cls.kubernetes_version_1_21_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.21.3"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_1_21_3.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
(cls.services["cks_kubernetes_versions"]["1.15.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.15.0"]["url"], e))
if cls.setup_failed == False:
try:
cls.kubernetes_version_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_3.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
(cls.services["cks_kubernetes_versions"]["1.16.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.0"]["url"], e))
if cls.setup_failed == False:
try:
cls.kubernetes_version_4 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"])
cls.kubernetes_version_ids.append(cls.kubernetes_version_4.id)
except Exception as e:
cls.setup_failed = True
cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" %
(cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e))
(cls.services["cks_kubernetes_versions"]["1.21.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.21.3"]["url"], e))
if cls.setup_failed == False:
cls.cks_template, existAlready = cls.getKubernetesTemplate()
if cls.cks_template == FAILED:
assert False, "getKubernetesTemplate() failed to return template for hypervisor %s" % cls.hypervisor
cls.setup_failed = True
else:
if not existAlready:
cls._cleanup.append(cls.cks_template)
if cls.setup_failed == False:
cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient,
name=cls.cks_template_name_key)[0].value
Configurations.update(cls.apiclient,
cls.cks_template_name_key,
cls.cks_template.name)
cks_offering_data = cls.services["cks_service_offering"]
cks_offering_data["name"] = 'CKS-Instance-' + random_gen()
cls.cks_service_offering = ServiceOffering.create(
@ -159,6 +127,10 @@ class TestKubernetesCluster(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
if k8s_cluster != None and k8s_cluster.id != None:
clsObj = TestKubernetesCluster()
clsObj.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
version_delete_failed = False
# Delete added Kubernetes supported version
for version_id in cls.kubernetes_version_ids:
@ -168,11 +140,6 @@ class TestKubernetesCluster(cloudstackTestCase):
version_delete_failed = True
cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e)
try:
# Restore original CKS template
if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None:
Configurations.update(cls.apiclient,
cls.cks_template_name_key,
cls.initial_configuration_cks_template_name)
# Restore CKS enabled
if cls.initial_configuration_cks_enabled not in ["true", True]:
cls.debug("Restoring Kubernetes Service enabled value")
@ -242,41 +209,6 @@ class TestKubernetesCluster(cloudstackTestCase):
except Exception:
return False
@classmethod
def getKubernetesTemplate(cls, cks_templates=None):
if cks_templates is None:
cks_templates = cls.services["cks_templates"]
hypervisor = cls.hypervisor.lower()
if hypervisor not in list(cks_templates.keys()):
cls.debug("Provided hypervisor has no CKS template")
return FAILED, False
cks_template = cks_templates[hypervisor]
cmd = listTemplates.listTemplatesCmd()
cmd.name = cks_template['name']
cmd.templatefilter = 'all'
cmd.zoneid = cls.zone.id
cmd.hypervisor = hypervisor
templates = cls.apiclient.listTemplates(cmd)
if validateList(templates)[0] != PASS:
details = None
if hypervisor in ["vmware"] and "details" in cks_template:
details = cks_template["details"]
template = Template.register(cls.apiclient, cks_template, zoneid=cls.zone.id, hypervisor=hypervisor.lower(), randomize_name=False, details=details)
template.download(cls.apiclient)
return template, False
for template in templates:
if template.isready and template.ispublic:
return Template(template.__dict__), True
return FAILED, False
@classmethod
def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60):
"""Check if Kubernetes supported version ISO is in Ready state"""
@ -319,9 +251,79 @@ class TestKubernetesCluster(cloudstackTestCase):
def deleteKubernetesSupportedVersion(cls, version_id):
deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd()
deleteKubernetesSupportedVersionCmd.id = version_id
deleteKubernetesSupportedVersionCmd.deleteiso = True
cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd)
@classmethod
def listKubernetesCluster(cls, cluster_id = None):
listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd()
listKubernetesClustersCmd.listall = True
if cluster_id != None:
listKubernetesClustersCmd.id = cluster_id
clusterResponse = cls.apiclient.listKubernetesClusters(listKubernetesClustersCmd)
if cluster_id != None and clusterResponse != None:
return clusterResponse[0]
return clusterResponse
@classmethod
def deleteKubernetesCluster(cls, cluster_id):
deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd()
deleteKubernetesClusterCmd.id = cluster_id
response = cls.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd)
return response
@classmethod
def stopKubernetesCluster(cls, cluster_id):
stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd()
stopKubernetesClusterCmd.id = cluster_id
response = cls.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd)
return response
def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False):
"""Delete Kubernetes cluster and check if it is really deleted"""
delete_response = {}
forceDeleted = False
try:
delete_response = self.deleteKubernetesCluster(cluster_id)
except Exception as e:
if forced:
cluster = self.listKubernetesCluster(cluster_id)
if cluster != None:
if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']:
self.stopKubernetesCluster(cluster_id)
self.deleteKubernetesCluster(cluster_id)
else:
forceDeleted = True
for cluster_vm in cluster.virtualmachines:
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
cmd.id = cluster_vm.id
cmd.expunge = True
self.apiclient.destroyVirtualMachine(cmd)
cmd = deleteNetwork.deleteNetworkCmd()
cmd.id = cluster.networkid
cmd.forced = True
self.apiclient.deleteNetwork(cmd)
self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id)
else:
raise Exception("Error: Exception during delete cluster : %s" % e)
if verify == True and forceDeleted == False:
self.assertEqual(
delete_response.success,
True,
"Check KubernetesCluster delete response {}, {}".format(delete_response.success, True)
)
db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0]
self.assertNotEqual(
db_cluster_removed,
None,
"KubernetesCluster not removed in DB, {}".format(db_cluster_removed)
)
def setUp(self):
self.services = self.testClient.getParsedTestDataConfig()
self.apiclient = self.testClient.getApiClient()
@ -347,24 +349,25 @@ class TestKubernetesCluster(cloudstackTestCase):
if self.setup_failed == True:
self.fail("Setup incomplete")
global k8s_cluster
k8s_cluster = self.getValidKubernetesCluster()
k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3)
self.debug("Upgrading Kubernetes cluster with ID: %s to a lower version" % k8s_cluster.id)
self.debug("Downgrading Kubernetes cluster with ID: %s to a lower version. This should fail!" % k8s_cluster.id)
try:
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1.id)
self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % kubernetes_version_1.id)
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_20_9.id)
self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_20_9.id)
self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.")
self.fail("Kubernetes cluster downgrade to a lower Kubernetes supported version. Must be an error.")
except Exception as e:
self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_02_deploy_and_upgrade_kubernetes_cluster(self):
def test_02_upgrade_kubernetes_cluster(self):
"""Test to deploy a new Kubernetes cluster and upgrade it to newer version
# Validate the following:
@ -373,19 +376,17 @@ class TestKubernetesCluster(cloudstackTestCase):
if self.setup_failed == True:
self.fail("Setup incomplete")
global k8s_cluster
k8s_cluster = self.getValidKubernetesCluster()
k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_20_9)
time.sleep(self.services["sleep"])
self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id)
try:
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_3.id)
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id)
except Exception as e:
self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e)
self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_3.id)
self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@ -403,7 +404,6 @@ class TestKubernetesCluster(cloudstackTestCase):
k8s_cluster = self.getValidKubernetesCluster()
self.debug("Upscaling Kubernetes cluster with ID: %s" % k8s_cluster.id)
try:
k8s_cluster = self.scaleKubernetesCluster(k8s_cluster.id, 2)
except Exception as e:
@ -411,7 +411,6 @@ class TestKubernetesCluster(cloudstackTestCase):
self.fail("Failed to upscale Kubernetes cluster due to: %s" % e)
self.verifyKubernetesClusterScale(k8s_cluster, 2)
self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % k8s_cluster.id)
try:
@ -421,14 +420,38 @@ class TestKubernetesCluster(cloudstackTestCase):
self.fail("Failed to downscale Kubernetes cluster due to: %s" % e)
self.verifyKubernetesClusterScale(k8s_cluster)
self.debug("Kubernetes cluster with ID: %s successfully downscaled" % k8s_cluster.id)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_04_basic_lifecycle_kubernetes_cluster(self):
def test_04_autoscale_kubernetes_cluster(self):
"""Test to enable autoscaling a Kubernetes cluster
# Validate the following:
# 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled
# 2. cluster-autoscaler pod should be running
"""
if self.setup_failed == True:
self.fail("Setup incomplete")
global k8s_cluster
k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3)
self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id)
try:
k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2)
self.verifyKubernetesClusterAutoscale(k8s_cluster, 1, 2)
up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id)
self.assertTrue(up, "Autoscaler pod failed to run")
self.debug("Kubernetes cluster with ID: %s has autoscaler running" % k8s_cluster.id)
except Exception as e:
self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
self.fail("Failed to autoscale Kubernetes cluster due to: %s" % e)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_05_basic_lifecycle_kubernetes_cluster(self):
"""Test to deploy a new Kubernetes cluster
# Validate the following:
@ -456,9 +479,10 @@ class TestKubernetesCluster(cloudstackTestCase):
self.verifyKubernetesClusterState(k8s_cluster, 'Running')
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_05_delete_kubernetes_cluster(self):
def test_06_delete_kubernetes_cluster(self):
"""Test to delete an existing Kubernetes cluster
# Validate the following:
@ -479,29 +503,6 @@ class TestKubernetesCluster(cloudstackTestCase):
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_06_deploy_invalid_kubernetes_ha_cluster(self):
"""Test to deploy an invalid HA Kubernetes cluster
# Validate the following:
# 1. createKubernetesCluster should fail as version doesn't support HA
"""
if self.setup_failed == True:
self.fail("Setup incomplete")
name = 'testcluster-' + random_gen()
self.debug("Creating for Kubernetes cluster with name %s" % name)
try:
cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id, 1, 2)
self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id)
self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True)
self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error.")
except CloudstackAPIException as e:
self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_07_deploy_kubernetes_ha_cluster(self):
@ -515,15 +516,13 @@ class TestKubernetesCluster(cloudstackTestCase):
self.fail("Setup incomplete")
global k8s_cluster
k8s_cluster = self.getValidKubernetesCluster(1, 2)
self.debug("HA Kubernetes cluster with ID: %s successfully deployed" % k8s_cluster.id)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@skipTestIf("hypervisorNotSupported")
def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self):
"""Test to deploy a new HA Kubernetes cluster and upgrade it to newer version
def test_08_upgrade_kubernetes_ha_cluster(self):
"""Test to upgrade a Kubernetes cluster to newer version
# Validate the following:
# 1. upgradeKubernetesCluster should return valid info for the cluster
@ -536,15 +535,13 @@ class TestKubernetesCluster(cloudstackTestCase):
self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id)
try:
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id)
k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id)
except Exception as e:
self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)
self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e)
self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_4.id)
self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id)
self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id)
return
@attr(tags=["advanced", "smoke"], required_hardware="true")
@ -561,22 +558,8 @@ class TestKubernetesCluster(cloudstackTestCase):
k8s_cluster = self.getValidKubernetesCluster(1, 2)
self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id)
self.deleteKubernetesClusterAndVerify(k8s_cluster.id)
self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id)
return
def listKubernetesCluster(self, cluster_id = None):
listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd()
if cluster_id != None:
listKubernetesClustersCmd.id = cluster_id
clusterResponse = self.apiclient.listKubernetesClusters(listKubernetesClustersCmd)
if cluster_id != None and clusterResponse != None:
return clusterResponse[0]
return clusterResponse
def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1):
createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd()
createKubernetesClusterCmd.name = name
@ -594,24 +577,12 @@ class TestKubernetesCluster(cloudstackTestCase):
self.cleanup.append(clusterResponse)
return clusterResponse
def stopKubernetesCluster(self, cluster_id):
stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd()
stopKubernetesClusterCmd.id = cluster_id
response = self.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd)
return response
def startKubernetesCluster(self, cluster_id):
startKubernetesClusterCmd = startKubernetesCluster.startKubernetesClusterCmd()
startKubernetesClusterCmd.id = cluster_id
response = self.apiclient.startKubernetesCluster(startKubernetesClusterCmd)
return response
def deleteKubernetesCluster(self, cluster_id):
deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd()
deleteKubernetesClusterCmd.id = cluster_id
response = self.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd)
return response
def upgradeKubernetesCluster(self, cluster_id, version_id):
upgradeKubernetesClusterCmd = upgradeKubernetesCluster.upgradeKubernetesClusterCmd()
upgradeKubernetesClusterCmd.id = cluster_id
@ -626,42 +597,96 @@ class TestKubernetesCluster(cloudstackTestCase):
response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd)
return response
def getValidKubernetesCluster(self, size=1, control_nodes=1):
def autoscaleKubernetesCluster(self, cluster_id, minsize, maxsize):
scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
scaleKubernetesClusterCmd.id = cluster_id
scaleKubernetesClusterCmd.autoscalingenabled = True
scaleKubernetesClusterCmd.minsize = minsize
scaleKubernetesClusterCmd.maxsize = maxsize
response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd)
return response
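Assuming standard CloudMonkey syntax, the equivalent CLI call would look roughly like this (illustrative, with a placeholder UUID; the parameter names match the scaleKubernetesCluster call above):
 scale kubernetescluster id=<cluster-uuid> autoscalingenabled=true minsize=1 maxsize=2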
def fetchKubernetesClusterConfig(self, cluster_id):
getKubernetesClusterConfigCmd = getKubernetesClusterConfig.getKubernetesClusterConfigCmd()
getKubernetesClusterConfigCmd.id = cluster_id
response = self.apiclient.getKubernetesClusterConfig(getKubernetesClusterConfigCmd)
return response
def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60):
k8s_config = self.fetchKubernetesClusterConfig(cluster_id)
cfg = io.StringIO(k8s_config.configdata)
cfg = yaml.safe_load(cfg)
# skip TLS verification so the cluster's self-signed certificate doesn't raise exceptions
cfg['clusters'][0]['cluster']['insecure-skip-tls-verify'] = True
config.load_kube_config_from_dict(cfg)
v1 = client.CoreV1Api()
while retries > 0:
time.sleep(interval)
pods = v1.list_pod_for_all_namespaces(watch=False, label_selector="app=cluster-autoscaler").items
if len(pods) == 0:
self.debug("Autoscaler pod still not up")
continue
pod = pods[0]
if pod.status.phase == 'Running':
self.debug("Autoscaler pod %s up and running!" % pod.metadata.name)
return True
self.debug("Autoscaler pod %s up but not running on retry %d. State is : %s" %(pod.metadata.name, retries, pod.status.phase))
retries = retries - 1
return False
def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}):
cluster = k8s_cluster
version = self.kubernetes_version_2
if control_nodes != 1:
version = self.kubernetes_version_3
valid = True
if cluster == None:
valid = False
# Does a cluster already exist?
if cluster == None or cluster.id == None:
if not version:
version = self.kubernetes_version_1_20_9
self.debug("No existing cluster available, k8s_cluster: %s" % cluster)
if valid == True and cluster.id == None:
valid = False
self.debug("ID for existing cluster not found, k8s_cluster ID: %s" % cluster.id)
if valid == True:
return self.createNewKubernetesCluster(version, size, control_nodes)
# Is the existing cluster what is needed?
valid = cluster.size == size and cluster.controlnodes == control_nodes
if version:
# Check the version only if specified
valid = valid and cluster.kubernetesversionid == version.id
else:
version = self.kubernetes_version_1_20_9
if valid:
cluster_id = cluster.id
cluster = self.listKubernetesCluster(cluster_id)
if cluster == None:
valid = False
# Looks like the cluster disappeared!
self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id)
if valid == True:
return self.createNewKubernetesCluster(version, size, control_nodes)
if valid:
try:
self.verifyKubernetesCluster(cluster, cluster.name, None, size, control_nodes)
self.debug("Existing Kubernetes cluster available with name %s" % cluster.name)
return cluster
except AssertionError as error:
valid = False
self.debug("Existing cluster failed verification due to %s, need to deploy a new one" % error)
if valid == False:
name = 'testcluster-' + random_gen()
self.debug("Creating for Kubernetes cluster with name %s" % name)
try:
self.deleteAllLeftoverClusters()
cluster = self.createKubernetesCluster(name, version.id, size, control_nodes)
self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes)
except Exception as ex:
self.fail("Kubernetes cluster deployment failed: %s" % ex)
except AssertionError as err:
self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err)
self.deleteKubernetesClusterAndVerify(cluster.id, False, True)
# Can't have too many loose clusters running around
if cluster.id != None:
self.deleteKubernetesClusterAndVerify(cluster.id, False, True)
self.debug("No valid cluster, need to deploy a new one")
return self.createNewKubernetesCluster(version, size, control_nodes)
def createNewKubernetesCluster(self, version, size, control_nodes):
name = 'testcluster-' + random_gen()
self.debug("Creating for Kubernetes cluster with name %s" % name)
try:
cluster = self.createKubernetesCluster(name, version.id, size, control_nodes)
self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes)
except Exception as ex:
self.fail("Kubernetes cluster deployment failed: %s" % ex)
except AssertionError as err:
self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err)
return cluster
def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, control_nodes=1):
@ -740,6 +765,21 @@ class TestKubernetesCluster(cloudstackTestCase):
self.verifyKubernetesClusterState(cluster_response, 'Running')
self.verifyKubernetesClusterSize(cluster_response, size, control_nodes)
def verifyKubernetesClusterAutoscale(self, cluster_response, minsize, maxsize):
"""Check if Kubernetes cluster state and autoscaling size limits are valid after enabling autoscaling"""
self.verifyKubernetesClusterState(cluster_response, 'Running')
self.assertEqual(
cluster_response.minsize,
minsize,
"Check KubernetesCluster minsize {}, {}".format(cluster_response.minsize, minsize)
)
self.assertEqual(
cluster_response.maxsize,
maxsize,
"Check KubernetesCluster maxsize {}, {}".format(cluster_response.maxsize, maxsize)
)
def stopAndVerifyKubernetesCluster(self, cluster_id):
"""Stop Kubernetes cluster and check if it is really stopped"""
@ -758,52 +798,3 @@ class TestKubernetesCluster(cloudstackTestCase):
'Stopped',
"KubernetesCluster not stopped in DB, {}".format(db_cluster_state)
)
def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False):
"""Delete Kubernetes cluster and check if it is really deleted"""
forceDeleted = False
try:
delete_response = self.deleteKubernetesCluster(cluster_id)
except Exception as e:
if forced:
cluster = self.listKubernetesCluster(cluster_id)
if cluster != None:
if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']:
self.stopKubernetesCluster(cluster_id)
self.deleteKubernetesCluster(cluster_id)
else:
forceDeleted = True
for cluster_vm in cluster.virtualmachines:
cmd = destroyVirtualMachine.destroyVirtualMachineCmd()
cmd.id = cluster_vm.id
cmd.expunge = True
self.apiclient.destroyVirtualMachine(cmd)
cmd = deleteNetwork.deleteNetworkCmd()
cmd.id = cluster.networkid
cmd.forced = True
self.apiclient.deleteNetwork(cmd)
self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id)
else:
raise Exception("Error: Exception during delete cluster : %s" % e)
if verify == True and forceDeleted == False:
self.assertEqual(
delete_response.success,
True,
"Check KubernetesCluster delete response {}, {}".format(delete_response.success, True)
)
db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0]
self.assertNotEqual(
db_cluster_removed,
None,
"KubernetesCluster not removed in DB, {}".format(db_cluster_removed)
)
def deleteAllLeftoverClusters(self):
clusters = self.listKubernetesCluster()
if clusters != None:
for cluster in clusters:
self.deleteKubernetesClusterAndVerify(cluster.id, False, True)

View File

@ -66,13 +66,13 @@ d-i partman-auto/expert_recipe string \
use_filesystem{ } filesystem{ ext2 } \
mountpoint{ /boot } \
. \
256 1000 256 linux-swap \
method{ swap } format{ } \
. \
2240 40 4000 ext4 \
method{ format } format{ } \
use_filesystem{ } filesystem{ ext4 } \
mountpoint{ / } \
. \
256 1000 256 linux-swap \
method{ swap } format{ } \
.
d-i partman-md/confirm boolean true

View File

@ -17,11 +17,10 @@
# under the License.
set -e
set -x
function cleanup_apt() {
export DEBIAN_FRONTEND=noninteractive
apt-get -y remove --purge dictionaries-common busybox isc-dhcp-client isc-dhcp-common \
apt-get -y remove --purge dictionaries-common busybox \
task-english task-ssh-server tasksel tasksel-data laptop-detect wamerican sharutils \
nano util-linux-locales krb5-locales

View File

@ -34,8 +34,6 @@ function load_conntrack_modules() {
grep nf_conntrack_ipv4 /etc/modules && return
cat >> /etc/modules << EOF
nf_conntrack_ipv4
nf_conntrack_ipv6
nf_conntrack
nf_conntrack_ftp
nf_conntrack_pptp

View File

@ -41,7 +41,7 @@ function configure_issue() {
__?.o/ Apache CloudStack SystemVM $CLOUDSTACK_RELEASE
( )# https://cloudstack.apache.org
(___(_) Debian GNU/Linux 10 \n \l
(___(_) Debian GNU/Linux 11 \n \l
EOF
}
@ -124,6 +124,26 @@ function configure_services() {
systemctl disable hyperv-daemons.hv-vss-daemon.service
systemctl disable qemu-guest-agent
# Disable container services
systemctl disable containerd
systemctl disable docker.service
systemctl stop docker.service
systemctl disable docker.socket
systemctl stop docker.socket
# Disable cloud init by default
cat <<EOF > /etc/cloud/cloud.cfg.d/cloudstack.cfg
datasource_list: ['CloudStack']
datasource:
CloudStack:
max_wait: 120
timeout: 50
EOF
touch /etc/cloud/cloud-init.disabled
systemctl stop cloud-init
systemctl disable cloud-init
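# cloud-init stays disabled for regular system VMs; the CKS node setup script re-enables and starts it on first boot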
configure_apache2
configure_strongswan
configure_issue

View File

@ -35,6 +35,12 @@ function debconf_packages() {
echo "libc6 libraries/restart-without-asking boolean false" | debconf-set-selections
}
function apt_clean() {
apt-get -y autoremove --purge
apt-get clean
apt-get autoclean
}
function install_packages() {
export DEBIAN_FRONTEND=noninteractive
export DEBIAN_PRIORITY=critical
@ -69,23 +75,33 @@ function install_packages() {
radvd \
sharutils genisoimage \
strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \
virt-what open-vm-tools qemu-guest-agent hyperv-daemons
virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \
conntrack apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common
apt-get install -y python3-json-pointer python3-jsonschema cloud-init
# python2-netaddr workaround
wget https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb
dpkg -i python-netaddr_0.7.19-1_all.deb
apt-get -y autoremove --purge
apt-get clean
apt-get autoclean
apt_clean
#32 bit architecture support for vhd-util: not required for 32 bit template
# 32 bit architecture support for vhd-util
if [ "${arch}" != "i386" ]; then
dpkg --add-architecture i386
apt-get update
${apt_get} install libuuid1:i386 libc6:i386
fi
# Install docker and containerd for CKS
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
apt-key fingerprint 0EBFCD88
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
apt-get update
${apt_get} install docker-ce docker-ce-cli containerd.io
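# docker/containerd are baked into the template but disabled in configure_services; setup_k8s_node enables them on CKS nodes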
apt_clean
install_vhd_util
# Install xenserver guest utilities as debian repos don't have it
wget https://mirrors.kernel.org/ubuntu/pool/main/x/xe-guest-utilities/xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb

View File

@ -73,4 +73,3 @@
}
]
}

View File

@ -2048,12 +2048,6 @@ test_data = {
}
},
"cks_kubernetes_versions": {
"1.14.9": {
"semanticversion": "1.14.9",
"url": "http://download.cloudstack.org/cks/setup-1.14.9.iso",
"mincpunumber": 2,
"minmemory": 2048
},
"1.15.0": {
"semanticversion": "1.15.0",
"url": "http://download.cloudstack.org/cks/setup-1.15.0.iso",
@ -2073,41 +2067,6 @@ test_data = {
"minmemory": 2048
}
},
"cks_templates": {
"kvm": {
"name": "Kubernetes-Service-Template-kvm",
"displaytext": "Kubernetes-Service-Template kvm",
"format": "qcow2",
"hypervisor": "kvm",
"ostype": "CoreOS",
"url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2",
"requireshvm": "True",
"ispublic": "True",
"isextractable": "True"
},
"xenserver": {
"name": "Kubernetes-Service-Template-xen",
"displaytext": "Kubernetes-Service-Template xen",
"format": "vhd",
"hypervisor": "xenserver",
"ostype": "CoreOS",
"url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2",
"requireshvm": "True",
"ispublic": "True",
"isextractable": "True"
},
"vmware": {
"name": "Kubernetes-Service-Template-vmware",
"displaytext": "Kubernetes-Service-Template vmware",
"format": "ova",
"hypervisor": "vmware",
"ostype": "CoreOS",
"url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova",
"requireshvm": "True",
"ispublic": "True",
"details": [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}]
}
},
"cks_service_offering": {
"name": "CKS-Instance",
"displaytext": "CKS Instance",

View File

@ -58,7 +58,9 @@ setup(name="Marvin",
"pytz",
"retries",
"PyCrypt",
"urllib3"
"kubernetes",
"urllib3",
"setuptools >= 40.3.0"
],
py_modules=['marvin.marvinPlugin'],
zip_safe=False,

View File

@ -121,6 +121,7 @@
"label.action.delete.network.processing": "Deleting Network....",
"label.action.delete.nexusvswitch": "Delete Nexus 1000v",
"label.action.delete.nic": "Remove NIC",
"label.action.delete.node": "Delete node",
"label.action.delete.physical.network": "Delete physical network",
"label.action.delete.pod": "Delete Pod",
"label.action.delete.pod.processing": "Deleting Pod....",
@ -473,6 +474,7 @@
"label.auto.assign.random.ip": "Automatically assign a random IP address",
"label.autoscale": "AutoScale",
"label.autoscale.configuration.wizard": "AutoScale Configuration Wizard",
"label.autoscalingenabled": "Autoscaling",
"label.availability": "Availability",
"label.availabilityzone": "Availability Zone",
"label.available": "Available",
@ -567,6 +569,9 @@
"label.cisco.nexus1000v.password": "Nexus 1000v Password",
"label.cisco.nexus1000v.username": "Nexus 1000v Username",
"label.ciscovnmc.resource.details": "CiscoVNMC resource details",
"label.cks.cluster.autoscalingenabled": "Enable autoscaling on this cluster",
"label.cks.cluster.maxsize": "Maximum cluster size (Worker nodes)",
"label.cks.cluster.minsize": "Minimum cluster size (Worker nodes)",
"label.cks.cluster.size": "Cluster size (Worker nodes)",
"label.cleanup": "Clean up",
"label.clear": "Clear",
@ -1354,6 +1359,7 @@
"label.maxproject": "Max. Projects",
"label.maxpublicip": "Max. Public IPs",
"label.maxsecondarystorage": "Max. Secondary Storage (GiB)",
"label.maxsize": "Maximum size",
"label.maxsnapshot": "Max. Snapshots",
"label.maxtemplate": "Max. Templates",
"label.maxuservm": "Max. User VMs",
@ -1439,6 +1445,7 @@
"label.miniops": "Min IOPS",
"label.minmaxiops": "Min IOPS / Max IOPS",
"label.minmemory": "Min Memory (in MB)",
"label.minsize": "Minimum size",
"label.minute.past.hour": "minute(s) past the hour",
"label.minutes.past.hour": "minutes(s) past the hour",
"label.monday": "Monday",
@ -2067,6 +2074,7 @@
"label.start.rolling.maintenance": "Start Rolling Maintenance",
"label.start.rolling.maintenance.payload": "Payload",
"label.start.vlan": "Start VLAN",
"label.start.vm": "Start VM",
"label.start.vxlan": "Start VXLAN",
"label.startdate": "By date (start)",
"label.startip": "Start IP",
@ -2130,6 +2138,7 @@
"label.summary": "Summary",
"label.sunday": "Sunday",
"label.supportedservices": "Supported Services",
"label.supportsautoscaling": "Supports Autoscaling",
"label.supportsha": "Supports HA",
"label.supportspublicaccess": "Supports Public Access",
"label.supportsregionlevelvpc": "Supports Region Level VPC",
@ -2493,6 +2502,7 @@
"message.action.delete.iso.for.all.zones": "The ISO is used by all zones. Please confirm that you want to delete it from all zones.",
"message.action.delete.network": "Please confirm that you want to delete this network.",
"message.action.delete.nexusvswitch": "Please confirm that you want to delete this nexus 1000v",
"message.action.delete.node": "Please confirm that you want to delete this node.",
"message.action.delete.physical.network": "Please confirm that you want to delete this physical network",
"message.action.delete.pod": "Please confirm that you want to delete this pod.",
"message.action.delete.primary.storage": "Please confirm that you want to delete this primary storage.",
@ -2544,6 +2554,7 @@
"message.action.revert.snapshot": "Please confirm that you want to revert the owning volume to this snapshot.",
"message.action.router.health.checks": "Health checks result will be fetched from router.",
"message.action.router.health.checks.disabled.warning": "Please enable router health checks.",
"message.action.scale.kubernetes.cluster.warning": "Please do not manually scale the cluster if cluster autoscaling is enabled",
"message.action.secondary.storage.read.only": "Please confirm that you want to make this secondary storage read only.",
"message.action.secondary.storage.read.write": "Please confirm that you want to make this secondary storage read write.",
"message.action.secure.host": "This will restart the host agent and libvirtd process after applying new X509 certificates, please confirm?",
@ -2797,6 +2808,7 @@
"message.delete.vpn.connection": "Please confirm that you want to delete VPN connection",
"message.delete.vpn.customer.gateway": "Please confirm that you want to delete this VPN Customer Gateway",
"message.delete.vpn.gateway": "Please confirm that you want to delete this VPN Gateway",
"message.deleting.node": "Deleting Node",
"message.deleting.vm": "Deleting VM",
"message.deployasis": "Selected template is Deploy As-Is i.e., the VM is deployed by importing an OVA with vApps directly into vCenter. Root disk(s) resize is allowed only on stopped VMs for such templates.",
"message.desc.add.new.lb.sticky.rule": "Add new LB sticky rule",
@ -3266,6 +3278,7 @@
"message.success.delete.acl.rule": "Successfully removed ACL rule",
"message.success.delete.backup.schedule": "Successfully deleted Configure VM backup schedule",
"message.success.delete.icon": "Successfully deleted icon of",
"message.success.delete.node": "Successfully Deleted Node",
"message.success.delete.snapshot.policy": "Successfully deleted snapshot policy",
"message.success.delete.static.route": "Successfully deleted static route",
"message.success.delete.tag": "Successfully deleted tag",

View File

@ -268,6 +268,10 @@
<a slot="readonly" slot-scope="text, record">
<status :text="record.readonly ? 'ReadOnly' : 'ReadWrite'" displayText />
</a>
<span slot="autoscalingenabled" slot-scope="text, record">
<status :text="record.autoscalingenabled ? 'Enabled' : 'Disabled'" />
{{ record.autoscalingenabled ? 'Enabled' : 'Disabled' }}
</span>
<span slot="current" slot-scope="text, record">
<status :text="record.current ? record.current.toString() : 'false'" />
</span>

View File

@ -135,27 +135,8 @@ export default {
this.$emit('onTabChange', key)
},
showTab (tab) {
if ('networkServiceFilter' in tab) {
if (this.resource && this.resource.virtualmachineid && !this.resource.vpcid && tab.name !== 'firewall') {
return false
}
if (this.resource && this.resource.virtualmachineid && this.resource.vpcid) {
return false
}
// dont display any option for source NAT IP of VPC
if (this.resource && this.resource.vpcid && !this.resource.issourcenat && tab.name !== 'firewall') {
return true
}
// display LB and PF options for isolated networks if static nat is disabled
if (this.resource && !this.resource.vpcid) {
if (!this.resource.isstaticnat) {
return true
} else if (tab.name === 'firewall') {
return true
}
}
return this.networkService && this.networkService.service &&
tab.networkServiceFilter(this.networkService.service)
if (this.networkService && this.networkService.service && tab.networkServiceFilter) {
return tab.networkServiceFilter(this.networkService.service)
} else if ('show' in tab) {
return tab.show(this.resource, this.$route, this.$store.getters.userInfo)
} else {

View File

@ -428,15 +428,18 @@ export default {
icon: kubernetes,
docHelp: 'plugins/cloudstack-kubernetes-service.html',
permission: ['listKubernetesClusters'],
columns: () => {
columns: (store) => {
var fields = ['name', 'state', 'size', 'cpunumber', 'memory']
if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) {
if (['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) {
fields.push('account')
}
if (store.apis.scaleKubernetesCluster.params.filter(x => x.name === 'autoscalingenabled').length > 0) {
fields.splice(2, 0, 'autoscalingenabled')
}
fields.push('zonename')
return fields
},
details: ['name', 'description', 'zonename', 'kubernetesversionname', 'size', 'controlnodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'],
details: ['name', 'description', 'zonename', 'kubernetesversionname', 'autoscalingenabled', 'minsize', 'maxsize', 'size', 'controlnodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'],
tabs: [{
name: 'k8s',
component: () => import('@/views/compute/KubernetesServiceTab.vue')

View File

@ -318,7 +318,7 @@ export default {
docHelp: 'plugins/cloudstack-kubernetes-service.html#kubernetes-supported-versions',
permission: ['listKubernetesSupportedVersions'],
columns: ['name', 'state', 'semanticversion', 'isostate', 'mincpunumber', 'minmemory', 'zonename'],
details: ['name', 'semanticversion', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'],
details: ['name', 'semanticversion', 'supportsautoscaling', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'],
actions: [
{
api: 'addKubernetesSupportedVersion',

View File

@ -760,7 +760,7 @@ export default {
if (this.$route.meta.columns) {
const columns = this.$route.meta.columns
if (columns && typeof columns === 'function') {
this.columnKeys = columns()
this.columnKeys = columns(this.$store.getters)
} else {
this.columnKeys = columns
}

View File

@ -106,10 +106,11 @@
<tooltip-label slot="label" :title="$t('label.noderootdisksize')" :tooltip="apiParams.noderootdisksize.description"/>
<a-input
v-decorator="['noderootdisksize', {
initialValue: '8',
rules: [{
validator: (rule, value, callback) => {
if (value && (isNaN(value) || value <= 0)) {
callback(this.$t('message.validate.number'))
if (value && (isNaN(value) || value < 8)) {
callback(this.$t('message.validate.min').replace('{0}', '8GB'))
}
callback()
}

View File

@ -93,8 +93,8 @@
<a-table
class="table"
size="small"
:columns="this.vmColumns"
:dataSource="this.virtualmachines"
:columns="vmColumns"
:dataSource="virtualmachines"
:rowKey="item => item.id"
:pagination="false"
>
@ -107,6 +107,26 @@
<template slot="port" slot-scope="text, record, index">
{{ cksSshStartingPort + index }}
</template>
<template slot="action" slot-scope="text, record">
<a-tooltip placement="bottom" >
<template slot="title">
{{ $t('label.action.delete.node') }}
</template>
<a-popconfirm
:title="$t('message.action.delete.node')"
@confirm="deleteNode(record)"
:okText="$t('label.yes')"
:cancelText="$t('label.no')"
:disabled="!['Created', 'Running'].includes(resource.state) || resource.autoscalingenabled"
>
<a-button
type="danger"
icon="delete"
shape="circle"
:disabled="!['Created', 'Running'].includes(resource.state) || resource.autoscalingenabled" />
</a-popconfirm>
</a-tooltip>
</template>
</a-table>
</a-tab-pane>
<a-tab-pane :tab="$t('label.firewall')" key="firewall" v-if="publicIpAddress">
@ -149,6 +169,7 @@ export default {
AnnotationsTab
},
mixins: [mixinDevice],
inject: ['parentFetchData'],
props: {
resource: {
type: Object,
@ -230,6 +251,14 @@ export default {
}
},
mounted () {
if (this.$store.getters.apis.scaleKubernetesCluster.params.filter(x => x.name === 'nodeids').length > 0) {
this.vmColumns.push({
title: this.$t('label.action'),
dataIndex: 'action',
scopedSlots: { customRender: 'action' }
})
}
this.handleFetchData()
this.setCurrentTab()
},
methods: {
@ -381,6 +410,35 @@ export default {
elem.click()
document.body.removeChild(elem)
}
},
deleteNode (node) {
const params = {
id: this.resource.id,
nodeids: node.id
}
api('scaleKubernetesCluster', params).then(json => {
const jobId = json.scalekubernetesclusterresponse.jobid
this.$store.dispatch('AddAsyncJob', {
title: this.$t('label.action.delete.node'),
jobid: jobId,
description: node.name,
status: 'progress'
})
this.$pollJob({
jobId,
loadingMessage: `${this.$t('message.deleting.node')} ${node.name}`,
catchMessage: this.$t('error.fetching.async.job.result'),
successMessage: `${this.$t('message.success.delete.node')} ${node.name}`,
successMethod: () => {
this.parentFetchData()
}
})
}).catch(error => {
this.$notifyError(error)
}).finally(() => {
this.parentFetchData()
})
}
}
}
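
Removing a node reuses scaleKubernetesCluster rather than a dedicated endpoint: passing nodeids alongside the cluster id asks the server to scale the cluster down by exactly those workers. A minimal sketch of the same call outside the component, assuming the UI's api() request helper and a hypothetical pollJobUntilDone() in place of $pollJob:

// Sketch: remove a single worker node via scaleKubernetesCluster.
// clusterId, nodeId and pollJobUntilDone are assumed placeholders.
api('scaleKubernetesCluster', { id: clusterId, nodeids: nodeId })
  .then(json => {
    // the server handles scaling asynchronously; poll the job to completion
    const jobId = json.scalekubernetesclusterresponse.jobid
    return pollJobUntilDone(jobId)
  })
  .catch(error => console.error('Failed to remove node:', error))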

View File

@ -19,48 +19,88 @@
<div class="form-layout" v-ctrl-enter="handleSubmit">
<a-spin :spinning="loading">
<a-alert type="warning">
<span slot="message" v-html="$t('message.kubernetes.cluster.scale')" />
<span
slot="message"
v-html="resource.autoscalingenabled ? $t('message.action.scale.kubernetes.cluster.warning') : $t('message.kubernetes.cluster.scale')" />
</a-alert>
<br />
<a-form
:form="form"
@submit="handleSubmit"
layout="vertical">
<a-form-item>
<tooltip-label slot="label" :title="$t('label.cks.cluster.size')" :tooltip="apiParams.size.description"/>
<a-input
v-decorator="['size', {
initialValue: originalSize,
rules: [{
validator: (rule, value, callback) => {
if (value && (isNaN(value) || value <= 0)) {
callback(this.$t('message.error.number'))
<a-form-item v-if="apiParams.autoscalingenabled">
<tooltip-label slot="label" :title="$t('label.cks.cluster.autoscalingenabled')" :tooltip="apiParams.autoscalingenabled.description"/>
<a-switch :checked="autoscalingenabled" @change="val => { autoscalingenabled = val }" />
</a-form-item>
<span v-if="autoscalingenabled">
<a-form-item>
<tooltip-label slot="label" :title="$t('label.cks.cluster.minsize')" :tooltip="apiParams.minsize.description"/>
<a-input
v-decorator="['minsize', {
initialValue: minsize,
rules: [{
validator: (rule, value, callback) => {
if (value && (isNaN(value) || value <= 0)) {
callback(this.$t('message.error.number'))
}
callback()
}
callback()
}
}]
}]"
:placeholder="apiParams.size.description"
autoFocus />
</a-form-item>
<a-form-item>
<tooltip-label slot="label" :title="$t('label.serviceofferingid')" :tooltip="apiParams.serviceofferingid.description"/>
<a-select
id="offering-selection"
v-decorator="['serviceofferingid', {}]"
showSearch
optionFilterProp="children"
:filterOption="(input, option) => {
return option.componentOptions.children[0].text.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="apiParams.serviceofferingid.description">
<a-select-option v-for="(opt, optIndex) in this.serviceOfferings" :key="optIndex">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
}]
}]"
:placeholder="apiParams.minsize.description"/>
</a-form-item>
<a-form-item>
<tooltip-label slot="label" :title="$t('label.cks.cluster.maxsize')" :tooltip="apiParams.maxsize.description"/>
<a-input
v-decorator="['maxsize', {
initialValue: maxsize,
rules: [{
validator: (rule, value, callback) => {
if (value && (isNaN(value) || value <= 0)) {
callback(this.$t('message.error.number'))
}
callback()
}
}]
}]"
:placeholder="apiParams.maxsize.description"/>
</a-form-item>
</span>
<span v-else>
<a-form-item>
<tooltip-label slot="label" :title="$t('label.serviceofferingid')" :tooltip="apiParams.serviceofferingid.description"/>
<a-select
id="offering-selection"
v-decorator="['serviceofferingid', {}]"
showSearch
optionFilterProp="children"
:filterOption="(input, option) => {
return option.componentOptions.children[0].text.toLowerCase().indexOf(input.toLowerCase()) >= 0
}"
:loading="serviceOfferingLoading"
:placeholder="apiParams.serviceofferingid.description">
<a-select-option v-for="(opt, optIndex) in serviceOfferings" :key="optIndex">
{{ opt.name || opt.description }}
</a-select-option>
</a-select>
</a-form-item>
<a-form-item>
<tooltip-label slot="label" :title="$t('label.cks.cluster.size')" :tooltip="apiParams.size.description"/>
<a-input
v-decorator="['size', {
initialValue: originalSize,
rules: [{
validator: (rule, value, callback) => {
if (value && (isNaN(value) || value <= 0)) {
callback(this.$t('message.error.number'))
}
callback()
}
}]
}]"
:placeholder="apiParams.size.description"/>
</a-form-item>
</span>
<div :span="24" class="action-button">
<a-button @click="closeAction">{{ this.$t('label.cancel') }}</a-button>
<a-button :loading="loading" ref="submit" type="primary" @click="handleSubmit">{{ this.$t('label.ok') }}</a-button>
@ -91,7 +131,11 @@ export default {
serviceOfferingLoading: false,
minCpu: 2,
minMemory: 2048,
loading: false
loading: false,
originalSize: 1,
autoscalingenabled: null,
minsize: null,
maxsize: null
}
},
beforeCreate () {
@ -99,7 +143,16 @@ export default {
this.apiParams = this.$getApiParams('scaleKubernetesCluster')
},
created () {
this.originalSize = !this.isObjectEmpty(this.resource) ? this.resource.size : 1
if (!this.isObjectEmpty(this.resource)) {
this.originalSize = this.resource.size
if (this.apiParams.autoscalingenabled) {
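// null (rather than false) means "omit the parameter", so submitting the
// form unchanged never disables autoscaling by accident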
this.autoscalingenabled = this.resource.autoscalingenabled ? true : null
this.minsize = this.resource.minsize
this.maxsize = this.resource.maxsize
}
}
},
mounted () {
this.fetchData()
},
methods: {
@ -169,12 +222,21 @@ export default {
const params = {
id: this.resource.id
}
if (this.autoscalingenabled != null) {
params.autoscalingenabled = this.autoscalingenabled
}
if (this.isValidValueForKey(values, 'size') && values.size > 0) {
params.size = values.size
}
if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings)) {
if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings) && this.autoscalingenabled == null) {
params.serviceofferingid = this.serviceOfferings[values.serviceofferingid].id
}
if (this.isValidValueForKey(values, 'minsize')) {
params.minsize = values.minsize
}
if (this.isValidValueForKey(values, 'maxsize')) {
params.maxsize = values.maxsize
}
api('scaleKubernetesCluster', params).then(json => {
const jobId = json.scalekubernetesclusterresponse.jobid
this.$pollJob({

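When autoscaling is toggled on, only the autoscaler bounds are submitted; when it is off, the form falls back to a fixed size and an optional new service offering. A condensed sketch of that branching, simplified from the handler above:

// Sketch: assemble scaleKubernetesCluster parameters from the form state.
function buildScaleParams (clusterId, values, autoscalingenabled, offeringId) {
  const params = { id: clusterId }
  if (autoscalingenabled != null) {
    // autoscaling branch: the server keeps the node count between the bounds
    params.autoscalingenabled = autoscalingenabled
    if (values.minsize) params.minsize = values.minsize
    if (values.maxsize) params.maxsize = values.maxsize
  } else {
    // manual branch: fixed size and, optionally, a new service offering
    if (values.size > 0) params.size = values.size
    if (offeringId) params.serviceofferingid = offeringId
  }
  return params
}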
View File

@ -66,6 +66,10 @@ export default {
name: 'details',
component: () => import('@/components/view/DetailsTab.vue')
}],
defaultTabs: [{
name: 'details',
component: () => import('@/components/view/DetailsTab.vue')
}],
activeTab: ''
}
},
@ -100,38 +104,63 @@ export default {
}
this.loading = true
this.portFWRuleCount = await this.fetchPortFWRule()
// disable load balancing rules only if port forwarding is enabled and
// network belongs to VPC
if (this.portFWRuleCount > 0 && this.resource.vpcid) {
this.tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'loadbalancing')
} else {
this.loadBalancerRuleCount = await this.fetchLoadBalancerRule()
// for isolated networks, display both LB and PF
// for VPC they are mutually exclusive
if (this.loadBalancerRuleCount > 0) {
this.tabs =
this.resource.vpcid ? this.$route.meta.tabs.filter(tab => tab.name !== 'portforwarding') : this.$route.meta.tabs
this.loading = false
} else {
this.tabs = this.$route.meta.tabs
}
}
await this.filterTabs()
await this.fetchAction()
this.loading = false
},
fetchAction () {
this.actions = []
if (this.$route.meta.actions) {
this.actions = this.$route.meta.actions
async filterTabs () {
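// Summary of the rules below: VPC IPs never get the firewall tab and
// collapse to a single tab (VPN, PF, or LB) once the corresponding
// feature is in use, with static NAT VPC IPs keeping only details;
// regular guest network IPs get every tab, except static NAT IPs,
// which keep only firewall.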
// VPC IPs with source NAT show only the VPN tab
if (this.resource && this.resource.vpcid && this.resource.issourcenat) {
this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn'))
return
}
// VPC IPs with VPN enabled show only the VPN tab
if (this.resource && this.resource.vpcid && this.resource.vpnenabled) {
this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn'))
return
}
// VPC IPs with static NAT show no extra tabs (details only)
if (this.resource && this.resource.vpcid && this.resource.isstaticnat) {
return
}
if (this.resource && this.resource.vpcid) {
// VPC IPs don't have firewall
let tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'firewall')
this.portFWRuleCount = await this.fetchPortFWRule()
this.loadBalancerRuleCount = await this.fetchLoadBalancerRule()
// VPC IPs with PF only have PF
if (this.portFWRuleCount > 0) {
tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'portforwarding'))
}
// VPC IPs with LB rules only have LB
if (this.loadBalancerRuleCount > 0) {
tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'loadbalancing'))
}
this.tabs = tabs
return
}
if (this.portFWRuleCount > 0 || this.loadBalancerRuleCount > 0) {
this.actions = this.actions.filter(action => action.api !== 'enableStaticNat')
// Regular guest network IPs with source NAT show every tab
if (this.resource && !this.resource.vpcid && this.resource.issourcenat) {
this.tabs = this.$route.meta.tabs
return
}
// Regular guest network IPs with static NAT show only the firewall tab
if (this.resource && !this.resource.vpcid && this.resource.isstaticnat) {
this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'firewall'))
return
}
// Regular guest networks have all tabs
if (this.resource && !this.resource.vpcid) {
this.tabs = this.$route.meta.tabs
}
},
fetchAction () {
this.actions = this.$route.meta.actions || []
},
fetchPortFWRule () {
return new Promise((resolve, reject) => {