Merge pull request #322 from shapeblue/4.18-migration-issue320

Issue-320: Backport and changes to help with 4.18 migration
Authored by Rohit Yadav on 2023-10-10 13:15:33 +05:30, committed by GitHub.
commit a54f37890c
111 changed files with 2660 additions and 519 deletions

View File

@ -279,6 +279,11 @@ hypervisor.type=kvm
# If this parameter is used, property host.overcommit.mem.mb must be set to 0.
#host.reserved.mem.mb=1024
# Number of CPU cores to subtract from advertised available cores.
# These are reserved for system activity, or otherwise share host CPU resources with
# CloudStack VM allocation.
#host.reserved.cpu.count = 0
# The model of Watchdog timer to present to the Guest.
# For all models refer to the libvirt documentation.
#vm.watchdog.model=i6300esb
@ -398,3 +403,7 @@ iscsi.session.cleanup.enabled=false
# The number of iothreads. There should be only 1 or 2 IOThreads per VM CPU (default is 1). The recommended number of iothreads is 1
# iothreads=1
# The path of an executable file/script for host health check for CloudStack to Auto Disable/Enable the host
# depending on the return value of the file/script
# agent.health.check.script.path=
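#
# Illustration only (hypothetical path, not a file shipped with the agent). As noted above, the agent
# reports the script's result to the management server, which Auto Enables the host when the check
# passes and Auto Disables it when it fails, provided the cluster-scoped setting
# enable.kvm.host.auto.enable.disable (introduced elsewhere in this change) is turned on, e.g.:
# agent.health.check.script.path=/usr/share/cloudstack-agent/scripts/host-health-check.sh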

View File

@ -40,6 +40,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.naming.ConfigurationException;
import com.cloud.resource.AgentStatusUpdater;
import com.cloud.resource.ResourceStatusUpdater;
import com.cloud.utils.NumbersUtil;
import org.apache.cloudstack.agent.lb.SetupMSListAnswer;
import org.apache.cloudstack.agent.lb.SetupMSListCommand;
@ -100,7 +102,7 @@ import com.cloud.utils.script.Script;
* For more configuration options, see the individual types.
*
**/
- public class Agent implements HandlerFactory, IAgentControl {
+ public class Agent implements HandlerFactory, IAgentControl, AgentStatusUpdater {
protected static Logger s_logger = Logger.getLogger(Agent.class);
public enum ExitStatus {
@ -409,6 +411,20 @@ public class Agent implements HandlerFactory, IAgentControl {
}
}
public void triggerUpdate() {
PingCommand command = _resource.getCurrentStatus(getId());
command.setOutOfBand(true);
s_logger.debug("Sending out of band ping");
final Request request = new Request(_id, -1, command, false);
request.setSequence(getNextSequence());
try {
_link.send(request.toBytes());
} catch (final ClosedChannelException e) {
s_logger.warn("Unable to send ping update: " + request.toString());
}
}
protected void cancelTasks() {
synchronized (_watchList) {
for (final WatchTask task : _watchList) {
@ -461,6 +477,10 @@ public class Agent implements HandlerFactory, IAgentControl {
} catch (final ClosedChannelException e) {
s_logger.warn("Unable to send request: " + request.toString());
}
if (_resource instanceof ResourceStatusUpdater) {
((ResourceStatusUpdater) _resource).registerStatusUpdater(this);
}
}
}

View File

@ -312,6 +312,9 @@ public class AgentProperties{
*/
public static final Property<String> OPENVSWITCH_DPDK_OVS_PATH = new Property<>("openvswitch.dpdk.ovs.path", null, String.class);
public static final Property<String> HEALTH_CHECK_SCRIPT_PATH =
new Property<>("agent.health.check.script.path", null, String.class);
/**
* Sets the hypervisor type.<br>
* Possible values: kvm | lxc <br>

View File

@ -27,7 +27,7 @@ public class Hypervisor {
static Map<String, HypervisorType> hypervisorTypeMap;
static Map<HypervisorType, ImageFormat> supportedImageFormatMap;
- public static enum HypervisorType {
+ public enum HypervisorType {
None, //for storage hosts
XenServer,
KVM,
@ -40,6 +40,7 @@ public class Hypervisor {
Ovm,
Ovm3,
LXC,
Custom,
Any; /*If you don't care about the hypervisor type*/
@ -57,6 +58,7 @@ public class Hypervisor {
hypervisorTypeMap.put("lxc", HypervisorType.LXC);
hypervisorTypeMap.put("any", HypervisorType.Any);
hypervisorTypeMap.put("ovm3", HypervisorType.Ovm3);
hypervisorTypeMap.put("custom", HypervisorType.Custom);
supportedImageFormatMap = new HashMap<>();
supportedImageFormatMap.put(HypervisorType.XenServer, ImageFormat.VHD);
@ -68,7 +70,19 @@ public class Hypervisor {
public static HypervisorType getType(String hypervisor) {
return hypervisor == null ? HypervisorType.None :
- hypervisorTypeMap.getOrDefault(hypervisor.toLowerCase(Locale.ROOT), HypervisorType.None);
+ (hypervisor.toLowerCase(Locale.ROOT).equalsIgnoreCase(
+ HypervisorGuru.HypervisorCustomDisplayName.value()) ? Custom :
+ hypervisorTypeMap.getOrDefault(hypervisor.toLowerCase(Locale.ROOT), HypervisorType.None));
}
/**
* Returns the display name of a hypervisor type in case the custom hypervisor is used,
* using the 'hypervisor.custom.display.name' setting. Otherwise, returns hypervisor name
*/
public String getHypervisorDisplayName() {
return !Hypervisor.HypervisorType.Custom.equals(this) ?
this.toString() :
HypervisorGuru.HypervisorCustomDisplayName.value();
}
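// Illustrative usage (not part of the committed change; assumes the 'hypervisor.custom.display.name'
// setting keeps its default value "Custom"):
//   HypervisorType type = Hypervisor.HypervisorType.getType("custom"); // resolves to HypervisorType.Custom
//   type.getHypervisorDisplayName();                                   // returns the configured display name, "Custom" by default
//   HypervisorType.KVM.getHypervisorDisplayName();                     // returns "KVM" for non-custom types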
/**

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import org.apache.cloudstack.backup.Backup;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.to.NicTO;
@ -35,6 +36,10 @@ import com.cloud.vm.VirtualMachineProfile;
public interface HypervisorGuru extends Adapter {
ConfigKey<String> HypervisorCustomDisplayName = new ConfigKey<>(String.class,
"hypervisor.custom.display.name", ConfigKey.CATEGORY_ADVANCED, "Custom",
"Display name for custom hypervisor", true, ConfigKey.Scope.Global, null);
HypervisorType getHypervisorType();
/**

View File

@ -73,6 +73,7 @@ public interface NetworkModel {
String HYPERVISOR_HOST_NAME_FILE = "hypervisor-host-name";
String CLOUD_DOMAIN_FILE = "cloud-domain";
String CLOUD_DOMAIN_ID_FILE = "cloud-domain-id";
String CLOUD_NAME_FILE = "cloud-name";
int CONFIGDATA_DIR = 0;
int CONFIGDATA_FILE = 1;
int CONFIGDATA_CONTENT = 2;
@ -83,11 +84,12 @@ public interface NetworkModel {
.put(PUBLIC_HOSTNAME_FILE, "name")
.put(CLOUD_DOMAIN_FILE, CLOUD_DOMAIN_FILE)
.put(CLOUD_DOMAIN_ID_FILE, CLOUD_DOMAIN_ID_FILE)
.put(CLOUD_NAME_FILE, CLOUD_NAME_FILE)
.put(HYPERVISOR_HOST_NAME_FILE, HYPERVISOR_HOST_NAME_FILE)
.build();
List<String> metadataFileNames = new ArrayList<>(Arrays.asList(SERVICE_OFFERING_FILE, AVAILABILITY_ZONE_FILE, LOCAL_HOSTNAME_FILE, LOCAL_IPV4_FILE, PUBLIC_HOSTNAME_FILE, PUBLIC_IPV4_FILE,
INSTANCE_ID_FILE, VM_ID_FILE, PUBLIC_KEYS_FILE, CLOUD_IDENTIFIER_FILE, HYPERVISOR_HOST_NAME_FILE));
INSTANCE_ID_FILE, VM_ID_FILE, PUBLIC_KEYS_FILE, CLOUD_IDENTIFIER_FILE, CLOUD_NAME_FILE, HYPERVISOR_HOST_NAME_FILE));
static final ConfigKey<Integer> MACIdentifier = new ConfigKey<>("Advanced",Integer.class, "mac.identifier", "0",
"This value will be used while generating the mac addresses for isolated and shared networks. The hexadecimal equivalent value will be present at the 2nd octet of the mac address. Default value is zero (0) which means that the DB id of the zone will be used.", true, ConfigKey.Scope.Zone);

View File

@ -49,6 +49,8 @@ public interface ResourceService {
*/
Host updateHost(UpdateHostCmd cmd) throws NoTransitionException;
Host autoUpdateHostAllocationState(Long hostId, ResourceState.Event resourceEvent) throws NoTransitionException;
Host cancelMaintenance(CancelMaintenanceCmd cmd);
Host reconnectHost(ReconnectHostCmd cmd) throws AgentUnavailableException;

View File

@ -1020,6 +1020,7 @@ public class ApiConstants {
public static final String PUBLIC_MTU = "publicmtu";
public static final String PRIVATE_MTU = "privatemtu";
public static final String MTU = "mtu";
public static final String AUTO_ENABLE_KVM_HOST = "autoenablekvmhost";
public static final String LIST_APIS = "listApis";
/**

View File

@ -19,7 +19,6 @@ package org.apache.cloudstack.api.command.admin.host;
import com.cloud.host.Host;
import com.cloud.user.Account;
import org.apache.cloudstack.acl.RoleType;
- import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.ApiErrorCode;
@ -117,9 +116,6 @@ public class UpdateHostCmd extends BaseCmd {
Host result;
try {
result = _resourceService.updateHost(this);
- if(getAnnotation() != null) {
- annotationService.addAnnotation(getAnnotation(), AnnotationService.EntityType.HOST, result.getUuid(), true);
- }
HostResponse hostResponse = _responseGenerator.createHostResponse(result);
hostResponse.setResponseName(getCommandName());
this.setResponseObject(hostResponse);

View File

@ -59,6 +59,7 @@ public class ListCapabilitiesCmd extends BaseCmd {
response.setAllowUserViewAllDomainAccounts((Boolean)capabilities.get("allowUserViewAllDomainAccounts"));
response.setKubernetesServiceEnabled((Boolean)capabilities.get("kubernetesServiceEnabled"));
response.setKubernetesClusterExperimentalFeaturesEnabled((Boolean)capabilities.get("kubernetesClusterExperimentalFeaturesEnabled"));
response.setCustomHypervisorDisplayName((String) capabilities.get("customHypervisorDisplayName"));
if (capabilities.containsKey("apiLimitInterval")) {
response.setApiLimitInterval((Integer)capabilities.get("apiLimitInterval"));
}

View File

@ -23,6 +23,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import com.cloud.hypervisor.HypervisorGuru;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants;
@ -342,9 +343,11 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd {
throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
"Parameter zoneids cannot combine all zones (-1) option with other zones");
- if (isDirectDownload() && !getHypervisor().equalsIgnoreCase(Hypervisor.HypervisorType.KVM.toString())) {
- throw new ServerApiException(ApiErrorCode.PARAM_ERROR,
- "Parameter directdownload is only allowed for KVM templates");
+ String customHypervisor = HypervisorGuru.HypervisorCustomDisplayName.value();
+ if (isDirectDownload() && !(getHypervisor().equalsIgnoreCase(Hypervisor.HypervisorType.KVM.toString())
+ || getHypervisor().equalsIgnoreCase(customHypervisor))) {
+ throw new ServerApiException(ApiErrorCode.PARAM_ERROR, String.format("Parameter directdownload " +
+ "is only allowed for KVM or %s templates", customHypervisor));
}
if (!isDeployAsIs() && osTypeId == null) {

View File

@ -100,6 +100,10 @@ public class CapabilitiesResponse extends BaseResponse {
@Param(description = "true if experimental features for Kubernetes cluster such as Docker private registry are enabled, false otherwise")
private boolean kubernetesClusterExperimentalFeaturesEnabled;
@SerializedName("customhypervisordisplayname")
@Param(description = "Display name for custom hypervisor", since = "4.19.0")
private String customHypervisorDisplayName;
@SerializedName("defaultuipagesize")
@Param(description = "default page size in the UI for various views, value set in the configurations", since = "4.15.2")
private Long defaultUiPageSize;
@ -215,4 +219,8 @@ public class CapabilitiesResponse extends BaseResponse {
public void setInstancesDisksStatsRetentionTime(Integer instancesDisksStatsRetentionTime) {
this.instancesDisksStatsRetentionTime = instancesDisksStatsRetentionTime;
}
public void setCustomHypervisorDisplayName(String customHypervisorDisplayName) {
this.customHypervisorDisplayName = customHypervisorDisplayName;
}
}

View File

@ -24,7 +24,6 @@ import org.apache.cloudstack.api.EntityReference;
import com.cloud.host.Host;
import com.cloud.host.Status;
- import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@ -84,7 +83,7 @@ public class HostForMigrationResponse extends BaseResponse {
@SerializedName(ApiConstants.HYPERVISOR)
@Param(description = "the host hypervisor")
- private HypervisorType hypervisor;
+ private String hypervisor;
@SerializedName("cpunumber")
@Param(description = "the CPU number of the host")
@ -295,7 +294,7 @@ public class HostForMigrationResponse extends BaseResponse {
this.version = version;
}
- public void setHypervisor(HypervisorType hypervisor) {
+ public void setHypervisor(String hypervisor) {
this.hypervisor = hypervisor;
}

View File

@ -29,7 +29,6 @@ import org.apache.cloudstack.outofbandmanagement.OutOfBandManagement;
import com.cloud.host.Host;
import com.cloud.host.Status;
- import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@ -89,7 +88,7 @@ public class HostResponse extends BaseResponseWithAnnotations {
@SerializedName(ApiConstants.HYPERVISOR)
@Param(description = "the host hypervisor")
- private HypervisorType hypervisor;
+ private String hypervisor;
@SerializedName("cpusockets")
@Param(description = "the number of CPU sockets on the host")
@ -335,7 +334,7 @@ public class HostResponse extends BaseResponseWithAnnotations {
this.version = version;
}
- public void setHypervisor(HypervisorType hypervisor) {
+ public void setHypervisor(String hypervisor) {
this.hypervisor = hypervisor;
}
@ -602,7 +601,7 @@ public class HostResponse extends BaseResponseWithAnnotations {
return version;
}
- public HypervisorType getHypervisor() {
+ public String getHypervisor() {
return hypervisor;
}

View File

@ -20,7 +20,6 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
- import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilities;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
@ -37,7 +36,7 @@ public class HypervisorCapabilitiesResponse extends BaseResponse {
@SerializedName(ApiConstants.HYPERVISOR)
@Param(description = "the hypervisor type")
- private HypervisorType hypervisor;
+ private String hypervisor;
@SerializedName(ApiConstants.MAX_GUESTS_LIMIT)
@Param(description = "the maximum number of guest vms recommended for this hypervisor")
@ -83,11 +82,11 @@ public class HypervisorCapabilitiesResponse extends BaseResponse {
this.hypervisorVersion = hypervisorVersion;
}
- public HypervisorType getHypervisor() {
+ public String getHypervisor() {
return hypervisor;
}
- public void setHypervisor(HypervisorType hypervisor) {
+ public void setHypervisor(String hypervisor) {
this.hypervisor = hypervisor;
}

View File

@ -25,7 +25,6 @@ import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponseWithTagInformation;
import org.apache.cloudstack.api.EntityReference;
- import com.cloud.hypervisor.Hypervisor;
import com.cloud.serializer.Param;
import com.cloud.vm.snapshot.VMSnapshot;
import com.google.gson.annotations.SerializedName;
@ -111,7 +110,7 @@ public class VMSnapshotResponse extends BaseResponseWithTagInformation implement
@SerializedName(ApiConstants.HYPERVISOR)
@Param(description = "the type of hypervisor on which snapshot is stored")
- private Hypervisor.HypervisorType hypervisor;
+ private String hypervisor;
public VMSnapshotResponse() {
tags = new LinkedHashSet<ResourceTagResponse>();
@ -266,11 +265,11 @@ public class VMSnapshotResponse extends BaseResponseWithTagInformation implement
this.tags = tags;
}
- public Hypervisor.HypervisorType getHypervisor() {
+ public String getHypervisor() {
return hypervisor;
}
- public void setHypervisor(Hypervisor.HypervisorType hypervisor) {
+ public void setHypervisor(String hypervisor) {
this.hypervisor = hypervisor;
}
}

View File

@ -0,0 +1,27 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.framework.config.Configurable;
import com.cloud.utils.component.Manager;
public interface UserDataManager extends Manager, Configurable {
String concatenateUserData(String userdata1, String userdata2, String userdataProvider);
String validateUserData(String userData, BaseCmd.HTTPMethod httpmethod);
}
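A minimal usage sketch of this interface (hedged: the caller code below is illustrative, it assumes the third argument is the registered provider name such as "cloud-init" from the CloudInitUserDataProvider added later in this change, and that the user data arguments are base64-encoded as elsewhere in this change):

// illustrative caller code, not part of the commit
String merged = userDataManager.concatenateUserData(encodedUserData1, encodedUserData2, "cloud-init");
String validated = userDataManager.validateUserData(encodedUserData1, BaseCmd.HTTPMethod.POST);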

View File

@ -352,6 +352,16 @@
<artifactId>cloud-plugin-outofbandmanagement-driver-redfish</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-userdata-cloud-init</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-userdata</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-mom-rabbitmq</artifactId>

View File

@ -45,6 +45,7 @@ import org.eclipse.jetty.server.handler.MovedContextHandler;
import org.eclipse.jetty.server.handler.RequestLogHandler;
import org.eclipse.jetty.server.handler.gzip.GzipHandler;
import org.eclipse.jetty.server.session.SessionHandler;
import org.eclipse.jetty.util.ssl.KeyStoreScanner;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
@ -241,6 +242,14 @@ public class ServerDaemon implements Daemon {
sslConnector.setPort(httpsPort);
sslConnector.setHost(bindInterface);
server.addConnector(sslConnector);
// add scanner to auto-reload certs
try {
KeyStoreScanner scanner = new KeyStoreScanner(sslContextFactory);
server.addBean(scanner);
} catch (Exception ex) {
LOG.error("failed to set up keystore scanner, manual refresh of certificates will be required", ex);
}
}
}

View File

@ -24,6 +24,7 @@ import com.cloud.host.Host;
public class PingCommand extends Command {
Host.Type hostType;
long hostId;
boolean outOfBand;
protected PingCommand() {
}
@ -33,6 +34,12 @@ public class PingCommand extends Command {
hostId = id;
}
public PingCommand(Host.Type type, long id, boolean oob) {
hostType = type;
hostId = id;
outOfBand = oob;
}
public Host.Type getHostType() {
return hostType;
}
@ -41,6 +48,10 @@ public class PingCommand extends Command {
return hostId;
}
public boolean getOutOfBand() { return outOfBand; }
public void setOutOfBand(boolean oob) { this.outOfBand = oob; }
@Override
public boolean executeInSequence() {
return false;

View File

@ -29,6 +29,7 @@ public class PingRoutingCommand extends PingCommand {
boolean _gatewayAccessible = true;
boolean _vnetAccessible = true;
private Boolean hostHealthCheckResult;
protected PingRoutingCommand() {
}
@ -57,4 +58,12 @@ public class PingRoutingCommand extends PingCommand {
public void setVnetAccessible(boolean vnetAccessible) {
_vnetAccessible = vnetAccessible;
}
public Boolean getHostHealthCheckResult() {
return hostHealthCheckResult;
}
public void setHostHealthCheckResult(Boolean hostHealthCheckResult) {
this.hostHealthCheckResult = hostHealthCheckResult;
}
}

View File

@ -44,6 +44,7 @@ public class StartupRoutingCommand extends StartupCommand {
List<String> hostTags = new ArrayList<String>();
String hypervisorVersion;
HashMap<String, HashMap<String, VgpuTypesInfo>> groupDetails = new HashMap<String, HashMap<String, VgpuTypesInfo>>();
private Boolean hostHealthCheckResult;
public StartupRoutingCommand() {
super(Host.Type.Routing);
@ -188,4 +189,12 @@ public class StartupRoutingCommand extends StartupCommand {
public void setSupportsClonedVolumes(boolean supportsClonedVolumes) {
this.supportsClonedVolumes = supportsClonedVolumes;
}
public Boolean getHostHealthCheckResult() {
return hostHealthCheckResult;
}
public void setHostHealthCheckResult(Boolean hostHealthCheckResult) {
this.hostHealthCheckResult = hostHealthCheckResult;
}
}

View File

@ -0,0 +1,27 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.resource;
/**
* AgentStatusUpdater is an agent with triggerable update functionality
*/
public interface AgentStatusUpdater {
/**
* Trigger the sending of an update (Ping).
*/
void triggerUpdate();
}

View File

@ -0,0 +1,29 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.resource;
/**
* ResourceStatusUpdater is a resource that can trigger out of band status updates
*/
public interface ResourceStatusUpdater {
/**
* Register an AgentStatusUpdater to use for triggering out of band updates.
*
* @param updater The object to call triggerUpdate() on
*/
void registerStatusUpdater(AgentStatusUpdater updater);
}
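Read together with the Agent changes earlier in this commit (the Agent implements AgentStatusUpdater and calls registerStatusUpdater(this) on any ServerResource that implements ResourceStatusUpdater), a resource keeps the updater and calls triggerUpdate() when it notices an out-of-band change. A minimal sketch under those assumptions (the class below is illustrative, not part of this change, and is assumed to live alongside these interfaces in com.cloud.resource):

public class ExampleStatusReportingResource implements ResourceStatusUpdater {
    private volatile AgentStatusUpdater statusUpdater;

    @Override
    public void registerStatusUpdater(AgentStatusUpdater updater) {
        // stored when the Agent registers itself after the connection comes up
        this.statusUpdater = updater;
    }

    // called from the resource's own monitoring logic when something changes out of band
    void notifyOutOfBandChange() {
        AgentStatusUpdater updater = statusUpdater;
        if (updater != null) {
            updater.triggerUpdate(); // the Agent then sends an out-of-band PingCommand
        }
    }
}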

View File

@ -25,6 +25,9 @@ public class CheckUrlCommand extends Command {
private String format;
private String url;
private Integer connectTimeout;
private Integer connectionRequestTimeout;
private Integer socketTimeout;
public String getFormat() {
return format;
@ -34,12 +37,27 @@ public class CheckUrlCommand extends Command {
return url;
}
public Integer getConnectTimeout() { return connectTimeout; }
public Integer getConnectionRequestTimeout() { return connectionRequestTimeout; }
public Integer getSocketTimeout() { return socketTimeout; }
public CheckUrlCommand(final String format,final String url) {
super();
this.format = format;
this.url = url;
}
public CheckUrlCommand(final String format,final String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
super();
this.format = format;
this.url = url;
this.connectTimeout = connectTimeout;
this.socketTimeout = socketTimeout;
this.connectionRequestTimeout = connectionRequestTimeout;
}
@Override
public boolean executeInSequence() {
return false;

View File

@ -54,7 +54,7 @@ public class DirectDownloadHelper {
public static boolean checkUrlExistence(String url) {
try {
- DirectTemplateDownloader checker = getCheckerDownloader(url);
+ DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null);
return checker.checkUrl(url);
} catch (CloudRuntimeException e) {
LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e);
@ -62,22 +62,37 @@ public class DirectDownloadHelper {
}
}
- private static DirectTemplateDownloader getCheckerDownloader(String url) {
+ public static boolean checkUrlExistence(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
+ try {
+ DirectTemplateDownloader checker = getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout);
+ return checker.checkUrl(url);
+ } catch (CloudRuntimeException e) {
+ LOGGER.error(String.format("Cannot check URL %s is reachable due to: %s", url, e.getMessage()), e);
+ return false;
+ }
+ }
+ private static DirectTemplateDownloader getCheckerDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
if (url.toLowerCase().startsWith("https:")) {
- return new HttpsDirectTemplateDownloader(url);
+ return new HttpsDirectTemplateDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout);
} else if (url.toLowerCase().startsWith("http:")) {
- return new HttpDirectTemplateDownloader(url);
+ return new HttpDirectTemplateDownloader(url, connectTimeout, socketTimeout);
} else if (url.toLowerCase().startsWith("nfs:")) {
return new NfsDirectTemplateDownloader(url);
} else if (url.toLowerCase().endsWith(".metalink")) {
- return new MetalinkDirectTemplateDownloader(url);
+ return new MetalinkDirectTemplateDownloader(url, connectTimeout, socketTimeout);
} else {
throw new CloudRuntimeException(String.format("Cannot find a download checker for url: %s", url));
}
}
public static Long getFileSize(String url, String format) {
- DirectTemplateDownloader checker = getCheckerDownloader(url);
+ DirectTemplateDownloader checker = getCheckerDownloader(url, null, null, null);
return checker.getRemoteFileSize(url, format);
}
public static Long getFileSize(String url, String format, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
DirectTemplateDownloader checker = getCheckerDownloader(url, connectTimeout, connectionRequestTimeout, socketTimeout);
return checker.getRemoteFileSize(url, format);
}
}

View File

@ -50,8 +50,8 @@ public class HttpDirectTemplateDownloader extends DirectTemplateDownloaderImpl {
protected GetMethod request;
protected Map<String, String> reqHeaders = new HashMap<>();
- protected HttpDirectTemplateDownloader(String url) {
- this(url, null, null, null, null, null, null, null);
+ protected HttpDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) {
+ this(url, null, null, null, null, connectTimeout, socketTimeout, null);
}
public HttpDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum,

View File

@ -65,8 +65,8 @@ public class HttpsDirectTemplateDownloader extends DirectTemplateDownloaderImpl
protected CloseableHttpClient httpsClient;
private HttpUriRequest req;
- protected HttpsDirectTemplateDownloader(String url) {
- this(url, null, null, null, null, null, null, null, null);
+ protected HttpsDirectTemplateDownloader(String url, Integer connectTimeout, Integer connectionRequestTimeout, Integer socketTimeout) {
+ this(url, null, null, null, null, connectTimeout, socketTimeout, connectionRequestTimeout, null);
}
public HttpsDirectTemplateDownloader(String url, Long templateId, String destPoolPath, String checksum, Map<String, String> headers,

View File

@ -60,8 +60,8 @@ public class MetalinkDirectTemplateDownloader extends DirectTemplateDownloaderIm
}
}
- protected MetalinkDirectTemplateDownloader(String url) {
- this(url, null, null, null, null, null, null, null);
+ protected MetalinkDirectTemplateDownloader(String url, Integer connectTimeout, Integer socketTimeout) {
+ this(url, null, null, null, null, connectTimeout, socketTimeout, null);
}
public MetalinkDirectTemplateDownloader(String url, String destPoolPath, Long templateId, String checksum,

View File

@ -39,5 +39,10 @@
<property name="typeClass"
value="com.cloud.utils.component.PluggableService" />
</bean>
<bean class="org.apache.cloudstack.spring.lifecycle.registry.RegistryLifecycle">
<property name="registry" ref="userDataProvidersRegistry" />
<property name="typeClass" value="org.apache.cloudstack.userdata.UserDataProvider" />
</bean>
</beans>

View File

@ -342,4 +342,8 @@
<bean id="kubernetesClusterHelperRegistry"
class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry">
</bean>
<bean id="userDataProvidersRegistry"
class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry">
</bean>
</beans>

View File

@ -56,7 +56,7 @@ public class BaseDirectTemplateDownloaderTest {
private HttpEntity httpEntity;
@InjectMocks
- protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl);
+ protected HttpsDirectTemplateDownloader httpsDownloader = new HttpsDirectTemplateDownloader(httpUrl, 1000, 1000, 1000);
@Before
public void init() throws IOException {

View File

@ -25,7 +25,8 @@ import org.mockito.InjectMocks;
public class MetalinkDirectTemplateDownloaderTest extends BaseDirectTemplateDownloaderTest {
@InjectMocks
- protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl);
+ protected MetalinkDirectTemplateDownloader metalinkDownloader = new MetalinkDirectTemplateDownloader(httpsUrl, 1000, 1000);
@Test
public void testCheckUrlMetalink() {
metalinkDownloader.downloader = httpsDownloader;

View File

@ -83,6 +83,9 @@ public interface VirtualMachineManager extends Manager {
ConfigKey<Boolean> AllowExposeDomainInMetadata = new ConfigKey<>("Advanced", Boolean.class, "metadata.allow.expose.domain",
"false", "If set to true, it allows the VM's domain to be seen in metadata.", true, ConfigKey.Scope.Domain);
ConfigKey<String> MetadataCustomCloudName = new ConfigKey<>("Advanced", String.class, "metadata.custom.cloud.name", "",
"If provided, a custom cloud-name in cloud-init metadata", true, ConfigKey.Scope.Zone);
interface Topics {
String VM_POWER_STATE = "vm.powerstate";
}
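Read together with the cloud-name metadata file added to NetworkModel earlier in this change, the intent of MetadataCustomCloudName is that a non-empty, zone-scoped value is exposed to guests as the cloud-name entry in the cloud-init metadata. A hedged sketch of reading it (zoneId is an assumed variable):

// illustrative only: the key is zone-scoped
String cloudName = VirtualMachineManager.MetadataCustomCloudName.valueIn(zoneId);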

View File

@ -79,7 +79,7 @@ public interface VolumeOrchestrationService {
Long.class,
"storage.max.volume.size",
"2000",
"The maximum size for a volume (in GB).",
"The maximum size for a volume (in GiB).",
true);
VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType)

View File

@ -39,6 +39,13 @@ import com.cloud.resource.ServerResource;
public interface AgentManager {
static final ConfigKey<Integer> Wait = new ConfigKey<Integer>("Advanced", Integer.class, "wait", "1800", "Time in seconds to wait for control commands to return",
true);
ConfigKey<Boolean> EnableKVMAutoEnableDisable = new ConfigKey<>(Boolean.class,
"enable.kvm.host.auto.enable.disable",
"Advanced",
"false",
"(KVM only) Enable Auto Disable/Enable KVM hosts in the cluster " +
"according to the hosts health check results",
true, ConfigKey.Scope.Cluster, null);
public enum TapAgentsAction {
Add, Del, Contains,

View File

@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.impl.ConfigurationSubGroupVO;
import com.cloud.dc.ClusterVO;
@ -59,6 +60,10 @@ public interface ConfigurationManager {
public static final String MESSAGE_CREATE_VLAN_IP_RANGE_EVENT = "Message.CreateVlanIpRange.Event";
public static final String MESSAGE_DELETE_VLAN_IP_RANGE_EVENT = "Message.DeleteVlanIpRange.Event";
static final String VM_USERDATA_MAX_LENGTH_STRING = "vm.userdata.max.length";
static final ConfigKey<Integer> VM_USERDATA_MAX_LENGTH = new ConfigKey<>("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768",
"Max length of vm userdata after base64 decoding. Default is 32768 and maximum is 1048576", true);
/**
* @param offering
* @return

View File

@ -51,6 +51,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext;
import org.apache.cloudstack.managed.context.ManagedContextRunnable;
import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.commons.lang3.BooleanUtils;
import org.apache.log4j.Logger;
import org.apache.log4j.MDC;
@ -1250,6 +1251,52 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
super(type, link, data);
}
private void processHostHealthCheckResult(Boolean hostHealthCheckResult, long hostId) {
if (hostHealthCheckResult == null) {
return;
}
HostVO host = _hostDao.findById(hostId);
if (host == null) {
s_logger.error(String.format("Unable to find host with ID: %s", hostId));
return;
}
if (!BooleanUtils.toBoolean(EnableKVMAutoEnableDisable.valueIn(host.getClusterId()))) {
s_logger.debug(String.format("%s is disabled for the cluster %s, cannot process the health check result " +
"received for the host %s", EnableKVMAutoEnableDisable.key(), host.getClusterId(), host.getName()));
return;
}
ResourceState.Event resourceEvent = hostHealthCheckResult ? ResourceState.Event.Enable : ResourceState.Event.Disable;
try {
s_logger.info(String.format("Host health check %s, auto %s KVM host: %s",
hostHealthCheckResult ? "succeeds" : "fails",
hostHealthCheckResult ? "enabling" : "disabling",
host.getName()));
_resourceMgr.autoUpdateHostAllocationState(hostId, resourceEvent);
} catch (NoTransitionException e) {
s_logger.error(String.format("Cannot Auto %s host: %s", resourceEvent, host.getName()), e);
}
}
private void processStartupRoutingCommand(StartupRoutingCommand startup, long hostId) {
if (startup == null) {
s_logger.error("Empty StartupRoutingCommand received");
return;
}
Boolean hostHealthCheckResult = startup.getHostHealthCheckResult();
processHostHealthCheckResult(hostHealthCheckResult, hostId);
}
private void processPingRoutingCommand(PingRoutingCommand pingRoutingCommand, long hostId) {
if (pingRoutingCommand == null) {
s_logger.error("Empty PingRoutingCommand received");
return;
}
Boolean hostHealthCheckResult = pingRoutingCommand.getHostHealthCheckResult();
processHostHealthCheckResult(hostHealthCheckResult, hostId);
}
protected void processRequest(final Link link, final Request request) {
final AgentAttache attache = (AgentAttache)link.attachment();
final Command[] cmds = request.getCommands();
@ -1291,6 +1338,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
try {
if (cmd instanceof StartupRoutingCommand) {
final StartupRoutingCommand startup = (StartupRoutingCommand) cmd;
processStartupRoutingCommand(startup, hostId);
answer = new StartupAnswer(startup, attache.getId(), mgmtServiceConf.getPingInterval());
} else if (cmd instanceof StartupProxyCommand) {
final StartupProxyCommand startup = (StartupProxyCommand) cmd;
@ -1322,6 +1370,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
// if the router is sending a ping, verify the
// gateway was pingable
if (cmd instanceof PingRoutingCommand) {
processPingRoutingCommand((PingRoutingCommand) cmd, hostId);
final boolean gatewayAccessible = ((PingRoutingCommand)cmd).isGatewayAccessible();
final HostVO host = _hostDao.findById(Long.valueOf(cmdHostId));
@ -1748,8 +1797,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
@Override
public ConfigKey<?>[] getConfigKeys() {
- return new ConfigKey<?>[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, DirectAgentPoolSize,
- DirectAgentThreadCap };
+ return new ConfigKey<?>[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize,
+ DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable };
}
protected class SetHostParamsListener implements Listener {

View File

@ -47,6 +47,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;
import com.cloud.event.ActionEventUtils;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@ -837,8 +838,15 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
public void start(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner) {
try {
advanceStart(vmUuid, params, planToDeploy, planner);
- } catch (ConcurrentOperationException | InsufficientCapacityException e) {
- throw new CloudRuntimeException(String.format("Unable to start a VM [%s] due to [%s].", vmUuid, e.getMessage()), e).add(VirtualMachine.class, vmUuid);
+ } catch (final ConcurrentOperationException e) {
+ throw new CloudRuntimeException("Unable to start a VM due to concurrent operation", e).add(VirtualMachine.class, vmUuid);
+ } catch (final InsufficientCapacityException e) {
+ final CallContext cctxt = CallContext.current();
+ final Account account = cctxt.getCallingAccount();
+ if (account.getType() == Account.Type.ADMIN) {
+ throw new CloudRuntimeException("Unable to start a VM due to insufficient capacity: " + e.getMessage(), e).add(VirtualMachine.class, vmUuid);
+ }
+ throw new CloudRuntimeException("Unable to start a VM due to insufficient capacity", e).add(VirtualMachine.class, vmUuid);
} catch (final ResourceUnavailableException e) {
if (e.getScope() != null && e.getScope().equals(VirtualRouter.class)){
throw new CloudRuntimeException("Network is unavailable. Please contact administrator", e).add(VirtualMachine.class, vmUuid);
@ -1133,6 +1141,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
resourceCountIncrement(owner.getAccountId(),new Long(offering.getCpu()), new Long(offering.getRamSize()));
}
String adminError = null;
boolean canRetry = true;
ExcludeList avoids = null;
try {
@ -1222,6 +1231,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
reuseVolume = false;
continue;
}
if (account.getType() == Account.Type.ADMIN && adminError != null) {
String message = String.format("Unable to create a deployment for %s. Previous error: %s", vmProfile, adminError);
throw new InsufficientServerCapacityException(message, DataCenter.class, plan.getDataCenterId(), areAffinityGroupsAssociated(vmProfile));
}
throw new InsufficientServerCapacityException("Unable to create a deployment for " + vmProfile, DataCenter.class, plan.getDataCenterId(),
areAffinityGroupsAssociated(vmProfile));
}
@ -1386,7 +1399,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
throw new ExecutionException("Unable to start VM:"+vm.getUuid()+" due to error in finalizeStart, not retrying");
}
}
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + (startAnswer == null ? " no start answer" : startAnswer.getDetails()));
adminError = startAnswer == null ? " no start answer" : startAnswer.getDetails();
s_logger.info("Unable to start VM on " + dest.getHost() + " due to " + adminError);
if (startAnswer != null && startAnswer.getContextParam("stopRetry") != null) {
break;
}
@ -1399,7 +1414,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
canRetry = false;
throw new AgentUnavailableException("Unable to start " + vm.getHostName(), destHostId, e);
} catch (final ResourceUnavailableException e) {
s_logger.warn("Unable to contact resource.", e);
s_logger.info("Unable to contact resource.", e);
adminError = e.getMessage();
if (!avoids.add(e)) {
if (e.getScope() == Volume.class || e.getScope() == Nic.class) {
throw e;
@ -1455,6 +1471,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
}
if (startedVm == null) {
if (account.getType() == Account.Type.ADMIN && adminError != null) {
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "): " + adminError);
}
throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details");
}
}
@ -3726,7 +3745,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (cmd instanceof PingRoutingCommand) {
final PingRoutingCommand ping = (PingRoutingCommand)cmd;
if (ping.getHostVmStateReport() != null) {
- _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport());
+ _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand());
}
scanStalledVMInTransitionStateOnUpHost(agentId);
@ -4708,7 +4727,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval,
VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool,
HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize,
- AllowExposeDomainInMetadata
+ AllowExposeDomainInMetadata, MetadataCustomCloudName
};
}
@ -4804,6 +4823,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
VM_SYNC_ALERT_SUBJECT, "VM " + vm.getHostName() + "(" + vm.getInstanceName() + ") state is sync-ed (" + vm.getState()
+ " -> Running) from out-of-context transition. VM network environment may need to be reset");
ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, vm.getDomainId(),
EventTypes.EVENT_VM_START, "Out of band VM power on", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
s_logger.info("VM " + vm.getInstanceName() + " is sync-ed to at Running state according to power-on report from hypervisor");
break;
@ -4837,6 +4858,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
case Stopping:
case Running:
case Stopped:
ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,vm.getDomainId(),
EventTypes.EVENT_VM_STOP, "Out of band VM power off", vm.getId(), ApiCommandResourceType.VirtualMachine.toString());
case Migrating:
if (s_logger.isInfoEnabled()) {
s_logger.info(

View File

@ -27,7 +27,7 @@ public interface VirtualMachinePowerStateSync {
void processHostVmStateReport(long hostId, Map<String, HostVmStateReportEntry> report);
// to adapt legacy ping report
- void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report);
+ void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force);
Map<Long, VirtualMachine.PowerState> convertVmStateReport(Map<String, HostVmStateReportEntry> states);
}

View File

@ -55,19 +55,19 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
s_logger.debug("Process host VM state report. host: " + hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
- processReport(hostId, translatedInfo);
+ processReport(hostId, translatedInfo, false);
}
@Override
- public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report) {
+ public void processHostVmStatePingReport(long hostId, Map<String, HostVmStateReportEntry> report, boolean force) {
if (s_logger.isDebugEnabled())
s_logger.debug("Process host VM state report from ping process. host: " + hostId);
Map<Long, VirtualMachine.PowerState> translatedInfo = convertVmStateReport(report);
- processReport(hostId, translatedInfo);
+ processReport(hostId, translatedInfo, force);
}
- private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo) {
+ private void processReport(long hostId, Map<Long, VirtualMachine.PowerState> translatedInfo, boolean force) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Process VM state report. host: " + hostId + ", number of records in report: " + translatedInfo.size());
@ -117,7 +117,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// Make sure powerState is up to date for missing VMs
try {
- if (!_instanceDao.isPowerStateUpToDate(instance.getId())) {
+ if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) {
s_logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: " + instance.getId());
_instanceDao.resetVmPowerStateTracking(instance.getId());
continue;
@ -150,7 +150,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime();
- if (milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
+ if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) {
s_logger.debug("vm id: " + instance.getId() + " - time since last state update(" + milliSecondsSinceLastStateUpdate + "ms) has passed graceful period");
// this is were a race condition might have happened if we don't re-fetch the instance;

View File

@ -28,6 +28,7 @@ import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Executors;
@ -3516,7 +3517,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
setRestartRequired(network, restartRequired);
return status;
} else if (livePatch) {
- List<DomainRouterVO> domainRouters = routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER, VirtualRouter.Role.INTERNAL_LB_VM);
+ List<DomainRouterVO> domainRouters;
+ if (Objects.nonNull(network.getVpcId())) {
+ domainRouters = routerDao.listByVpcId(network.getVpcId());
+ } else {
+ domainRouters = routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER, VirtualRouter.Role.INTERNAL_LB_VM);
+ }
for (DomainRouterVO router: domainRouters) {
try {
VMInstanceVO instanceVO = _vmDao.findById(router.getId());

View File

@ -58,6 +58,8 @@
<module>storage/image</module>
<module>storage/snapshot</module>
<module>storage/volume</module>
<module>userdata/cloud-init</module>
<module>userdata</module>
</modules>
<profiles>
<profile>

View File

@ -26,6 +26,7 @@ import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.db.GenericDao;
@Entity
@ -72,7 +73,7 @@ public class GuestOSHypervisorVO implements GuestOSHypervisor {
@Override
public String getHypervisorType() {
- return hypervisorType;
+ return Hypervisor.HypervisorType.getType(hypervisorType).toString();
}
@Override

View File

@ -51,7 +51,7 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.capacity.standby', '10', 'The minimal number of command execution sessions that system is able to serve immediately(standby capacity)');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.cmd.execution.time.max', '30', 'The max command execution time in minute');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'secstorage.session.max', '50', 'The max number of command execution sessions that a SSVM can handle');
- INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GB).');
+ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Storage', 'DEFAULT', 'management-server', 'storage.max.volume.size', '2000', 'The maximum size for a volume (in GiB).');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'task.cleanup.retry.interval', '600', 'Time (in seconds) to wait before retrying cleanup of tasks if the cleanup failed previously. 0 means to never retry.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.additional.vnc.portrange.start', '50000', 'Start port number of additional VNC port range');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.percluster.host.max', '8', 'maxmium hosts per vCenter cluster(do not let it grow over 8)');

View File

@ -121,6 +121,11 @@
<property name="name" value="KVM Agent"/>
</bean>
<bean id="CustomServerDiscoverer"
class="com.cloud.hypervisor.discoverer.CustomServerDiscoverer">
<property name="name" value="CustomHW Agent" />
</bean>
<bean id="BareMetalDiscoverer" class="com.cloud.baremetal.BareMetalDiscoverer">
<property name="name" value="Bare Metal Agent"/>
</bean>

View File

@ -0,0 +1,36 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-engine-userdata-cloud-init</artifactId>
<name>Apache CloudStack Engine Cloud-Init Userdata Component</name>
<parent>
<artifactId>cloud-engine</artifactId>
<groupId>org.apache.cloudstack</groupId>
<version>4.18.1.0</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-userdata</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -0,0 +1,286 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.zip.GZIPInputStream;
import javax.mail.BodyPart;
import javax.mail.MessagingException;
import javax.mail.Multipart;
import javax.mail.Session;
import javax.mail.internet.MimeBodyPart;
import javax.mail.internet.MimeMessage;
import javax.mail.internet.MimeMultipart;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import com.cloud.utils.component.AdapterBase;
import com.cloud.utils.exception.CloudRuntimeException;
import com.sun.mail.util.BASE64DecoderStream;
public class CloudInitUserDataProvider extends AdapterBase implements UserDataProvider {
protected enum FormatType {
CLOUD_CONFIG, BASH_SCRIPT, MIME, CLOUD_BOOTHOOK, INCLUDE_FILE
}
private static final String CLOUD_CONFIG_CONTENT_TYPE = "text/cloud-config";
private static final String BASH_SCRIPT_CONTENT_TYPE = "text/x-shellscript";
private static final String INCLUDE_FILE_CONTENT_TYPE = "text/x-include-url";
private static final String CLOUD_BOOTHOOK_CONTENT_TYPE = "text/cloud-boothook";
private static final Map<FormatType, String> formatContentTypeMap = Map.ofEntries(
Map.entry(FormatType.CLOUD_CONFIG, CLOUD_CONFIG_CONTENT_TYPE),
Map.entry(FormatType.BASH_SCRIPT, BASH_SCRIPT_CONTENT_TYPE),
Map.entry(FormatType.CLOUD_BOOTHOOK, CLOUD_BOOTHOOK_CONTENT_TYPE),
Map.entry(FormatType.INCLUDE_FILE, INCLUDE_FILE_CONTENT_TYPE)
);
private static final Logger LOGGER = Logger.getLogger(CloudInitUserDataProvider.class);
private static final Session session = Session.getDefaultInstance(new Properties());
@Override
public String getName() {
return "cloud-init";
}
protected boolean isGZipped(String encodedUserdata) {
if (StringUtils.isEmpty(encodedUserdata)) {
return false;
}
byte[] data = Base64.decodeBase64(encodedUserdata);
if (data.length < 2) {
return false;
}
int magic = data[0] & 0xff | ((data[1] << 8) & 0xff00);
return magic == GZIPInputStream.GZIP_MAGIC;
}
protected String extractUserDataHeader(String userdata) {
List<String> lines = Arrays.stream(userdata.split("\n"))
.filter(x -> (x.startsWith("#") && !x.startsWith("##")) || (x.startsWith("Content-Type:")))
.collect(Collectors.toList());
if (CollectionUtils.isEmpty(lines)) {
throw new CloudRuntimeException("Failed to detect the user data format type as it " +
"does not contain a header");
}
return lines.get(0);
}
protected FormatType mapUserDataHeaderToFormatType(String header) {
if (header.equalsIgnoreCase("#cloud-config")) {
return FormatType.CLOUD_CONFIG;
} else if (header.startsWith("#!")) {
return FormatType.BASH_SCRIPT;
} else if (header.equalsIgnoreCase("#cloud-boothook")) {
return FormatType.CLOUD_BOOTHOOK;
} else if (header.startsWith("#include")) {
return FormatType.INCLUDE_FILE;
} else if (header.startsWith("Content-Type:")) {
return FormatType.MIME;
} else {
String msg = String.format("Cannot recognise the user data format type from the header line: %s." +
"Supported types are: cloud-config, bash script, cloud-boothook, include file or MIME", header);
LOGGER.error(msg);
throw new CloudRuntimeException(msg);
}
}
/**
* Detect the user data type
* Reference: <a href="https://canonical-cloud-init.readthedocs-hosted.com/en/latest/explanation/format.html#user-data-formats" />
*/
protected FormatType getUserDataFormatType(String userdata) {
if (StringUtils.isBlank(userdata)) {
String msg = "User data expected but provided empty user data";
LOGGER.error(msg);
throw new CloudRuntimeException(msg);
}
String header = extractUserDataHeader(userdata);
return mapUserDataHeaderToFormatType(header);
}
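// Illustrative header-to-format mapping implemented above (not part of the committed change):
//   "#cloud-config"                       -> FormatType.CLOUD_CONFIG
//   "#!/bin/bash" (any "#!" shebang)      -> FormatType.BASH_SCRIPT
//   "#cloud-boothook"                     -> FormatType.CLOUD_BOOTHOOK
//   "#include" (list of URLs follows)     -> FormatType.INCLUDE_FILE
//   "Content-Type: multipart/mixed; ..."  -> FormatType.MIME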
private String getContentType(String userData, FormatType formatType) throws MessagingException {
if (formatType == FormatType.MIME) {
NoIdMimeMessage msg = new NoIdMimeMessage(session, new ByteArrayInputStream(userData.getBytes()));
return msg.getContentType();
}
if (!formatContentTypeMap.containsKey(formatType)) {
throw new CloudRuntimeException(String.format("Cannot get the user data content type as " +
"its format type %s is invalid", formatType.name()));
}
return formatContentTypeMap.get(formatType);
}
protected String getBodyPartContentAsString(BodyPart bodyPart) throws MessagingException, IOException {
Object content = bodyPart.getContent();
if (content instanceof BASE64DecoderStream) {
return new String(((BASE64DecoderStream)bodyPart.getContent()).readAllBytes());
} else if (content instanceof ByteArrayInputStream) {
return new String(((ByteArrayInputStream)bodyPart.getContent()).readAllBytes());
} else if (content instanceof String) {
return (String)bodyPart.getContent();
}
throw new CloudRuntimeException(String.format("Failed to get content for multipart data with content type: %s", getBodyPartContentType(bodyPart)));
}
private String getBodyPartContentType(BodyPart bodyPart) throws MessagingException {
String contentType = StringUtils.defaultString(bodyPart.getDataHandler().getContentType(), bodyPart.getContentType());
return contentType.contains(";") ? contentType.substring(0, contentType.indexOf(';')) : contentType;
}
protected MimeBodyPart generateBodyPartMimeMessage(String userData, String contentType) throws MessagingException {
MimeBodyPart bodyPart = new MimeBodyPart();
bodyPart.setContent(userData, contentType);
bodyPart.addHeader("Content-Transfer-Encoding", "base64");
return bodyPart;
}
protected MimeBodyPart generateBodyPartMimeMessage(String userData, FormatType formatType) throws MessagingException {
return generateBodyPartMimeMessage(userData, getContentType(userData, formatType));
}
private Multipart getMessageContent(NoIdMimeMessage message) {
Multipart messageContent;
try {
messageContent = (MimeMultipart) message.getContent();
} catch (IOException | MessagingException e) {
messageContent = new MimeMultipart();
}
return messageContent;
}
private void addBodyPartToMultipart(Multipart existingMultipart, MimeBodyPart bodyPart) throws MessagingException, IOException {
boolean added = false;
final int existingCount = existingMultipart.getCount();
for (int j = 0; j < existingCount; ++j) {
MimeBodyPart existingBodyPart = (MimeBodyPart)existingMultipart.getBodyPart(j);
String existingContentType = getBodyPartContentType(existingBodyPart);
String newContentType = getBodyPartContentType(bodyPart);
if (existingContentType.equals(newContentType)) {
String existingContent = getBodyPartContentAsString(existingBodyPart);
String newContent = getBodyPartContentAsString(bodyPart);
// generating a combined content MimeBodyPart to replace
MimeBodyPart combinedBodyPart = generateBodyPartMimeMessage(
simpleAppendSameFormatTypeUserData(existingContent, newContent), existingContentType);
existingMultipart.removeBodyPart(j);
existingMultipart.addBodyPart(combinedBodyPart, j);
added = true;
break;
}
}
if (!added) {
existingMultipart.addBodyPart(bodyPart);
}
}
private void addBodyPartsToMessageContentFromUserDataContent(Multipart existingMultipart,
NoIdMimeMessage msgFromUserdata) throws MessagingException, IOException {
MimeMultipart newMultipart = (MimeMultipart)msgFromUserdata.getContent();
final int existingCount = existingMultipart.getCount();
final int newCount = newMultipart.getCount();
for (int i = 0; i < newCount; ++i) {
BodyPart bodyPart = newMultipart.getBodyPart(i);
if (existingCount == 0) {
existingMultipart.addBodyPart(bodyPart);
continue;
}
addBodyPartToMultipart(existingMultipart, (MimeBodyPart)bodyPart);
}
}
private NoIdMimeMessage createMultipartMessageAddingUserdata(String userData, FormatType formatType,
NoIdMimeMessage message) throws MessagingException, IOException {
NoIdMimeMessage newMessage = new NoIdMimeMessage(session);
Multipart messageContent = getMessageContent(message);
if (formatType == FormatType.MIME) {
NoIdMimeMessage msgFromUserdata = new NoIdMimeMessage(session, new ByteArrayInputStream(userData.getBytes()));
addBodyPartsToMessageContentFromUserDataContent(messageContent, msgFromUserdata);
} else {
MimeBodyPart part = generateBodyPartMimeMessage(userData, formatType);
addBodyPartToMultipart(messageContent, part);
}
newMessage.setContent(messageContent);
return newMessage;
}
private String simpleAppendSameFormatTypeUserData(String userData1, String userData2) {
return String.format("%s\n\n%s", userData1, userData2.substring(userData2.indexOf('\n')+1));
}
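// Worked example (sketch): for two user data blobs of the same type, the second header line is
// dropped and the bodies are joined with a blank line, e.g.
//   userData1 = "#!/bin/bash\ndate > /provisioned"
//   userData2 = "#!/bin/bash\nmkdir /tmp/test"
//   result    = "#!/bin/bash\ndate > /provisioned\n\nmkdir /tmp/test"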
private void checkGzipAppend(String encodedUserData1, String encodedUserData2) {
if (isGZipped(encodedUserData1) || isGZipped(encodedUserData2)) {
throw new CloudRuntimeException("Gzipped user data can not be used together with other user data formats");
}
}
@Override
public String appendUserData(String encodedUserData1, String encodedUserData2) {
try {
checkGzipAppend(encodedUserData1, encodedUserData2);
String userData1 = new String(Base64.decodeBase64(encodedUserData1));
String userData2 = new String(Base64.decodeBase64(encodedUserData2));
FormatType formatType1 = getUserDataFormatType(userData1);
FormatType formatType2 = getUserDataFormatType(userData2);
if (formatType1.equals(formatType2) && List.of(FormatType.CLOUD_CONFIG, FormatType.BASH_SCRIPT).contains(formatType1)) {
return simpleAppendSameFormatTypeUserData(userData1, userData2);
}
NoIdMimeMessage message = new NoIdMimeMessage(session);
message = createMultipartMessageAddingUserdata(userData1, formatType1, message);
message = createMultipartMessageAddingUserdata(userData2, formatType2, message);
ByteArrayOutputStream output = new ByteArrayOutputStream();
message.writeTo(output);
return output.toString();
} catch (MessagingException | IOException | CloudRuntimeException e) {
String msg = String.format("Error attempting to merge user data as a multipart user data. " +
"Reason: %s", e.getMessage());
LOGGER.error(msg, e);
throw new CloudRuntimeException(msg, e);
}
}
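// Usage sketch (illustrative; assumes a provider instance, inputs are Base64-encoded): appending a
// cloud-config and a shell script, which have different format types, produces a multipart/mixed
// MIME message with a text/cloud-config part and a text/x-shellscript part (see the unit tests for
// this provider).
//   String merged = provider.appendUserData(
//       Base64.encodeBase64String("#cloud-config\nssh_pwauth: True".getBytes()),
//       Base64.encodeBase64String("#!/bin/bash\ndate > /provisioned".getBytes()));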
/* This is a wrapper class used only to remove the Message-ID header from the resultant
multipart data, as that header may leak server details.
*/
private class NoIdMimeMessage extends MimeMessage {
NoIdMimeMessage (Session session) {
super(session);
}
NoIdMimeMessage (Session session, InputStream is) throws MessagingException {
super(session, is);
}
@Override
protected void updateMessageID() throws MessagingException {
removeHeader("Message-ID");
}
}
}

View File

@ -0,0 +1,27 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd"
>
<bean id="cloudInitUserDataProvider" class="org.apache.cloudstack.userdata.CloudInitUserDataProvider">
<property name="name" value="cloud-init" />
</bean>
</beans>

View File

@ -0,0 +1,206 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.zip.GZIPOutputStream;
import javax.mail.BodyPart;
import javax.mail.MessagingException;
import javax.mail.Session;
import javax.mail.internet.MimeMessage;
import javax.mail.internet.MimeMultipart;
import org.apache.commons.codec.binary.Base64;
import org.junit.Assert;
import org.junit.Test;
import com.cloud.utils.exception.CloudRuntimeException;
public class CloudInitUserDataProviderTest {
private final CloudInitUserDataProvider provider = new CloudInitUserDataProvider();
private final static String CLOUD_CONFIG_USERDATA = "## template: jinja\n" +
"#cloud-config\n" +
"runcmd:\n" +
" - echo 'TestVariable {{ ds.meta_data.variable1 }}' >> /tmp/variable\n" +
" - echo 'Hostname {{ ds.meta_data.public_hostname }}' > /tmp/hostname";
private final static String CLOUD_CONFIG_USERDATA1 = "#cloud-config\n" +
"password: atomic\n" +
"chpasswd: { expire: False }\n" +
"ssh_pwauth: True";
private final static String SHELL_SCRIPT_USERDATA = "#!/bin/bash\n" +
"date > /provisioned";
private final static String SHELL_SCRIPT_USERDATA1 = "#!/bin/bash\n" +
"mkdir /tmp/test";
private final static String SINGLE_BODYPART_CLOUDCONFIG_MULTIPART_USERDATA =
"Content-Type: multipart/mixed; boundary=\"//\"\n" +
"MIME-Version: 1.0\n" +
"\n" +
"--//\n" +
"Content-Type: text/cloud-config; charset=\"us-ascii\"\n" +
"MIME-Version: 1.0\n" +
"Content-Transfer-Encoding: 7bit\n" +
"Content-Disposition: attachment; filename=\"cloud-config.txt\"\n" +
"\n" +
"#cloud-config\n" +
"\n" +
"# Upgrade the instance on first boot\n" +
"# (ie run apt-get upgrade)\n" +
"#\n" +
"# Default: false\n" +
"# Aliases: apt_upgrade\n" +
"package_upgrade: true";
private static final Session session = Session.getDefaultInstance(new Properties());
@Test
public void testGetUserDataFormatType() {
CloudInitUserDataProvider.FormatType type = provider.getUserDataFormatType(CLOUD_CONFIG_USERDATA);
Assert.assertEquals(CloudInitUserDataProvider.FormatType.CLOUD_CONFIG, type);
}
@Test(expected = CloudRuntimeException.class)
public void testGetUserDataFormatTypeNoHeader() {
String userdata = "password: password\nchpasswd: { expire: False }\nssh_pwauth: True";
provider.getUserDataFormatType(userdata);
}
@Test(expected = CloudRuntimeException.class)
public void testGetUserDataFormatTypeInvalidType() {
String userdata = "#invalid-type\n" +
"password: password\nchpasswd: { expire: False }\nssh_pwauth: True";
provider.getUserDataFormatType(userdata);
}
private MimeMultipart getCheckedMultipartFromMultipartData(String multipartUserData, int count) {
MimeMultipart multipart = null;
Assert.assertTrue(multipartUserData.contains("Content-Type: multipart"));
try {
MimeMessage msgFromUserdata = new MimeMessage(session,
new ByteArrayInputStream(multipartUserData.getBytes()));
multipart = (MimeMultipart)msgFromUserdata.getContent();
Assert.assertEquals(count, multipart.getCount());
} catch (MessagingException | IOException e) {
Assert.fail(String.format("Failed with exception, %s", e.getMessage()));
}
return multipart;
}
@Test
public void testAppendUserData() {
String multipartUserData = provider.appendUserData(Base64.encodeBase64String(CLOUD_CONFIG_USERDATA1.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA.getBytes()));
getCheckedMultipartFromMultipartData(multipartUserData, 2);
}
@Test
public void testAppendSameShellScriptTypeUserData() {
String result = SHELL_SCRIPT_USERDATA + "\n\n" +
SHELL_SCRIPT_USERDATA1.replace("#!/bin/bash\n", "");
String appendUserData = provider.appendUserData(Base64.encodeBase64String(SHELL_SCRIPT_USERDATA.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA1.getBytes()));
Assert.assertEquals(result, appendUserData);
}
@Test
public void testAppendSameCloudConfigTypeUserData() {
String result = CLOUD_CONFIG_USERDATA + "\n\n" +
CLOUD_CONFIG_USERDATA1.replace("#cloud-config\n", "");
String appendUserData = provider.appendUserData(Base64.encodeBase64String(CLOUD_CONFIG_USERDATA.getBytes()),
Base64.encodeBase64String(CLOUD_CONFIG_USERDATA1.getBytes()));
Assert.assertEquals(result, appendUserData);
}
@Test
public void testAppendUserDataMIMETemplateData() {
String multipartUserData = provider.appendUserData(
Base64.encodeBase64String(SINGLE_BODYPART_CLOUDCONFIG_MULTIPART_USERDATA.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA.getBytes()));
getCheckedMultipartFromMultipartData(multipartUserData, 2);
}
@Test
public void testAppendUserDataExistingMultipartWithSameType() {
String templateData = provider.appendUserData(Base64.encodeBase64String(CLOUD_CONFIG_USERDATA1.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA.getBytes()));
String multipartUserData = provider.appendUserData(Base64.encodeBase64String(templateData.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA1.getBytes()));
String resultantShellScript = SHELL_SCRIPT_USERDATA + "\n\n" +
SHELL_SCRIPT_USERDATA1.replace("#!/bin/bash\n", "");
MimeMultipart mimeMultipart = getCheckedMultipartFromMultipartData(multipartUserData, 2);
try {
for (int i = 0; i < mimeMultipart.getCount(); ++i) {
BodyPart bodyPart = mimeMultipart.getBodyPart(i);
if (bodyPart.getContentType().startsWith("text/x-shellscript")) {
Assert.assertEquals(resultantShellScript, provider.getBodyPartContentAsString(bodyPart));
} else if (bodyPart.getContentType().startsWith("text/cloud-config")) {
Assert.assertEquals(CLOUD_CONFIG_USERDATA1, provider.getBodyPartContentAsString(bodyPart));
}
}
} catch (MessagingException | IOException | CloudRuntimeException e) {
Assert.fail(String.format("Failed with exception, %s", e.getMessage()));
}
}
@Test(expected = CloudRuntimeException.class)
public void testAppendUserDataInvalidUserData() {
String templateData = CLOUD_CONFIG_USERDATA1.replace("#cloud-config\n", "");
provider.appendUserData(Base64.encodeBase64String(templateData.getBytes()),
Base64.encodeBase64String(SHELL_SCRIPT_USERDATA.getBytes()));
}
@Test
public void testIsGzippedUserDataWithCloudConfigData() {
Assert.assertFalse(provider.isGZipped(CLOUD_CONFIG_USERDATA));
}
private String createBase64EncodedGzipDataAsString() throws IOException {
byte[] input = CLOUD_CONFIG_USERDATA.getBytes(StandardCharsets.ISO_8859_1);
ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
GZIPOutputStream outputStream = new GZIPOutputStream(arrayOutputStream);
outputStream.write(input,0, input.length);
outputStream.close();
return Base64.encodeBase64String(arrayOutputStream.toByteArray());
}
@Test
public void testIsGzippedUserDataWithValidGzipData() {
try {
String gzipped = createBase64EncodedGzipDataAsString();
Assert.assertTrue(provider.isGZipped(gzipped));
} catch (IOException e) {
Assert.fail(e.getMessage());
}
}
@Test(expected = CloudRuntimeException.class)
public void testAppendUserDataWithGzippedData() {
try {
provider.appendUserData(Base64.encodeBase64String(CLOUD_CONFIG_USERDATA.getBytes()),
createBase64EncodedGzipDataAsString());
Assert.fail("Gzipped data shouldn't be appended with other data");
} catch (IOException e) {
Assert.fail("Exception encountered: " + e.getMessage());
}
}
}

engine/userdata/pom.xml
View File

@ -0,0 +1,53 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-engine-userdata</artifactId>
<name>Apache CloudStack Engine Userdata Component</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine</artifactId>
<version>4.18.1.0</version>
<relativePath>../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-utils</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-components-api</artifactId>
<version>${project.version}</version>
<scope>compile</scope>
</dependency>
</dependencies>
</project>

View File

@ -0,0 +1,138 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang3.StringUtils;
import com.cloud.configuration.ConfigurationManager;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.exception.CloudRuntimeException;
public class UserDataManagerImpl extends ManagerBase implements UserDataManager {
private static final int MAX_USER_DATA_LENGTH_BYTES = 2048;
private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES;
private static final int NUM_OF_2K_BLOCKS = 512;
private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES;
private List<UserDataProvider> userDataProviders;
private static Map<String, UserDataProvider> userDataProvidersMap = new HashMap<>();
public void setUserDataProviders(final List<UserDataProvider> userDataProviders) {
this.userDataProviders = userDataProviders;
}
private void initializeUserdataProvidersMap() {
if (userDataProviders != null) {
for (final UserDataProvider provider : userDataProviders) {
userDataProvidersMap.put(provider.getName().toLowerCase(), provider);
}
}
}
@Override
public boolean start() {
initializeUserdataProvidersMap();
return true;
}
@Override
public String getConfigComponentName() {
return UserDataManagerImpl.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey[] {};
}
protected UserDataProvider getUserdataProvider(String name) {
if (StringUtils.isEmpty(name)) {
// Use cloud-init as the default userdata provider
name = "cloud-init";
}
if (!userDataProvidersMap.containsKey(name)) {
throw new CloudRuntimeException("Failed to find userdata provider by the name: " + name);
}
return userDataProvidersMap.get(name);
}
@Override
public String concatenateUserData(String userdata1, String userdata2, String userdataProvider) {
UserDataProvider provider = getUserdataProvider(userdataProvider);
String appendUserData = provider.appendUserData(userdata1, userdata2);
return Base64.encodeBase64String(appendUserData.getBytes());
}
@Override
public String validateUserData(String userData, BaseCmd.HTTPMethod httpmethod) {
byte[] decodedUserData = null;
if (userData != null) {
if (userData.contains("%")) {
try {
userData = URLDecoder.decode(userData, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new InvalidParameterValueException("Url decoding of userdata failed.");
}
}
if (!Base64.isBase64(userData)) {
throw new InvalidParameterValueException("User data is not base64 encoded");
}
// If GET, use 4K. If POST, support up to 1M.
if (httpmethod.equals(BaseCmd.HTTPMethod.GET)) {
decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET);
} else if (httpmethod.equals(BaseCmd.HTTPMethod.POST)) {
decodedUserData = validateAndDecodeByHTTPMethod(userData, MAX_HTTP_POST_LENGTH, BaseCmd.HTTPMethod.POST);
}
if (decodedUserData == null || decodedUserData.length < 1) {
throw new InvalidParameterValueException("User data is too short");
}
// Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR.
return Base64.encodeBase64String(decodedUserData);
}
return null;
}
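// Illustrative examples (sketch) of the validation behaviour, grounded in the unit tests:
//   validateUserData("Zm8", HTTPMethod.GET)            -> "Zm8="      (padding is re-added on return)
//   validateUserData("Zm8=", HTTPMethod.GET)           -> "Zm8="
//   validateUserData("Zm%2B8%2Fw8%3D", HTTPMethod.GET) -> "Zm+8/w8="  (URL-decoded first, since it contains '%')
// Inputs that are not Base64, too short, or longer than the HTTP method / configuration limits
// throw InvalidParameterValueException.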
private byte[] validateAndDecodeByHTTPMethod(String userData, int maxHTTPLength, BaseCmd.HTTPMethod httpMethod) {
byte[] decodedUserData = null;
if (userData.length() >= maxHTTPLength) {
throw new InvalidParameterValueException(String.format("User data is too long for an http %s request", httpMethod.toString()));
}
if (userData.length() > ConfigurationManager.VM_USERDATA_MAX_LENGTH.value()) {
throw new InvalidParameterValueException("User data has exceeded configurable max length : " + ConfigurationManager.VM_USERDATA_MAX_LENGTH.value());
}
decodedUserData = Base64.decodeBase64(userData.getBytes());
if (decodedUserData.length > maxHTTPLength) {
throw new InvalidParameterValueException(String.format("User data is too long for http %s request", httpMethod.toString()));
}
return decodedUserData;
}
}

View File

@ -0,0 +1,28 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
public interface UserDataProvider {
String getName();
/**
* Appends two user data inputs into a single user data.
* NOTE: encodedUserData1 and encodedUserData2 are Base64-encoded user data strings
* @return a plain (non-Base64-encoded) string containing both user data inputs
*/
String appendUserData(String encodedUserData1, String encodedUserData2);
}
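// Usage sketch (assumption, for illustration): implementations such as CloudInitUserDataProvider
// take two Base64-encoded inputs and return the merged user data as plain text; callers like
// UserDataManagerImpl#concatenateUserData then re-encode the result to Base64, e.g.
//   String merged = provider.appendUserData(encodedUserData1, encodedUserData2);
//   String reEncoded = Base64.encodeBase64String(merged.getBytes());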

View File

@ -0,0 +1,34 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
>
<bean id="userDataManager" class="org.apache.cloudstack.userdata.UserDataManagerImpl">
<property name="userDataProviders" value="#{userDataProvidersRegistry.registered}" />
</bean>
</beans>

View File

@ -0,0 +1,59 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.userdata;
import static org.junit.Assert.assertEquals;
import java.nio.charset.StandardCharsets;
import org.apache.cloudstack.api.BaseCmd;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class UserDataManagerImplTest {
@Spy
@InjectMocks
private UserDataManagerImpl userDataManager;
@Test
public void testValidateBase64WithoutPadding() {
// fo should be encoded in base64 either as Zm8 or Zm8=
String encodedUserdata = "Zm8";
String encodedUserdataWithPadding = "Zm8=";
// Verify that we accept both but return the padded version
assertEquals("validate return the value with padding", encodedUserdataWithPadding, userDataManager.validateUserData(encodedUserdata, BaseCmd.HTTPMethod.GET));
assertEquals("validate return the value with padding", encodedUserdataWithPadding, userDataManager.validateUserData(encodedUserdataWithPadding, BaseCmd.HTTPMethod.GET));
}
@Test
public void testValidateUrlEncodedBase64() {
// "Zm+8/w8=" contains '+' and '/', which are altered by URL encoding
String encodedUserdata = "Zm+8/w8=";
String urlEncodedUserdata = java.net.URLEncoder.encode(encodedUserdata, StandardCharsets.UTF_8);
// Verify that both the raw and the URL-encoded forms are accepted and decode to the same value
assertEquals("validate return the value with padding", encodedUserdata, userDataManager.validateUserData(encodedUserdata, BaseCmd.HTTPMethod.GET));
assertEquals("validate return the value with padding", encodedUserdata, userDataManager.validateUserData(urlEncodedUserdata, BaseCmd.HTTPMethod.GET));
}
}

View File

@ -486,6 +486,8 @@ public class EncryptionSecretKeyChanger {
migrateImageStoreUrlForCifs(conn);
migrateStoragePoolPathForSMB(conn);
preparePassphraseTableForMigration(conn);
// migrate columns with annotation @Encrypt
migrateEncryptedTableColumns(conn);
@ -665,6 +667,56 @@ public class EncryptionSecretKeyChanger {
System.out.println("End migrate user vm deploy_as_is details");
}
// encrypt any unencrypted passphrases using old style encryptor before we migrate
private void preparePassphraseTableForMigration(Connection conn) throws SQLException {
System.out.println("Preparing passphrase table by checking for unencrypted passphrases");
try(PreparedStatement selectPstmt = conn.prepareStatement("SELECT id, passphrase FROM passphrase");
ResultSet rs = selectPstmt.executeQuery();
PreparedStatement updatePstmt = conn.prepareStatement("UPDATE passphrase SET passphrase=? WHERE id=?")
) {
while(rs.next()) {
long id = rs.getLong(1);
String value = rs.getString(2);
if (StringUtils.isBlank(value)) {
continue;
}
// passphrases are 64 bytes long when unencrypted, longer when encrypted
if (value.length() == 64) {
// just confirm it won't decrypt, to be safe, before assuming raw value and encrypting
try {
oldEncryptor.decrypt(value);
System.out.printf("Passphrase table entry db id %d was already encrypted with old encryption\n", id);
} catch(EncryptionException | CloudRuntimeException ex) {
String message = null;
if (ex instanceof CloudRuntimeException && ex.getCause() != null) {
if ((ex.getCause() instanceof EncryptionException)) {
message = ex.getCause().getMessage();
}
} else if (ex instanceof EncryptionException) {
message = ex.getMessage();
}
if (message != null && message.contains("Failed to decrypt")) {
System.out.printf("Encrypting unencrypted passphrase table entry db id %d before migration using old encryption\n", id);
String encrypted = oldEncryptor.encrypt(value);
updatePstmt.setBytes(1, encrypted.getBytes(StandardCharsets.UTF_8));
updatePstmt.setLong(2, id);
updatePstmt.executeUpdate();
} else {
throwCloudRuntimeException("Unhandled EncryptionException", ex);
}
}
}
}
} catch (SQLException e) {
throwCloudRuntimeException("Unable to prepare passphrase table", e);
}
System.out.println("End preparing passphrase table");
}
private void migrateImageStoreUrlForCifs(Connection conn) {
System.out.println("Begin migrate image store url if protocol is cifs");

View File

@ -83,6 +83,7 @@ import org.libvirt.DomainInfo;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.DomainInterfaceStats;
import org.libvirt.DomainSnapshot;
import org.libvirt.Library;
import org.libvirt.LibvirtException;
import org.libvirt.MemoryStatistic;
import org.libvirt.Network;
@ -90,6 +91,9 @@ import org.libvirt.SchedParameter;
import org.libvirt.SchedUlongParameter;
import org.libvirt.Secret;
import org.libvirt.VcpuInfo;
import org.libvirt.event.DomainEvent;
import org.libvirt.event.DomainEventDetail;
import org.libvirt.event.StoppedDetail;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@ -97,6 +101,7 @@ import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.HostVmStateReportEntry;
@ -175,6 +180,8 @@ import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.IsolationType;
import com.cloud.network.Networks.RouterPrivateIpStrategy;
import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.AgentStatusUpdater;
import com.cloud.resource.ResourceStatusUpdater;
import com.cloud.resource.RequestWrapper;
import com.cloud.resource.ServerResource;
import com.cloud.resource.ServerResourceBase;
@ -224,11 +231,12 @@ import com.google.gson.Gson;
* private mac addresses for domrs | mac address | start + 126 || ||
* pool | the parent of the storage pool hierarchy * }
**/
public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer {
public class LibvirtComputingResource extends ServerResourceBase implements ServerResource, VirtualRouterDeployer, ResourceStatusUpdater {
protected static Logger s_logger = Logger.getLogger(LibvirtComputingResource.class);
private static final String CONFIG_VALUES_SEPARATOR = ",";
private static final String LEGACY = "legacy";
private static final String SECURE = "secure";
@ -322,6 +330,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
private String _dcId;
private String _clusterId;
private final Properties _uefiProperties = new Properties();
private String hostHealthCheckScriptPath;
private long _hvVersion;
private Duration _timeout;
@ -451,11 +460,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
private long _dom0OvercommitMem;
private int _dom0MinCpuCores;
protected int _cmdsTimeout;
protected int _stopTimeout;
protected CPUStat _cpuStat = new CPUStat();
protected MemStat _memStat = new MemStat(_dom0MinMem, _dom0OvercommitMem);
private final LibvirtUtilitiesHelper libvirtUtilitiesHelper = new LibvirtUtilitiesHelper();
private AgentStatusUpdater _agentStatusUpdater;
protected Boolean enableManuallySettingCpuTopologyOnKvmVm = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.ENABLE_MANUALLY_SETTING_CPU_TOPOLOGY_ON_KVM_VM);
@ -480,6 +492,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return _hypervisorQemuVersion;
}
@Override
public void registerStatusUpdater(AgentStatusUpdater updater) {
_agentStatusUpdater = updater;
}
@Override
public ExecutionResult executeInVR(final String routerIp, final String script, final String args) {
return executeInVR(routerIp, script, args, _timeout);
@ -717,6 +734,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
NATIVE, OPENVSWITCH, TUNGSTEN
}
protected enum HealthCheckResult {
SUCCESS, FAILURE, IGNORE
}
protected BridgeType _bridgeType;
protected StorageSubsystemCommandHandler storageHandler;
@ -943,6 +964,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
throw new ConfigurationException("Unable to find the ovs-pvlan-kvm-vm.sh");
}
hostHealthCheckScriptPath = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HEALTH_CHECK_SCRIPT_PATH);
if (StringUtils.isNotBlank(hostHealthCheckScriptPath) && !new File(hostHealthCheckScriptPath).exists()) {
s_logger.info(String.format("Unable to find the host health check script at: %s, " +
"discarding it", hostHealthCheckScriptPath));
}
setupTungstenVrouterPath = Script.findScript(tungstenScriptsDir, "setup_tungsten_vrouter.sh");
if (setupTungstenVrouterPath == null) {
throw new ConfigurationException("Unable to find the setup_tungsten_vrouter.sh");
@ -1038,6 +1065,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
// Reserve 1GB unless admin overrides
_dom0MinMem = ByteScaleUtils.mebibytesToBytes(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HOST_RESERVED_MEM_MB));
value = (String)params.get("host.reserved.cpu.count");
_dom0MinCpuCores = NumbersUtil.parseInt(value, 0);
// Support overcommit memory for host if host uses ZSWAP, KSM and other memory
// compressing technologies
_dom0OvercommitMem = ByteScaleUtils.mebibytesToBytes(AgentPropertiesFileHandler.getPropertyValue(AgentProperties.HOST_OVERCOMMIT_MEM_MB));
@ -3442,13 +3472,54 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
@Override
public PingCommand getCurrentStatus(final long id) {
PingRoutingCommand pingRoutingCommand;
if (!_canBridgeFirewall) {
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, this.getHostVmStateReport());
pingRoutingCommand = new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id, this.getHostVmStateReport());
} else {
final HashMap<String, Pair<Long, Long>> nwGrpStates = syncNetworkGroups(id);
return new PingRoutingWithNwGroupsCommand(getType(), id, this.getHostVmStateReport(), nwGrpStates);
pingRoutingCommand = new PingRoutingWithNwGroupsCommand(getType(), id, this.getHostVmStateReport(), nwGrpStates);
}
HealthCheckResult healthCheckResult = getHostHealthCheckResult();
if (healthCheckResult != HealthCheckResult.IGNORE) {
pingRoutingCommand.setHostHealthCheckResult(healthCheckResult == HealthCheckResult.SUCCESS);
}
return pingRoutingCommand;
}
/**
* The health check result is true if the script executes successfully and the exit code is 0.
* The health check result is false if the script executes successfully and the exit code is 1.
* The health check result is not reported if:
* - the script file is not specified, or
* - the script file does not exist, or
* - the script file is not accessible by the user of the cloudstack-agent process, or
* - the script file is not executable, or
* - there are errors when the script is executed (exit codes other than 0 or 1)
*/
private HealthCheckResult getHostHealthCheckResult() {
if (StringUtils.isBlank(hostHealthCheckScriptPath)) {
s_logger.debug("Host health check script path is not specified");
return HealthCheckResult.IGNORE;
}
File script = new File(hostHealthCheckScriptPath);
if (!script.exists() || !script.isFile() || !script.canExecute()) {
s_logger.warn(String.format("The host health check script file set at: %s cannot be executed, " +
"reason: %s", hostHealthCheckScriptPath,
!script.exists() ? "file does not exist" : "please check file permissions to execute this file"));
return HealthCheckResult.IGNORE;
}
int exitCode = executeBashScriptAndRetrieveExitValue(hostHealthCheckScriptPath);
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Host health check script exit code: %s", exitCode));
}
return retrieveHealthCheckResultFromExitCode(exitCode);
}
private HealthCheckResult retrieveHealthCheckResultFromExitCode(int exitCode) {
if (exitCode != 0 && exitCode != 1) {
return HealthCheckResult.IGNORE;
}
return exitCode == 0 ? HealthCheckResult.SUCCESS : HealthCheckResult.FAILURE;
}
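// Exit code mapping (sketch): a health check script returning 0 is reported as healthy (SUCCESS),
// returning 1 as unhealthy (FAILURE); any other exit code, a missing script or a non-executable
// script leaves the health check result out of the ping/startup commands (IGNORE).
//   retrieveHealthCheckResultFromExitCode(0)   -> HealthCheckResult.SUCCESS
//   retrieveHealthCheckResultFromExitCode(1)   -> HealthCheckResult.FAILURE
//   retrieveHealthCheckResultFromExitCode(127) -> HealthCheckResult.IGNORE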
@Override
@ -3470,7 +3541,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
@Override
public StartupCommand[] initialize() {
final KVMHostInfo info = new KVMHostInfo(_dom0MinMem, _dom0OvercommitMem, _manualCpuSpeed);
final KVMHostInfo info = new KVMHostInfo(_dom0MinMem, _dom0OvercommitMem, _manualCpuSpeed, _dom0MinCpuCores);
String capabilities = String.join(",", info.getCapabilities());
if (dpdkSupport) {
@ -3478,7 +3549,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
final StartupRoutingCommand cmd =
new StartupRoutingCommand(info.getCpus(), info.getCpuSpeed(), info.getTotalMemory(), info.getReservedMemory(), capabilities, _hypervisorType,
new StartupRoutingCommand(info.getAllocatableCpus(), info.getCpuSpeed(), info.getTotalMemory(), info.getReservedMemory(), capabilities, _hypervisorType,
RouterPrivateIpStrategy.HostLocal);
cmd.setCpuSockets(info.getCpuSockets());
fillNetworkInformation(cmd);
@ -3490,6 +3561,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
cmd.setGatewayIpAddress(_localGateway);
cmd.setIqn(getIqn());
cmd.getHostDetails().put(HOST_VOLUME_ENCRYPTION, String.valueOf(hostSupportsVolumeEncryption()));
HealthCheckResult healthCheckResult = getHostHealthCheckResult();
if (healthCheckResult != HealthCheckResult.IGNORE) {
cmd.setHostHealthCheckResult(healthCheckResult == HealthCheckResult.SUCCESS);
}
if (cmd.getHostDetails().containsKey("Host.OS")) {
_hostDistro = cmd.getHostDetails().get("Host.OS");
@ -3530,9 +3605,63 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
} catch (final CloudRuntimeException e) {
s_logger.debug("Unable to initialize local storage pool: " + e);
}
setupLibvirtEventListener();
return sscmd;
}
private void setupLibvirtEventListener() {
final Thread libvirtListenerThread = new Thread(() -> {
try {
Library.runEventLoop();
} catch (LibvirtException e) {
s_logger.error("LibvirtException was thrown in event loop: ", e);
} catch (InterruptedException e) {
s_logger.error("Libvirt event loop was interrupted: ", e);
}
});
try {
libvirtListenerThread.setDaemon(true);
libvirtListenerThread.start();
Connect conn = LibvirtConnection.getConnection();
conn.addLifecycleListener(this::onDomainLifecycleChange);
s_logger.debug("Set up the libvirt domain event lifecycle listener");
} catch (LibvirtException e) {
s_logger.error("Failed to get libvirt connection for domain event lifecycle", e);
}
}
private int onDomainLifecycleChange(Domain domain, DomainEvent domainEvent) {
try {
s_logger.debug(String.format("Got event lifecycle change on Domain %s, event %s", domain.getName(), domainEvent));
if (domainEvent != null) {
switch (domainEvent.getType()) {
case STOPPED:
/* libvirt-destroyed VMs have detail StoppedDetail.DESTROYED, self shutdown guests are StoppedDetail.SHUTDOWN
* Checking for this helps us differentiate between events where cloudstack or admin stopped the VM vs guest
* initiated, and avoid pushing extra updates for actions we are initiating without a need for extra tracking */
DomainEventDetail detail = domainEvent.getDetail();
if (StoppedDetail.SHUTDOWN.equals(detail) || StoppedDetail.CRASHED.equals(detail)) {
s_logger.info("Triggering out of band status update due to completed self-shutdown or crash of VM");
_agentStatusUpdater.triggerUpdate();
} else {
s_logger.debug("Event detail: " + detail);
}
break;
default:
s_logger.debug(String.format("No handling for event %s", domainEvent));
}
}
} catch (LibvirtException e) {
s_logger.error("Libvirt exception while processing lifecycle event", e);
} catch (Throwable e) {
s_logger.error("Error during lifecycle", e);
}
return 0;
}
public String diskUuidToSerial(String uuid) {
String uuidWithoutHyphen = uuid.replace("-","");
return uuidWithoutHyphen.substring(0, Math.min(uuidWithoutHyphen.length(), 20));

View File

@ -35,11 +35,16 @@ public class LibvirtCheckUrlCommand extends CommandWrapper<CheckUrlCommand, Chec
@Override
public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serverResource) {
final String url = cmd.getUrl();
s_logger.info("Checking URL: " + url);
final Integer connectTimeout = cmd.getConnectTimeout();
final Integer connectionRequestTimeout = cmd.getConnectionRequestTimeout();
final Integer socketTimeout = cmd.getSocketTimeout();
s_logger.info(String.format("Checking URL: %s, with connect timeout: %d, connect request timeout: %d, socket timeout: %d", url, connectTimeout, connectionRequestTimeout, socketTimeout));
Long remoteSize = null;
boolean checkResult = DirectDownloadHelper.checkUrlExistence(url);
boolean checkResult = DirectDownloadHelper.checkUrlExistence(url, connectTimeout, connectionRequestTimeout, socketTimeout);
if (checkResult) {
remoteSize = DirectDownloadHelper.getFileSize(url, cmd.getFormat());
remoteSize = DirectDownloadHelper.getFileSize(url, cmd.getFormat(), connectTimeout, connectionRequestTimeout, socketTimeout);
if (remoteSize == null || remoteSize < 0) {
s_logger.error(String.format("Couldn't properly retrieve the remote size of the template on " +
"url %s, obtained size = %s", url, remoteSize));

View File

@ -1715,11 +1715,11 @@ public class KVMStorageProcessor implements StorageProcessor {
snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);
String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm);
String copyResult = copySnapshotToPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume);
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait());
mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn);
validateCopyResult(copyResult, snapshotPath);
validateConvertResult(convertResult, snapshotPath);
} catch (LibvirtException e) {
if (!e.getMessage().contains(LIBVIRT_OPERATION_NOT_SUPPORTED_MESSAGE)) {
throw e;
@ -1784,8 +1784,8 @@ public class KVMStorageProcessor implements StorageProcessor {
}
} else {
snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName);
String copyResult = copySnapshotToPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume);
validateCopyResult(copyResult, snapshotPath);
String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, diskPath, snapshotPath, volume, cmd.getWait());
validateConvertResult(convertResult, snapshotPath);
}
}
@ -1838,13 +1838,13 @@ public class KVMStorageProcessor implements StorageProcessor {
s_logger.debug(String.format("Full VM Snapshot [%s] of VM [%s] took [%s] seconds to finish.", snapshotName, vmName, (System.currentTimeMillis() - start)/1000));
}
protected void validateCopyResult(String copyResult, String snapshotPath) throws CloudRuntimeException, IOException {
if (copyResult == null) {
protected void validateConvertResult(String convertResult, String snapshotPath) throws CloudRuntimeException, IOException {
if (convertResult == null) {
return;
}
Files.deleteIfExists(Paths.get(snapshotPath));
throw new CloudRuntimeException(copyResult);
throw new CloudRuntimeException(convertResult);
}
/**
@ -1901,20 +1901,31 @@ public class KVMStorageProcessor implements StorageProcessor {
}
/**
* Creates the snapshot directory in the primary storage, if it does not exist; then copies the base file (VM's old writing file) to the snapshot dir..
* Creates the snapshot directory in the primary storage, if it does not exist; then converts the base file (the VM's old writing file) into a snapshot file in the snapshot directory.
* @param primaryPool Storage to create folder, if not exists;
* @param baseFile Base file of VM, which will be copied;
* @param snapshotPath Path to copy the base file;
* @return null if copies successfully or a error message.
* @param baseFile Base file of VM, which will be converted;
* @param snapshotPath Path to convert the base file;
* @return null if the conversion occurs successfully or an error message that must be handled.
*/
protected String copySnapshotToPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume) {
protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume, int wait) {
try {
s_logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath));
primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR);
Files.copy(Paths.get(baseFile), Paths.get(snapshotPath));
s_logger.debug(String.format("Copied %s snapshot from [%s] to [%s].", volume, baseFile, snapshotPath));
QemuImgFile srcFile = new QemuImgFile(baseFile);
srcFile.setFormat(PhysicalDiskFormat.QCOW2);
QemuImgFile destFile = new QemuImgFile(snapshotPath);
destFile.setFormat(PhysicalDiskFormat.QCOW2);
QemuImg q = new QemuImg(wait);
q.convert(srcFile, destFile);
s_logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath));
return null;
} catch (IOException ex) {
return String.format("Unable to copy %s snapshot [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage());
} catch (QemuImgException | LibvirtException ex) {
return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage());
}
}
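// Rough equivalent of the conversion above, assuming QemuImg delegates to the qemu-img CLI
// (an assumption for illustration; the paths are placeholders):
//   qemu-img convert -f qcow2 -O qcow2 <baseFile> <snapshotPath>
// Unlike the previous Files.copy() approach, the conversion writes a self-contained qcow2 snapshot
// file and is bounded by the command's wait value passed to the QemuImg constructor.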

View File

@ -552,12 +552,12 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
/**
* Calculates usable size from raw size, assuming qcow2 requires 192k/1GB for metadata
* We also remove 32MiB for potential encryption/safety factor.
* We also remove 128MiB for encryption/fragmentation/safety factor.
* @param raw size in bytes
* @return usable size in bytes
*/
public static long getUsableBytesFromRawBytes(Long raw) {
long usable = raw - (32 << 20) - ((raw >> 30) * 200704);
long usable = raw - (128 << 20) - ((raw >> 30) * 200704);
if (usable < 0) {
usable = 0L;
}
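// Worked example (sketch) of the overhead calculation for an 8 GiB raw volume:
//   raw            = 8 << 30              = 8,589,934,592 bytes
//   fixed overhead = 128 << 20            =   134,217,728 bytes
//   qcow2 metadata = (raw >> 30) * 200704 = 8 * 200704 = 1,605,632 bytes
//   usable         = 8,589,934,592 - 134,217,728 - 1,605,632 = 8,454,111,232 bytes
// which matches the value asserted in ScaleIOStorageAdaptorTest.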

View File

@ -48,7 +48,8 @@ public class KVMHostInfo {
private static final Logger LOGGER = Logger.getLogger(KVMHostInfo.class);
private int cpus;
private int totalCpus;
private int allocatableCpus;
private int cpusockets;
private long cpuSpeed;
private long totalMemory;
@ -58,16 +59,25 @@ public class KVMHostInfo {
private static String cpuInfoFreqFileName = "/sys/devices/system/cpu/cpu0/cpufreq/base_frequency";
public KVMHostInfo(long reservedMemory, long overCommitMemory, long manualSpeed) {
public KVMHostInfo(long reservedMemory, long overCommitMemory, long manualSpeed, int reservedCpus) {
this.cpuSpeed = manualSpeed;
this.reservedMemory = reservedMemory;
this.overCommitMemory = overCommitMemory;
this.getHostInfoFromLibvirt();
this.totalMemory = new MemStat(this.getReservedMemory(), this.getOverCommitMemory()).getTotal();
this.allocatableCpus = totalCpus - reservedCpus;
if (allocatableCpus < 1) {
LOGGER.warn(String.format("Aggressive reserved CPU config leaves no usable CPUs for VMs! Total system CPUs: %d, Reserved: %d, Allocatable: %d", totalCpus, reservedCpus, allocatableCpus));
allocatableCpus = 0;
}
}
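// Worked example (sketch), mirroring KVMHostInfoTest: with 10 host CPUs,
//   reservedCpus = 0  -> allocatableCpus = 10
//   reservedCpus = 2  -> allocatableCpus = 8
//   reservedCpus = 12 -> allocatableCpus = 0 (clamped, with a warning logged)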
public int getCpus() {
return this.cpus;
public int getTotalCpus() {
return this.totalCpus;
}
public int getAllocatableCpus() {
return this.allocatableCpus;
}
public int getCpuSockets() {
@ -189,7 +199,7 @@ public class KVMHostInfo {
if (hosts.nodes > 0) {
this.cpusockets = hosts.sockets * hosts.nodes;
}
this.cpus = hosts.cpus;
this.totalCpus = hosts.cpus;
final LibvirtCapXMLParser parser = new LibvirtCapXMLParser();
parser.parseCapabilitiesXML(capabilities);

View File

@ -39,6 +39,9 @@ import java.util.List;
import java.util.Set;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -91,6 +94,9 @@ public class KVMStorageProcessorTest {
@Mock
Connect connectMock;
@Mock
QemuImg qemuImgMock;
@Mock
LibvirtDomainXMLParser libvirtDomainXMLParserMock;
@Mock
@ -251,32 +257,53 @@ public class KVMStorageProcessorTest {
@Test
@PrepareForTest(KVMStorageProcessor.class)
public void validateCopySnapshotToPrimaryStorageDirFailToCopyReturnErrorMessage() throws Exception {
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithQemuImgExceptionReturnErrorMessage() throws Exception {
String baseFile = "baseFile";
String snapshotPath = "snapshotPath";
String errorMessage = "error";
String expectedResult = String.format("Unable to copy %s snapshot [%s] to [%s] due to [%s].", volumeObjectToMock, baseFile, snapshotPath, errorMessage);
String expectedResult = String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volumeObjectToMock, baseFile, snapshotPath, errorMessage);
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
PowerMockito.mockStatic(Files.class);
PowerMockito.when(Files.copy(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.any())).thenThrow(new IOException(errorMessage));
String result = storageProcessorSpy.copySnapshotToPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock);
PowerMockito.whenNew(QemuImg.class).withArguments(Mockito.anyInt()).thenReturn(qemuImgMock);
Mockito.doThrow(new QemuImgException(errorMessage)).when(qemuImgMock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
Assert.assertEquals(expectedResult, result);
}
@Test
@PrepareForTest(KVMStorageProcessor.class)
public void validateCopySnapshotToPrimaryStorageDirCopySuccessReturnNull() throws Exception {
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestFailToConvertWithLibvirtExceptionReturnErrorMessage() throws Exception {
String baseFile = "baseFile";
String snapshotPath = "snapshotPath";
String errorMessage = "null";
String expectedResult = String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volumeObjectToMock, baseFile, snapshotPath, errorMessage);
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
PowerMockito.whenNew(QemuImg.class).withArguments(Mockito.anyInt()).thenReturn(qemuImgMock);
Mockito.doThrow(LibvirtException.class).when(qemuImgMock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
Assert.assertEquals(expectedResult, result);
}
@Test
@PrepareForTest(KVMStorageProcessor.class)
public void convertBaseFileToSnapshotFileInPrimaryStorageDirTestConvertSuccessReturnNull() throws Exception {
String baseFile = "baseFile";
String snapshotPath = "snapshotPath";
Mockito.doReturn(true).when(kvmStoragePoolMock).createFolder(Mockito.anyString());
PowerMockito.mockStatic(Files.class);
PowerMockito.when(Files.copy(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.any())).thenReturn(null);
String result = storageProcessorSpy.copySnapshotToPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock);
PowerMockito.whenNew(QemuImg.class).withArguments(Mockito.anyInt()).thenReturn(qemuImgMock);
Mockito.doNothing().when(qemuImgMock).convert(Mockito.any(QemuImgFile.class), Mockito.any(QemuImgFile.class));
String result = storageProcessorSpy.convertBaseFileToSnapshotFileInPrimaryStorageDir(kvmStoragePoolMock, baseFile, snapshotPath, volumeObjectToMock, 1);
Assert.assertNull(result);
}
@ -321,14 +348,14 @@ public class KVMStorageProcessorTest {
@Test
public void validateValidateCopyResultResultIsNullReturn() throws CloudRuntimeException, IOException{
storageProcessorSpy.validateCopyResult(null, "");
storageProcessorSpy.validateConvertResult(null, "");
}
@Test (expected = IOException.class)
public void validateValidateCopyResultFailToDeleteThrowIOException() throws CloudRuntimeException, IOException{
PowerMockito.mockStatic(Files.class);
PowerMockito.when(Files.deleteIfExists(Mockito.any())).thenThrow(new IOException(""));
storageProcessorSpy.validateCopyResult("", "");
storageProcessorSpy.validateConvertResult("", "");
}
@Test (expected = CloudRuntimeException.class)
@ -336,7 +363,7 @@ public class KVMStorageProcessorTest {
public void validateValidateCopyResulResultNotNullThrowCloudRuntimeException() throws CloudRuntimeException, IOException{
PowerMockito.mockStatic(Files.class);
PowerMockito.when(Files.deleteIfExists(Mockito.any())).thenReturn(true);
storageProcessorSpy.validateCopyResult("", "");
storageProcessorSpy.validateConvertResult("", "");
}
@Test (expected = CloudRuntimeException.class)

View File

@ -23,9 +23,9 @@ import org.junit.Test;
public class ScaleIOStorageAdaptorTest {
@Test
public void getUsableBytesFromRawBytesTest() {
Assert.assertEquals("Overhead calculated for 8Gi size", 8554774528L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(8L << 30));
Assert.assertEquals("Overhead calculated for 4Ti size", 4294130925568L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(4000L << 30));
Assert.assertEquals("Overhead calculated for 500Gi size", 536737005568L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(500L << 30));
Assert.assertEquals("Overhead calculated for 8Gi size", 8454111232L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(8L << 30));
Assert.assertEquals("Overhead calculated for 4Ti size", 4294030262272L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(4000L << 30));
Assert.assertEquals("Overhead calculated for 500Gi size", 536636342272L, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(500L << 30));
Assert.assertEquals("Unsupported small size", 0, ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(1L));
}
}

View File

@ -78,7 +78,33 @@ public class KVMHostInfoTest {
PowerMockito.when(conn.close()).thenReturn(0);
int manualSpeed = 500;
KVMHostInfo kvmHostInfo = new KVMHostInfo(10, 10, manualSpeed);
KVMHostInfo kvmHostInfo = new KVMHostInfo(10, 10, manualSpeed, 0);
Assert.assertEquals(kvmHostInfo.getCpuSpeed(), manualSpeed);
}
@Test
public void reservedCpuCoresTest() throws Exception {
if (!System.getProperty("os.name").equals("Linux")) {
return;
}
PowerMockito.mockStatic(LibvirtConnection.class);
Connect conn = Mockito.mock(Connect.class);
NodeInfo nodeInfo = Mockito.mock(NodeInfo.class);
nodeInfo.cpus = 10;
String capabilitiesXml = "<capabilities></capabilities>";
PowerMockito.doReturn(conn).when(LibvirtConnection.class, "getConnection");
PowerMockito.when(conn.nodeInfo()).thenReturn(nodeInfo);
PowerMockito.when(conn.getCapabilities()).thenReturn(capabilitiesXml);
PowerMockito.when(conn.close()).thenReturn(0);
KVMHostInfo kvmHostInfo = new KVMHostInfo(10, 10, 100, 2);
Assert.assertEquals("reserve two CPU cores", 8, kvmHostInfo.getAllocatableCpus());
kvmHostInfo = new KVMHostInfo(10, 10, 100, 0);
Assert.assertEquals("no reserve CPU core setting", 10, kvmHostInfo.getAllocatableCpus());
kvmHostInfo = new KVMHostInfo(10, 10, 100, 12);
Assert.assertEquals("Misconfigured/too large CPU reserve", 0, kvmHostInfo.getAllocatableCpus());
}
}

View File

@ -111,6 +111,7 @@ under the License.
<adapter name="XCP Agent" class="com.cloud.hypervisor.xenserver.discoverer.XcpServerDiscoverer"/>
<adapter name="SecondaryStorage" class="com.cloud.storage.secondary.SecondaryStorageDiscoverer"/>
<adapter name="KVM Agent" class="com.cloud.hypervisor.kvm.discoverer.KvmServerDiscoverer"/>
<adapter name="CustomHW Agent" class="com.cloud.hypervisor.discoverer.CustomServerDiscoverer"/>
<adapter name="Bare Metal Agent" class="com.cloud.baremetal.BareMetalDiscoverer"/>
<adapter name="Ovm Discover" class="com.cloud.ovm.hypervisor.OvmDiscoverer" />
</adapters>

View File

@ -428,6 +428,19 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
}
private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException {
VolumeDefinitionModify dfm = new VolumeDefinitionModify();
dfm.setSizeKib(sizeByte / 1024);
ApiCallRcList answers = api.volumeDefinitionModify(resourceName, 0, dfm);
if (answers.hasError()) {
s_logger.error("Resize error: " + answers.get(0).getMessage());
throw new CloudRuntimeException(answers.get(0).getMessage());
} else {
s_logger.info(String.format("Successfully resized %s to %d kib", resourceName, dfm.getSizeKib()));
}
}
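// Worked example (sketch): resizing a resource to 10 GiB passes sizeByte = 10,737,418,240,
// so dfm.setSizeKib(10737418240 / 1024) sets the volume definition to 10,485,760 KiB.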
private String cloneResource(long csCloneId, VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
// get the cached template on this storage
VMTemplateStoragePoolVO tmplPoolRef = _vmTemplatePoolDao.findByPoolTemplate(
@ -452,6 +465,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
s_logger.info("Clone resource definition " + cloneRes + " to " + rscName + " finished");
if (volumeInfo.getSize() != null && volumeInfo.getSize() > 0) {
resizeResource(linstorApi, rscName, volumeInfo.getSize());
}
applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName());
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
@ -738,26 +756,16 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
dfm.setSizeKib(resizeParameter.newSize / 1024);
try
{
resizeResource(api, rscName, resizeParameter.newSize);
applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops);
{
final VolumeVO volume = _volumeDao.findById(vol.getId());
volume.setMinIops(resizeParameter.newMinIops);
volume.setMaxIops(resizeParameter.newMaxIops);
volume.setSize(resizeParameter.newSize);
_volumeDao.update(volume.getId(), volume);
}
ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, dfm);
if (answers.hasError())
{
s_logger.error("Resize error: " + answers.get(0).getMessage());
errMsg = answers.get(0).getMessage();
} else
{
s_logger.info(String.format("Successfully resized %s to %d kib", rscName, dfm.getSizeKib()));
vol.setSize(resizeParameter.newSize);
vol.update();
}
} catch (ApiException apiExc)
{
s_logger.error(apiExc);
@ -765,12 +773,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
CreateCmdResult result;
if (errMsg != null)
{
if (errMsg != null) {
result = new CreateCmdResult(null, new Answer(null, false, errMsg));
result.setResult(errMsg);
} else
{
} else {
// notify guests
result = notifyResize(vol, oldSize, resizeParameter);
}
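As a quick sanity check of the byte-to-KiB conversion used by the new resizeResource() helper above (values are illustrative only):

// A 10 GiB request: 10L << 30 = 10737418240 bytes; 10737418240 / 1024 = 10485760 KiB,
// which is the value handed to VolumeDefinitionModify.setSizeKib().
long sizeByte = 10L << 30;       // 10737418240
long sizeKib = sizeByte / 1024;  // 10485760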

View File

@ -44,9 +44,6 @@
<plugins>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skipTests>true</skipTests>
</configuration>
<executions>
<execution>
<phase>integration-test</phase>

View File

@ -48,7 +48,7 @@ import com.github.tomakehurst.wiremock.junit.WireMockRule;
@RunWith(MockitoJUnitRunner.class)
public class ScaleIOGatewayClientImplTest {
private final int port = 443;
private final int port = 8443;
private final int timeout = 30;
private final int maxConnections = 50;
private final String username = "admin";
@ -70,7 +70,7 @@ public class ScaleIOGatewayClientImplTest {
.withHeader("content-type", "application/json;charset=UTF-8")
.withBody(sessionKey)));
client = new ScaleIOGatewayClientImpl("https://localhost/api", username, password, false, timeout, maxConnections);
client = new ScaleIOGatewayClientImpl(String.format("https://localhost:%d/api", port), username, password, false, timeout, maxConnections);
wireMockRule.stubFor(post("/api/types/Volume/instances")
.willReturn(aResponse()

View File

@ -19,23 +19,13 @@
package org.apache.cloudstack.storage.datastore.driver;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.when;
import java.util.Optional;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@ -62,12 +52,23 @@ import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import java.util.Optional;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.when;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.storage.MigrateVolumeAnswer;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.configuration.Config;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.Storage;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.VMInstanceDao;
@RunWith(PowerMockRunner.class)
@PrepareForTest(RemoteHostEndPoint.class)

View File

@ -24,8 +24,6 @@ import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.mockito.MockitoAnnotations.initMocks;
@ -40,13 +38,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManag
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientImpl;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.provider.ScaleIOHostListener;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.junit.Before;
@ -55,14 +51,14 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.mockito.stubbing.Answer;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
@ -81,7 +77,7 @@ import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.template.TemplateManager;
import com.cloud.utils.exception.CloudRuntimeException;
@PrepareForTest(ScaleIOGatewayClient.class)
@PrepareForTest(ScaleIOGatewayClientConnectionPool.class)
@RunWith(PowerMockRunner.class)
public class ScaleIOPrimaryDataStoreLifeCycleTest {
@ -114,20 +110,16 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
@Mock
ModifyStoragePoolAnswer answer;
@Spy
@InjectMocks
private StorageManager storageMgr = new StorageManagerImpl();
@Spy
@InjectMocks
private HypervisorHostListener hostListener = new ScaleIOHostListener();
@InjectMocks
private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest;
@Before
public void setUp() {
initMocks(this);
ReflectionTestUtils.setField(scaleIOPrimaryDataStoreLifeCycleTest, "storageMgr", storageMgr);
}
@Test
@ -135,9 +127,11 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
final DataStore dataStore = mock(DataStore.class);
when(dataStore.getId()).thenReturn(1L);
PowerMockito.mockStatic(ScaleIOGatewayClient.class);
PowerMockito.mockStatic(ScaleIOGatewayClientConnectionPool.class);
ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class);
when(ScaleIOGatewayClientConnectionPool.getInstance().getClient(1L, storagePoolDetailsDao)).thenReturn(client);
ScaleIOGatewayClientConnectionPool pool = mock(ScaleIOGatewayClientConnectionPool.class);
when(pool.getClient(1L, storagePoolDetailsDao)).thenReturn(client);
when(ScaleIOGatewayClientConnectionPool.getInstance()).thenAnswer((Answer<ScaleIOGatewayClientConnectionPool>) invocation -> pool);
when(client.haveConnectedSdcs()).thenReturn(true);
@ -157,28 +151,20 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store);
when(store.getId()).thenReturn(1L);
when(store.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex);
when(store.isShared()).thenReturn(true);
when(store.getName()).thenReturn("ScaleIOPool");
when(store.getStorageProviderName()).thenReturn(ScaleIOUtil.PROVIDER_NAME);
when(dataStoreProviderMgr.getDataStoreProvider(ScaleIOUtil.PROVIDER_NAME)).thenReturn(dataStoreProvider);
when(dataStoreProvider.getName()).thenReturn(ScaleIOUtil.PROVIDER_NAME);
HypervisorHostListener hostListener = Mockito.mock(HypervisorHostListener.class);
when(hostListener.hostConnect(Mockito.anyLong(), Mockito.anyLong())).thenReturn(true);
storageMgr.registerHostListener(ScaleIOUtil.PROVIDER_NAME, hostListener);
when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer);
when(answer.getResult()).thenReturn(true);
when(storagePoolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(null);
when(hostDao.findById(1L)).thenReturn(host1);
when(hostDao.findById(2L)).thenReturn(host2);
when(dataStoreHelper.attachZone(Mockito.any(DataStore.class))).thenReturn(null);
scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.KVM);
verify(storageMgr,times(2)).connectHostToSharedPool(Mockito.any(Long.class), Mockito.any(Long.class));
verify(storagePoolHostDao,times(2)).persist(Mockito.any(StoragePoolHostVO.class));
boolean result = scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.KVM);
assertThat(result).isTrue();
}
@Test(expected = CloudRuntimeException.class)
@ -239,6 +225,9 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest {
List<StoragePoolHostVO> poolHostVOs = new ArrayList<>();
when(storagePoolHostDao.listByPoolId(anyLong())).thenReturn(poolHostVOs);
when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true);
PowerMockito.mockStatic(ScaleIOGatewayClientConnectionPool.class);
ScaleIOGatewayClientConnectionPool pool = mock(ScaleIOGatewayClientConnectionPool.class);
when(ScaleIOGatewayClientConnectionPool.getInstance()).thenAnswer((Answer<ScaleIOGatewayClientConnectionPool>) invocation -> pool);
final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store);
assertThat(result).isTrue();
}

View File

@ -28,6 +28,7 @@ import java.util.Map;
import java.util.UUID;
import com.cloud.user.AccountManager;
import com.cloud.user.DomainManager;
import com.cloud.utils.component.ComponentLifecycleBase;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.api.LdapValidator;
@ -107,6 +108,13 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
super.configure(name, params);
LOGGER.debug("Configuring LDAP Manager");
addAccountRemovalListener();
addDomainRemovalListener();
return true;
}
private void addAccountRemovalListener() {
messageBus.subscribe(AccountManager.MESSAGE_REMOVE_ACCOUNT_EVENT, new MessageSubscriber() {
@Override
public void onPublishMessage(String senderAddress, String subject, Object args) {
@ -115,18 +123,37 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
long domainId = account.getDomainId();
LdapTrustMapVO ldapTrustMapVO = _ldapTrustMapDao.findByAccount(domainId, account.getAccountId());
if (ldapTrustMapVO != null) {
String msg = String.format("Removing link between LDAP: %s - type: %s and account: %s on domain: %s",
ldapTrustMapVO.getName(), ldapTrustMapVO.getType().name(), account.getAccountId(), domainId);
LOGGER.debug(msg);
_ldapTrustMapDao.remove(ldapTrustMapVO.getId());
removeTrustmap(ldapTrustMapVO);
}
} catch (final Exception e) {
LOGGER.error("Caught exception while removing account linked to LDAP", e);
}
}
});
}
return true;
private void addDomainRemovalListener() {
messageBus.subscribe(DomainManager.MESSAGE_REMOVE_DOMAIN_EVENT, new MessageSubscriber() {
@Override
public void onPublishMessage(String senderAddress, String subject, Object args) {
try {
long domainId = ((DomainVO) args).getId();
List<LdapTrustMapVO> ldapTrustMapVOs = _ldapTrustMapDao.searchByDomainId(domainId);
for (LdapTrustMapVO ldapTrustMapVO : ldapTrustMapVOs) {
removeTrustmap(ldapTrustMapVO);
}
} catch (final Exception e) {
LOGGER.error("Caught exception while removing trust-map for domain linked to LDAP", e);
}
}
});
}
private void removeTrustmap(LdapTrustMapVO ldapTrustMapVO) {
String msg = String.format("Removing link between LDAP: %s - type: %s and account: %s on domain: %s",
ldapTrustMapVO.getName(), ldapTrustMapVO.getType().name(), ldapTrustMapVO.getAccountId(), ldapTrustMapVO.getDomainId());
LOGGER.debug(msg);
_ldapTrustMapDao.remove(ldapTrustMapVO.getId());
}
@Override

View File

@ -1283,6 +1283,9 @@ public class ApiDBUtils {
// If this check is not passed, the hypervisor type will remain OVM.
type = HypervisorType.KVM;
break;
} else if (pool.getHypervisor() == HypervisorType.Custom) {
type = HypervisorType.Custom;
break;
}
}
}

View File

@ -37,6 +37,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import com.cloud.hypervisor.Hypervisor;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.ControlledEntity.ACLType;
import org.apache.cloudstack.affinity.AffinityGroup;
@ -730,7 +731,7 @@ public class ApiResponseHelper implements ResponseGenerator {
if (vm != null) {
vmSnapshotResponse.setVirtualMachineId(vm.getUuid());
vmSnapshotResponse.setVirtualMachineName(StringUtils.isEmpty(vm.getDisplayName()) ? vm.getHostName() : vm.getDisplayName());
vmSnapshotResponse.setHypervisor(vm.getHypervisorType());
vmSnapshotResponse.setHypervisor(vm.getHypervisorType().getHypervisorDisplayName());
DataCenterVO datacenter = ApiDBUtils.findZoneById(vm.getDataCenterId());
if (datacenter != null) {
vmSnapshotResponse.setZoneId(datacenter.getUuid());
@ -1393,7 +1394,7 @@ public class ApiResponseHelper implements ResponseGenerator {
clusterResponse.setZoneId(dc.getUuid());
clusterResponse.setZoneName(dc.getName());
}
clusterResponse.setHypervisorType(cluster.getHypervisorType().toString());
clusterResponse.setHypervisorType(cluster.getHypervisorType().getHypervisorDisplayName());
clusterResponse.setClusterType(cluster.getClusterType().toString());
clusterResponse.setAllocationState(cluster.getAllocationState().toString());
clusterResponse.setManagedState(cluster.getManagedState().toString());
@ -1589,7 +1590,7 @@ public class ApiResponseHelper implements ResponseGenerator {
vmResponse.setTemplateName(template.getName());
}
vmResponse.setCreated(vm.getCreated());
vmResponse.setHypervisor(vm.getHypervisorType().toString());
vmResponse.setHypervisor(vm.getHypervisorType().getHypervisorDisplayName());
if (vm.getHostId() != null) {
Host host = ApiDBUtils.findHostById(vm.getHostId());
@ -2752,7 +2753,7 @@ public class ApiResponseHelper implements ResponseGenerator {
public HypervisorCapabilitiesResponse createHypervisorCapabilitiesResponse(HypervisorCapabilities hpvCapabilities) {
HypervisorCapabilitiesResponse hpvCapabilitiesResponse = new HypervisorCapabilitiesResponse();
hpvCapabilitiesResponse.setId(hpvCapabilities.getUuid());
hpvCapabilitiesResponse.setHypervisor(hpvCapabilities.getHypervisorType());
hpvCapabilitiesResponse.setHypervisor(hpvCapabilities.getHypervisorType().getHypervisorDisplayName());
hpvCapabilitiesResponse.setHypervisorVersion(hpvCapabilities.getHypervisorVersion());
hpvCapabilitiesResponse.setIsSecurityGroupEnabled(hpvCapabilities.isSecurityGroupEnabled());
hpvCapabilitiesResponse.setMaxGuestsLimit(hpvCapabilities.getMaxGuestsLimit());
@ -3660,7 +3661,7 @@ public class ApiResponseHelper implements ResponseGenerator {
public GuestOsMappingResponse createGuestOSMappingResponse(GuestOSHypervisor guestOSHypervisor) {
GuestOsMappingResponse response = new GuestOsMappingResponse();
response.setId(guestOSHypervisor.getUuid());
response.setHypervisor(guestOSHypervisor.getHypervisorType());
response.setHypervisor(Hypervisor.HypervisorType.getType(guestOSHypervisor.getHypervisorType()).getHypervisorDisplayName());
response.setHypervisorVersion(guestOSHypervisor.getHypervisorVersion());
response.setOsNameForHypervisor((guestOSHypervisor.getGuestOsName()));
response.setIsUserDefined(Boolean.valueOf(guestOSHypervisor.getIsUserDefined()).toString());
@ -4888,7 +4889,7 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setId(certificate.getUuid());
response.setAlias(certificate.getAlias());
handleCertificateResponse(certificate.getCertificate(), response);
response.setHypervisor(certificate.getHypervisorType().name());
response.setHypervisor(certificate.getHypervisorType().getHypervisorDisplayName());
response.setObjectName("directdownloadcertificate");
return response;
}
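The recurring switch in this file from toString()/name() to getHypervisorDisplayName() is what lets API responses surface the admin-configured name for the Custom hypervisor. A hedged illustration of the intended difference follows; the Custom fallback shown here is an assumption, not code from this patch:

// Assumed behaviour, for illustration only: built-in types keep their enum name,
// while Custom is expected to render as the configured display name
// (the HypervisorGuru.HypervisorCustomDisplayName setting referenced elsewhere in this change).
String kvmName = Hypervisor.HypervisorType.KVM.getHypervisorDisplayName();       // expected "KVM"
String customName = Hypervisor.HypervisorType.Custom.getHypervisorDisplayName(); // expected: configured custom name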

View File

@ -3758,6 +3758,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
}
} else if (templateFilter == TemplateFilter.sharedexecutable || templateFilter == TemplateFilter.shared) {
// only show templates shared by others
if (permittedAccounts.isEmpty()) {
return new Pair<>(new ArrayList<>(), 0);
}
sc.addAnd("sharedAccountId", SearchCriteria.Op.IN, permittedAccountIds.toArray());
} else if (templateFilter == TemplateFilter.executable) {
SearchCriteria<TemplateJoinVO> scc = _templateJoinDao.createSearchCriteria();

View File

@ -126,7 +126,7 @@ public class DomainRouterJoinDaoImpl extends GenericDaoBase<DomainRouterJoinVO,
}
if (router.getHypervisorType() != null) {
routerResponse.setHypervisor(router.getHypervisorType().toString());
routerResponse.setHypervisor(router.getHypervisorType().getHypervisorDisplayName());
}
routerResponse.setHasAnnotation(annotationDao.hasAnnotations(router.getUuid(), AnnotationService.EntityType.VR.name(),
_accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())));

View File

@ -125,7 +125,10 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostResponse.setCpuNumber(host.getCpus());
hostResponse.setZoneId(host.getZoneUuid());
hostResponse.setDisconnectedOn(host.getDisconnectedOn());
hostResponse.setHypervisor(host.getHypervisorType());
if (host.getHypervisorType() != null) {
String hypervisorType = host.getHypervisorType().getHypervisorDisplayName();
hostResponse.setHypervisor(hypervisorType);
}
hostResponse.setHostType(host.getType());
hostResponse.setLastPinged(new Date(host.getLastPinged()));
Long mshostId = host.getManagementServerId();
@ -239,7 +242,8 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostResponse.setUefiCapabilty(new Boolean(false));
}
}
if (details.contains(HostDetails.all) && host.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
if (details.contains(HostDetails.all) && (host.getHypervisorType() == Hypervisor.HypervisorType.KVM ||
host.getHypervisorType() == Hypervisor.HypervisorType.Custom)) {
//only KVM (and the libvirt-based Custom hypervisor) has the requirement to return host details
try {
hostResponse.setDetails(hostDetails);
@ -303,7 +307,7 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostResponse.setCpuNumber(host.getCpus());
hostResponse.setZoneId(host.getZoneUuid());
hostResponse.setDisconnectedOn(host.getDisconnectedOn());
hostResponse.setHypervisor(host.getHypervisorType());
hostResponse.setHypervisor(host.getHypervisorType().getHypervisorDisplayName());
hostResponse.setHostType(host.getType());
hostResponse.setLastPinged(new Date(host.getLastPinged()));
hostResponse.setManagementServerId(host.getManagementServerId());

View File

@ -110,7 +110,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
poolResponse.setScope(pool.getScope().toString());
}
if (pool.getHypervisor() != null) {
poolResponse.setHypervisor(pool.getHypervisor().toString());
poolResponse.setHypervisor(pool.getHypervisor().getHypervisorDisplayName());
}
StoragePoolDetailVO poolType = storagePoolDetailsDao.findDetail(pool.getId(), "pool_type");
@ -201,7 +201,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
poolResponse.setCreated(pool.getCreated());
poolResponse.setScope(pool.getScope().toString());
if (pool.getHypervisor() != null) {
poolResponse.setHypervisor(pool.getHypervisor().toString());
poolResponse.setHypervisor(pool.getHypervisor().getHypervisorDisplayName());
}
long allocatedSize = pool.getUsedCapacity();

View File

@ -208,7 +208,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
templateResponse.setTemplateType(template.getTemplateType().toString());
}
templateResponse.setHypervisor(template.getHypervisorType().toString());
templateResponse.setHypervisor(template.getHypervisorType().getHypervisorDisplayName());
templateResponse.setOsTypeId(template.getGuestOSUuid());
templateResponse.setOsTypeName(template.getGuestOSName());
@ -330,7 +330,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
response.setOsTypeId(result.getGuestOSUuid());
response.setOsTypeName(result.getGuestOSName());
response.setBootable(result.isBootable());
response.setHypervisor(result.getHypervisorType().toString());
response.setHypervisor(result.getHypervisorType().getHypervisorDisplayName());
response.setDynamicallyScalable(result.isDynamicallyScalable());
// populate owner.

View File

@ -128,7 +128,7 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation<UserVmJo
UserVmResponse userVmResponse = new UserVmResponse();
if (userVm.getHypervisorType() != null) {
userVmResponse.setHypervisor(userVm.getHypervisorType().toString());
userVmResponse.setHypervisor(userVm.getHypervisorType().getHypervisorDisplayName());
}
userVmResponse.setId(userVm.getUuid());
userVmResponse.setName(userVm.getName());

View File

@ -21,6 +21,7 @@ import java.util.List;
import javax.inject.Inject;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.offering.DiskOffering;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@ -147,8 +148,10 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
volResponse.setSize(volume.getVolumeStoreSize());
volResponse.setCreated(volume.getCreatedOnStore());
if (view == ResponseView.Full)
volResponse.setHypervisor(ApiDBUtils.getHypervisorTypeFromFormat(volume.getDataCenterId(), volume.getFormat()).toString());
if (view == ResponseView.Full) {
Hypervisor.HypervisorType hypervisorTypeFromFormat = ApiDBUtils.getHypervisorTypeFromFormat(volume.getDataCenterId(), volume.getFormat());
volResponse.setHypervisor(hypervisorTypeFromFormat.getHypervisorDisplayName());
}
if (volume.getDownloadState() != Status.DOWNLOADED) {
String volumeStatus = "Processing";
if (volume.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
@ -209,9 +212,10 @@ public class VolumeJoinDaoImpl extends GenericDaoBaseWithTagInformation<VolumeJo
if (view == ResponseView.Full) {
if (volume.getState() != Volume.State.UploadOp) {
if (volume.getHypervisorType() != null) {
volResponse.setHypervisor(volume.getHypervisorType().toString());
volResponse.setHypervisor(volume.getHypervisorType().getHypervisorDisplayName());
} else {
volResponse.setHypervisor(ApiDBUtils.getHypervisorTypeFromFormat(volume.getDataCenterId(), volume.getFormat()).toString());
Hypervisor.HypervisorType hypervisorTypeFromFormat = ApiDBUtils.getHypervisorTypeFromFormat(volume.getDataCenterId(), volume.getFormat());
volResponse.setHypervisor(hypervisorTypeFromFormat.getHypervisorDisplayName());
}
}
Long poolId = volume.getPoolId();

View File

@ -46,6 +46,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.hypervisor.HypervisorGuru;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.affinity.AffinityGroup;
import org.apache.cloudstack.affinity.AffinityGroupService;
@ -458,7 +459,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
protected Set<String> configValuesForValidation;
private Set<String> weightBasedParametersForValidation;
private Set<String> overprovisioningFactorsForValidation;
public static final String VM_USERDATA_MAX_LENGTH_STRING = "vm.userdata.max.length";
public static final ConfigKey<Boolean> SystemVMUseLocalStorage = new ConfigKey<Boolean>(Boolean.class, "system.vm.use.local.storage", "Advanced", "false",
"Indicates whether to use local storage pools or shared storage pools for system VMs.", false, ConfigKey.Scope.Zone, null);
@ -489,8 +489,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
public static ConfigKey<Integer> VM_SERVICE_OFFERING_MAX_RAM_SIZE = new ConfigKey<Integer>("Advanced", Integer.class, "vm.serviceoffering.ram.size.max", "0", "Maximum RAM size in "
+ "MB for vm service offering. If 0 - no limitation", true);
public static final ConfigKey<Integer> VM_USERDATA_MAX_LENGTH = new ConfigKey<Integer>("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768",
"Max length of vm userdata after base64 decoding. Default is 32768 and maximum is 1048576", true);
public static final ConfigKey<Boolean> MIGRATE_VM_ACROSS_CLUSTERS = new ConfigKey<Boolean>(Boolean.class, "migrate.vm.across.clusters", "Advanced", "false",
"Indicates whether the VM can be migrated to different cluster if no host is found in same cluster",true, ConfigKey.Scope.Zone, null);
@ -774,6 +772,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
final TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
String previousValue = _configDao.getValue(name);
if (!_configDao.update(name, category, value)) {
s_logger.error("Failed to update configuration option, name: " + name + ", value:" + value);
throw new CloudRuntimeException("Failed to update configuration value. Please contact Cloud Support.");
@ -854,6 +853,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
} catch (final Throwable e) {
throw new CloudRuntimeException("Failed to clean up download URLs in template_store_ref or volume_store_ref due to exception ", e);
}
} else if (HypervisorGuru.HypervisorCustomDisplayName.key().equals(name)) {
updateCustomDisplayNameOnHypervisorsList(previousValue, value);
}
txn.commit();
@ -861,6 +862,20 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
return _configDao.getValue(name);
}
/**
* Updates the 'hypervisor.list' value to match the new custom hypervisor name set as newValue if the previous value was set
*/
private void updateCustomDisplayNameOnHypervisorsList(String previousValue, String newValue) {
String hypervisorListConfigName = Config.HypervisorList.key();
String hypervisors = _configDao.getValue(hypervisorListConfigName);
if (Arrays.asList(hypervisors.split(",")).contains(previousValue)) {
hypervisors = hypervisors.replace(previousValue, newValue);
s_logger.info(String.format("Updating the hypervisor list configuration '%s' " +
"to match the new custom hypervisor display name", hypervisorListConfigName));
_configDao.update(hypervisorListConfigName, hypervisors);
}
}
@Override
@ActionEvent(eventType = EventTypes.EVENT_CONFIGURATION_VALUE_EDIT, eventDescription = "updating configuration")
public Configuration updateConfiguration(final UpdateCfgCmd cmd) throws InvalidParameterValueException {
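A worked example of the new updateCustomDisplayNameOnHypervisorsList() helper above (the values are illustrative, not taken from any real deployment):

// If hypervisor.list currently holds "XenServer,KVM,MyCustomHV" and the custom display
// name is changed from "MyCustomHV" to "AcmeHV", the stored list becomes
// "XenServer,KVM,AcmeHV"; if the previous value is not in the list, nothing is updated.
String hypervisors = "XenServer,KVM,MyCustomHV";
String previousValue = "MyCustomHV";
String newValue = "AcmeHV";
if (Arrays.asList(hypervisors.split(",")).contains(previousValue)) {
    hypervisors = hypervisors.replace(previousValue, newValue); // "XenServer,KVM,AcmeHV"
}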

View File

@ -360,7 +360,10 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {VmMinMemoryEqualsMemoryDividedByMemOverprovisioningFactor, VmMinCpuSpeedEqualsCpuSpeedDividedByCpuOverprovisioningFactor };
return new ConfigKey<?>[] {VmMinMemoryEqualsMemoryDividedByMemOverprovisioningFactor,
VmMinCpuSpeedEqualsCpuSpeedDividedByCpuOverprovisioningFactor,
HypervisorCustomDisplayName
};
}
}

View File

@ -0,0 +1,37 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.discoverer;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.discoverer.LibvirtServerDiscoverer;
public class CustomServerDiscoverer extends LibvirtServerDiscoverer {
@Override
public Hypervisor.HypervisorType getHypervisorType() {
return Hypervisor.HypervisorType.Custom;
}
@Override
protected String getPatchPath() {
return "scripts/vm/hypervisor/kvm/";
}
@Override
public void processHostAdded(long hostId) {
// Not using super class implementation here.
}
}

View File

@ -110,7 +110,7 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements
@Override
public void processHostAdded(long hostId) {
HostVO host = hostDao.findById(hostId);
if (host != null) {
if (host != null && getHypervisorType().equals(host.getHypervisorType())) {
directDownloadManager.syncCertificatesToHost(hostId, host.getDataCenterId());
}
}

View File

@ -2673,6 +2673,11 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
vmData.add(new String[]{METATDATA_DIR, CLOUD_DOMAIN_ID_FILE, domain.getUuid()});
}
String customCloudName = VirtualMachineManager.MetadataCustomCloudName.valueIn(datacenterId);
if (org.apache.commons.lang3.StringUtils.isNotBlank(customCloudName)) {
vmData.add(new String[]{METATDATA_DIR, CLOUD_NAME_FILE, customCloudName});
}
return vmData;
}

View File

@ -231,6 +231,11 @@ public class CommandSetupHelper {
vmDataCommand.addVmData(NetworkModel.METATDATA_DIR, NetworkModel.CLOUD_DOMAIN_ID_FILE, domain.getUuid());
}
String customCloudName = VirtualMachineManager.MetadataCustomCloudName.valueIn(vm.getDataCenterId());
if (org.apache.commons.lang3.StringUtils.isNotBlank(customCloudName)) {
vmDataCommand.addVmData(NetworkModel.METATDATA_DIR, NetworkModel.CLOUD_NAME_FILE, customCloudName);
}
cmds.addCommand("vmdata", vmDataCommand);
}
}

View File

@ -36,11 +36,14 @@ import java.util.Random;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.alert.AlertManager;
import com.cloud.exception.StorageConflictException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.hypervisor.HypervisorGuru;
import org.apache.cloudstack.alert.AlertService;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
@ -299,6 +302,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
private AnnotationDao annotationDao;
@Inject
private VolumeDao volumeDao;
@Inject
private AlertManager alertManager;
@Inject
private AnnotationService annotationService;
private final long _nodeId = ManagementServerNode.getManagementServerId();
@ -646,7 +653,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
}
return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, cmd.getHypervisor(), hostTags, cmd.getFullUrlParams(), false);
String hypervisorType = cmd.getHypervisor().equalsIgnoreCase(HypervisorGuru.HypervisorCustomDisplayName.value()) ?
"Custom" : cmd.getHypervisor();
return discoverHostsFull(dcId, podId, clusterId, clusterName, url, username, password, hypervisorType, hostTags, cmd.getFullUrlParams(), false);
}
@Override
@ -1801,73 +1810,149 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
return hostInMaintenance;
}
private ResourceState.Event getResourceEventFromAllocationStateString(String allocationState) {
final ResourceState.Event resourceEvent = ResourceState.Event.toEvent(allocationState);
if (resourceEvent != ResourceState.Event.Enable && resourceEvent != ResourceState.Event.Disable) {
throw new InvalidParameterValueException(String.format("Invalid allocation state: %s, " +
"only Enable/Disable are allowed", allocationState));
}
return resourceEvent;
}
private void handleAutoEnableDisableKVMHost(boolean autoEnableDisableKVMSetting,
boolean isUpdateFromHostHealthCheck,
HostVO host, DetailVO hostDetail,
ResourceState.Event resourceEvent) {
if (autoEnableDisableKVMSetting) {
if (!isUpdateFromHostHealthCheck && hostDetail != null &&
!Boolean.parseBoolean(hostDetail.getValue()) && resourceEvent == ResourceState.Event.Enable) {
hostDetail.setValue(Boolean.TRUE.toString());
_hostDetailsDao.update(hostDetail.getId(), hostDetail);
} else if (!isUpdateFromHostHealthCheck && hostDetail != null &&
Boolean.parseBoolean(hostDetail.getValue()) && resourceEvent == ResourceState.Event.Disable) {
s_logger.info(String.format("The setting %s is enabled but the host %s is manually set into %s state," +
"ignoring future auto enabling of the host based on health check results",
AgentManager.EnableKVMAutoEnableDisable.key(), host.getName(), resourceEvent));
hostDetail.setValue(Boolean.FALSE.toString());
_hostDetailsDao.update(hostDetail.getId(), hostDetail);
} else if (hostDetail == null) {
String autoEnableValue = !isUpdateFromHostHealthCheck ? Boolean.FALSE.toString() : Boolean.TRUE.toString();
hostDetail = new DetailVO(host.getId(), ApiConstants.AUTO_ENABLE_KVM_HOST, autoEnableValue);
_hostDetailsDao.persist(hostDetail);
}
}
}
private boolean updateHostAllocationState(HostVO host, String allocationState,
boolean isUpdateFromHostHealthCheck) throws NoTransitionException {
boolean autoEnableDisableKVMSetting = AgentManager.EnableKVMAutoEnableDisable.valueIn(host.getClusterId()) &&
host.getHypervisorType() == HypervisorType.KVM;
ResourceState.Event resourceEvent = getResourceEventFromAllocationStateString(allocationState);
DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), ApiConstants.AUTO_ENABLE_KVM_HOST);
if ((host.getResourceState() == ResourceState.Enabled && resourceEvent == ResourceState.Event.Enable) ||
(host.getResourceState() == ResourceState.Disabled && resourceEvent == ResourceState.Event.Disable)) {
s_logger.info(String.format("The host %s is already on the allocated state", host.getName()));
return false;
}
if (isAutoEnableAttemptForADisabledHost(autoEnableDisableKVMSetting, isUpdateFromHostHealthCheck, hostDetail, resourceEvent)) {
s_logger.debug(String.format("The setting '%s' is enabled and the health check succeeds on the host, " +
"but the host has been manually disabled previously, ignoring auto enabling",
AgentManager.EnableKVMAutoEnableDisable.key()));
return false;
}
handleAutoEnableDisableKVMHost(autoEnableDisableKVMSetting, isUpdateFromHostHealthCheck, host,
hostDetail, resourceEvent);
resourceStateTransitTo(host, resourceEvent, _nodeId);
return true;
}
private boolean isAutoEnableAttemptForADisabledHost(boolean autoEnableDisableKVMSetting,
boolean isUpdateFromHostHealthCheck,
DetailVO hostDetail, ResourceState.Event resourceEvent) {
return autoEnableDisableKVMSetting && isUpdateFromHostHealthCheck && hostDetail != null &&
!Boolean.parseBoolean(hostDetail.getValue()) && resourceEvent == ResourceState.Event.Enable;
}
private void updateHostName(HostVO host, String name) {
s_logger.debug("Updating Host name to: " + name);
host.setName(name);
_hostDao.update(host.getId(), host);
}
private void updateHostGuestOSCategory(Long hostId, Long guestOSCategoryId) {
// Verify that the guest OS Category exists
if (!(guestOSCategoryId > 0) || _guestOSCategoryDao.findById(guestOSCategoryId) == null) {
throw new InvalidParameterValueException("Please specify a valid guest OS category.");
}
final GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId);
final DetailVO guestOSDetail = _hostDetailsDao.findDetail(hostId, "guest.os.category.id");
if (guestOSCategory != null && !GuestOSCategoryVO.CATEGORY_NONE.equalsIgnoreCase(guestOSCategory.getName())) {
// Create/Update an entry for guest.os.category.id
if (guestOSDetail != null) {
guestOSDetail.setValue(String.valueOf(guestOSCategory.getId()));
_hostDetailsDao.update(guestOSDetail.getId(), guestOSDetail);
} else {
final Map<String, String> detail = new HashMap<String, String>();
detail.put("guest.os.category.id", String.valueOf(guestOSCategory.getId()));
_hostDetailsDao.persist(hostId, detail);
}
} else {
// Delete any existing entry for guest.os.category.id
if (guestOSDetail != null) {
_hostDetailsDao.remove(guestOSDetail.getId());
}
}
}
private void updateHostTags(HostVO host, Long hostId, List<String> hostTags) {
List<VMInstanceVO> activeVMs = _vmDao.listByHostId(hostId);
s_logger.warn(String.format("The following active VMs [%s] are using the host [%s]. " +
"Updating the host tags will not affect them.", activeVMs, host));
if (s_logger.isDebugEnabled()) {
s_logger.debug("Updating Host Tags to :" + hostTags);
}
_hostTagsDao.persist(hostId, new ArrayList<>(new HashSet<>(hostTags)));
}
@Override
public Host updateHost(final UpdateHostCmd cmd) throws NoTransitionException {
Long hostId = cmd.getId();
String name = cmd.getName();
Long guestOSCategoryId = cmd.getOsCategoryId();
return updateHost(cmd.getId(), cmd.getName(), cmd.getOsCategoryId(),
cmd.getAllocationState(), cmd.getUrl(), cmd.getHostTags(), cmd.getAnnotation(), false);
}
private Host updateHost(Long hostId, String name, Long guestOSCategoryId, String allocationState,
String url, List<String> hostTags, String annotation, boolean isUpdateFromHostHealthCheck) throws NoTransitionException {
// Verify that the host exists
final HostVO host = _hostDao.findById(hostId);
if (host == null) {
throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist");
}
if (cmd.getAllocationState() != null) {
final ResourceState.Event resourceEvent = ResourceState.Event.toEvent(cmd.getAllocationState());
if (resourceEvent != ResourceState.Event.Enable && resourceEvent != ResourceState.Event.Disable) {
throw new CloudRuntimeException("Invalid allocation state:" + cmd.getAllocationState() + ", only Enable/Disable are allowed");
}
resourceStateTransitTo(host, resourceEvent, _nodeId);
boolean isUpdateHostAllocation = false;
if (StringUtils.isNotBlank(allocationState)) {
isUpdateHostAllocation = updateHostAllocationState(host, allocationState, isUpdateFromHostHealthCheck);
}
if (StringUtils.isNotBlank(name)) {
s_logger.debug("Updating Host name to: " + name);
host.setName(name);
_hostDao.update(host.getId(), host);
updateHostName(host, name);
}
if (guestOSCategoryId != null) {
// Verify that the guest OS Category exists
if (!(guestOSCategoryId > 0) || _guestOSCategoryDao.findById(guestOSCategoryId) == null) {
throw new InvalidParameterValueException("Please specify a valid guest OS category.");
}
final GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId);
final DetailVO guestOSDetail = _hostDetailsDao.findDetail(hostId, "guest.os.category.id");
if (guestOSCategory != null && !GuestOSCategoryVO.CATEGORY_NONE.equalsIgnoreCase(guestOSCategory.getName())) {
// Create/Update an entry for guest.os.category.id
if (guestOSDetail != null) {
guestOSDetail.setValue(String.valueOf(guestOSCategory.getId()));
_hostDetailsDao.update(guestOSDetail.getId(), guestOSDetail);
} else {
final Map<String, String> detail = new HashMap<String, String>();
detail.put("guest.os.category.id", String.valueOf(guestOSCategory.getId()));
_hostDetailsDao.persist(hostId, detail);
}
} else {
// Delete any existing entry for guest.os.category.id
if (guestOSDetail != null) {
_hostDetailsDao.remove(guestOSDetail.getId());
}
}
updateHostGuestOSCategory(hostId, guestOSCategoryId);
}
final List<String> hostTags = cmd.getHostTags();
if (hostTags != null) {
List<VMInstanceVO> activeVMs = _vmDao.listByHostId(hostId);
s_logger.warn(String.format("The following active VMs [%s] are using the host [%s]. Updating the host tags will not affect them.", activeVMs, host));
if (s_logger.isDebugEnabled()) {
s_logger.debug("Updating Host Tags to :" + hostTags);
}
_hostTagsDao.persist(hostId, new ArrayList(new HashSet<String>(hostTags)));
updateHostTags(host, hostId, hostTags);
}
final String url = cmd.getUrl();
if (url != null) {
_storageMgr.updateSecondaryStorage(cmd.getId(), cmd.getUrl());
_storageMgr.updateSecondaryStorage(hostId, url);
}
try {
_storageMgr.enableHost(hostId);
@ -1876,9 +1961,55 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager,
}
final HostVO updatedHost = _hostDao.findById(hostId);
sendAlertAndAnnotationForAutoEnableDisableKVMHostFeature(host, allocationState,
isUpdateFromHostHealthCheck, isUpdateHostAllocation, annotation);
return updatedHost;
}
private void sendAlertAndAnnotationForAutoEnableDisableKVMHostFeature(HostVO host, String allocationState,
boolean isUpdateFromHostHealthCheck,
boolean isUpdateHostAllocation, String annotation) {
boolean isAutoEnableDisableKVMSettingEnabled = host.getHypervisorType() == HypervisorType.KVM &&
AgentManager.EnableKVMAutoEnableDisable.valueIn(host.getClusterId());
if (!isAutoEnableDisableKVMSettingEnabled) {
if (StringUtils.isNotBlank(annotation)) {
annotationService.addAnnotation(annotation, AnnotationService.EntityType.HOST, host.getUuid(), true);
}
return;
}
if (!isUpdateHostAllocation) {
return;
}
String msg = String.format("The host %s (%s) ", host.getName(), host.getUuid());
ResourceState.Event resourceEvent = getResourceEventFromAllocationStateString(allocationState);
boolean isEventEnable = resourceEvent == ResourceState.Event.Enable;
if (isUpdateFromHostHealthCheck) {
msg += String.format("is auto-%s after %s health check results",
isEventEnable ? "enabled" : "disabled",
isEventEnable ? "successful" : "failed");
alertManager.sendAlert(AlertService.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(),
host.getPodId(), msg, msg);
} else {
msg += String.format("is %s despite the setting '%s' is enabled for the cluster %s",
isEventEnable ? "enabled" : "disabled", AgentManager.EnableKVMAutoEnableDisable.key(),
host.getClusterId());
if (StringUtils.isNotBlank(annotation)) {
msg += String.format(", reason: %s", annotation);
}
}
annotationService.addAnnotation(msg, AnnotationService.EntityType.HOST, host.getUuid(), true);
}
@Override
public Host autoUpdateHostAllocationState(Long hostId, ResourceState.Event resourceEvent) throws NoTransitionException {
return updateHost(hostId, null, null, resourceEvent.toString(), null, null, null, true);
}
@Override
public Cluster getCluster(final Long clusterId) {
return _clusterDao.findById(clusterId);
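To summarise the new autoUpdateHostAllocationState() entry point above: a health-check driven change flows through the same updateHost() path as the admin API call, but with isUpdateFromHostHealthCheck set to true so handleAutoEnableDisableKVMHost() can distinguish the two callers. A minimal illustrative call (the resourceManager and hostId variables here are placeholders, not code from this patch):

// Illustrative only: auto-disable a host after a failed health check report,
// letting the auto enable/disable bookkeeping and alerting above take effect.
resourceManager.autoUpdateHostAllocationState(hostId, ResourceState.Event.Disable);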

View File

@ -16,12 +16,7 @@
// under the License.
package com.cloud.server;
import static com.cloud.configuration.ConfigurationManagerImpl.VM_USERDATA_MAX_LENGTH;
import static com.cloud.vm.UserVmManager.MAX_USER_DATA_LENGTH_BYTES;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Field;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
@ -48,6 +43,7 @@ import javax.crypto.spec.SecretKeySpec;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.hypervisor.HypervisorGuru;
import org.apache.cloudstack.acl.ControlledEntity;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.affinity.AffinityGroupProcessor;
@ -56,7 +52,6 @@ import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiCommandResourceType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd;
import org.apache.cloudstack.api.command.admin.account.DeleteAccountCmd;
import org.apache.cloudstack.api.command.admin.account.DisableAccountCmd;
@ -610,6 +605,7 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.userdata.UserDataManager;
import org.apache.cloudstack.utils.CloudStackVersion;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.commons.codec.binary.Base64;
@ -691,7 +687,6 @@ import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.host.dao.HostTagsDao;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.HypervisorCapabilities;
import com.cloud.hypervisor.HypervisorCapabilitiesVO;
@ -821,10 +816,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
static final ConfigKey<Boolean> humanReadableSizes = new ConfigKey<Boolean>("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global);
public static final ConfigKey<String> customCsIdentifier = new ConfigKey<String>("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global);
private static final VirtualMachine.Type []systemVmTypes = { VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.ConsoleProxy};
private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES;
private static final int NUM_OF_2K_BLOCKS = 512;
private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES;
private static final List<HypervisorType> LIVE_MIGRATION_SUPPORTING_HYPERVISORS = List.of(HypervisorType.Hyperv, HypervisorType.KVM,
HypervisorType.LXC, HypervisorType.Ovm, HypervisorType.Ovm3, HypervisorType.Simulator, HypervisorType.VMware, HypervisorType.XenServer);
@ -976,6 +967,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
protected VMTemplateDao templateDao;
@Inject
protected AnnotationDao annotationDao;
@Inject
UserDataManager userDataManager;
private LockControllerListener _lockControllerListener;
private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker"));
@ -993,7 +986,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
protected List<DeploymentPlanner> _planners;
private final List<HypervisorType> supportedHypervisors = new ArrayList<Hypervisor.HypervisorType>();
private final List<HypervisorType> supportedHypervisors = new ArrayList<HypervisorType>();
public List<DeploymentPlanner> getPlanners() {
return _planners;
@ -1266,7 +1259,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
}
if (hypervisorType != null) {
sc.setParameters("hypervisorType", hypervisorType);
String hypervisorStr = (String) hypervisorType;
String hypervisorSearch = HypervisorType.getType(hypervisorStr).toString();
sc.setParameters("hypervisorType", hypervisorSearch);
}
if (clusterType != null) {
@ -4245,6 +4240,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
capabilities.put("allowUserViewAllDomainAccounts", allowUserViewAllDomainAccounts);
capabilities.put("kubernetesServiceEnabled", kubernetesServiceEnabled);
capabilities.put("kubernetesClusterExperimentalFeaturesEnabled", kubernetesClusterExperimentalFeaturesEnabled);
capabilities.put("customHypervisorDisplayName", HypervisorGuru.HypervisorCustomDisplayName.value());
capabilities.put(ApiServiceConfiguration.DefaultUIPageSize.key(), ApiServiceConfiguration.DefaultUIPageSize.value());
capabilities.put(ApiConstants.INSTANCES_STATS_RETENTION_TIME, StatsCollector.vmStatsMaxRetentionTime.value());
capabilities.put(ApiConstants.INSTANCES_STATS_USER_ONLY, StatsCollector.vmStatsCollectUserVMOnly.value());
@ -4376,7 +4372,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
} else {
final List<ClusterVO> clustersForZone = _clusterDao.listByZoneId(zoneId);
for (final ClusterVO cluster : clustersForZone) {
result.add(cluster.getHypervisorType().toString());
result.add(cluster.getHypervisorType().getHypervisorDisplayName());
}
}
@ -4609,58 +4605,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
String userdata = cmd.getUserData();
final String params = cmd.getParams();
userdata = validateUserData(userdata, cmd.getHttpMethod());
userdata = userDataManager.validateUserData(userdata, cmd.getHttpMethod());
return createAndSaveUserData(name, userdata, params, owner);
}
private String validateUserData(String userData, BaseCmd.HTTPMethod httpmethod) {
byte[] decodedUserData = null;
if (userData != null) {
if (userData.contains("%")) {
try {
userData = URLDecoder.decode(userData, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new InvalidParameterValueException("Url decoding of userdata failed.");
}
}
if (!Base64.isBase64(userData)) {
throw new InvalidParameterValueException("User data is not base64 encoded");
}
// If GET, use 4K. If POST, support up to 1M.
if (httpmethod.equals(BaseCmd.HTTPMethod.GET)) {
decodedUserData = validateAndDecodeByHTTPmethod(userData, MAX_HTTP_GET_LENGTH, BaseCmd.HTTPMethod.GET);
} else if (httpmethod.equals(BaseCmd.HTTPMethod.POST)) {
decodedUserData = validateAndDecodeByHTTPmethod(userData, MAX_HTTP_POST_LENGTH, BaseCmd.HTTPMethod.POST);
}
if (decodedUserData == null || decodedUserData.length < 1) {
throw new InvalidParameterValueException("User data is too short");
}
// Re-encode so that the '=' paddings are added if necessary since 'isBase64' does not require it, but python does on the VR.
return Base64.encodeBase64String(decodedUserData);
}
return null;
}
private byte[] validateAndDecodeByHTTPmethod(String userData, int maxHTTPlength, BaseCmd.HTTPMethod httpMethod) {
byte[] decodedUserData = null;
if (userData.length() >= maxHTTPlength) {
throw new InvalidParameterValueException(String.format("User data is too long for an http %s request", httpMethod.toString()));
}
if (userData.length() > VM_USERDATA_MAX_LENGTH.value()) {
throw new InvalidParameterValueException("User data has exceeded configurable max length : " + VM_USERDATA_MAX_LENGTH.value());
}
decodedUserData = Base64.decodeBase64(userData.getBytes());
if (decodedUserData.length > maxHTTPlength) {
throw new InvalidParameterValueException(String.format("User data is too long for http %s request", httpMethod.toString()));
}
return decodedUserData;
}
/**
* @param cmd
* @param owner
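For reference, the size limits in the helper removed above (validation is now delegated to UserDataManager) work out as follows, assuming MAX_USER_DATA_LENGTH_BYTES is 2048 bytes, which is what the "4K"/"1M" comment and the NUM_OF_2K_BLOCKS name imply:

// Derived from the removed constants, for illustration only:
int maxHttpGetLength = 2 * 2048;    // 4096 bytes   ("If GET, use 4K")
int maxHttpPostLength = 512 * 2048; // 1048576 bytes ("If POST, support up to 1M")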

View File

@ -39,6 +39,7 @@ import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd;
import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd;
import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd;
import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd;
import org.apache.cloudstack.direct.download.DirectDownloadManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@ -148,26 +149,30 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
}
/**
* Validate on random running KVM host that URL is reachable
* Validate on a random running host that the URL is reachable
* @param url url
*/
private Long performDirectDownloadUrlValidation(final String format, final String url, final List<Long> zoneIds) {
private Long performDirectDownloadUrlValidation(final String format, final Hypervisor.HypervisorType hypervisor,
final String url, final List<Long> zoneIds) {
HostVO host = null;
if (zoneIds != null && !zoneIds.isEmpty()) {
for (Long zoneId : zoneIds) {
host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId);
host = resourceManager.findOneRandomRunningHostByHypervisor(hypervisor, zoneId);
if (host != null) {
break;
}
}
} else {
host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, null);
host = resourceManager.findOneRandomRunningHostByHypervisor(hypervisor, null);
}
if (host == null) {
throw new CloudRuntimeException("Couldn't find a host to validate URL " + url);
}
CheckUrlCommand cmd = new CheckUrlCommand(format, url);
Integer socketTimeout = DirectDownloadManager.DirectDownloadSocketTimeout.value();
Integer connectRequestTimeout = DirectDownloadManager.DirectDownloadConnectionRequestTimeout.value();
Integer connectTimeout = DirectDownloadManager.DirectDownloadConnectTimeout.value();
CheckUrlCommand cmd = new CheckUrlCommand(format, url, connectTimeout, connectRequestTimeout, socketTimeout);
s_logger.debug("Performing URL " + url + " validation on host " + host.getId());
Answer answer = _agentMgr.easySend(host.getId(), cmd);
if (answer == null || !answer.getResult()) {
@ -198,7 +203,8 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
zoneIds = new ArrayList<>();
zoneIds.add(cmd.getZoneId());
}
Long templateSize = performDirectDownloadUrlValidation(ImageFormat.ISO.getFileExtension(), url, zoneIds);
Long templateSize = performDirectDownloadUrlValidation(ImageFormat.ISO.getFileExtension(),
Hypervisor.HypervisorType.KVM, url, zoneIds);
profile.setSize(templateSize);
}
profile.setUrl(url);
@ -221,9 +227,11 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase {
TemplateProfile profile = super.prepare(cmd);
String url = profile.getUrl();
UriUtils.validateUrl(cmd.getFormat(), url, cmd.isDirectDownload());
Hypervisor.HypervisorType hypervisor = Hypervisor.HypervisorType.getType(cmd.getHypervisor());
if (cmd.isDirectDownload()) {
DigestHelper.validateChecksumString(cmd.getChecksum());
Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(), url, cmd.getZoneIds());
Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(),
hypervisor, url, cmd.getZoneIds());
profile.setSize(templateSize);
}
profile.setUrl(url);

View File

@ -309,8 +309,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager,
if (type == HypervisorType.BareMetal) {
adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.BareMetal.getName());
} else {
// see HypervisorTemplateAdapter
adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.Hypervisor.getName());
// Get template adapter according to hypervisor
adapter = AdapterBase.getAdapterByName(_adapters, type.name());
// Otherwise, default to generic hypervisor template adapter
if (adapter == null) {
adapter = AdapterBase.getAdapterByName(_adapters, TemplateAdapterType.Hypervisor.getName());
}
}
if (adapter == null) {

View File

@ -1812,15 +1812,37 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
// If the user is a System user, return an error. We do not allow this
AccountVO account = _accountDao.findById(accountId);
if (account == null || account.getRemoved() != null) {
if (account != null) {
s_logger.info("The account:" + account.getAccountName() + " is already removed");
}
if (! isDeleteNeeded(account, accountId, caller)) {
return true;
}
// Account that manages project(s) can't be removed
List<Long> managedProjectIds = _projectAccountDao.listAdministratedProjectIds(accountId);
if (!managedProjectIds.isEmpty()) {
StringBuilder projectIds = new StringBuilder();
for (Long projectId : managedProjectIds) {
projectIds.append(projectId).append(", ");
}
throw new InvalidParameterValueException("The account id=" + accountId + " manages project(s) with ids " + projectIds + "and can't be removed");
}
CallContext.current().putContextParameter(Account.class, account.getUuid());
return deleteAccount(account, callerUserId, caller);
}
private boolean isDeleteNeeded(AccountVO account, long accountId, Account caller) {
if (account == null) {
s_logger.info(String.format("The account, identified by id %d, doesn't exist", accountId ));
return false;
}
if (account.getRemoved() != null) {
s_logger.info("The account:" + account.getAccountName() + " is already removed");
return false;
}
// don't allow removing Project account
if (account == null || account.getType() == Account.Type.PROJECT) {
if (account.getType() == Account.Type.PROJECT) {
throw new InvalidParameterValueException("The specified account does not exist in the system");
}
@ -1830,21 +1852,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
if (account.isDefault()) {
throw new InvalidParameterValueException("The account is default and can't be removed");
}
// Account that manages project(s) can't be removed
List<Long> managedProjectIds = _projectAccountDao.listAdministratedProjectIds(accountId);
if (!managedProjectIds.isEmpty()) {
StringBuilder projectIds = new StringBuilder();
for (Long projectId : managedProjectIds) {
projectIds.append(projectId + ", ");
}
throw new InvalidParameterValueException("The account id=" + accountId + " manages project(s) with ids " + projectIds + "and can't be removed");
}
CallContext.current().putContextParameter(Account.class, account.getUuid());
return deleteAccount(account, callerUserId, caller);
return true;
}
@Override
@ -3251,7 +3259,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M
_userDetailsDao.update(userDetailVO.getId(), userDetailVO);
}
} catch (CloudTwoFactorAuthenticationException e) {
UserDetailVO userDetailVO = _userDetailsDao.findDetail(userAccountId, "2FAsetupComplete");
UserDetailVO userDetailVO = _userDetailsDao.findDetail(userAccountId, UserDetailVO.Setup2FADetail);
if (userDetailVO != null && userDetailVO.getValue().equals(UserAccountVO.Setup2FAstatus.ENABLED.name())) {
disableTwoFactorAuthentication(userAccountId, caller, owner);
}
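
In the first AccountManagerImpl hunk above, the managed-project guard builds the id list with a manual StringBuilder loop, which leaves a trailing ", " in the exception message. A Collectors.joining-based variant (an alternative sketch, not what this commit does) performs the same check with a cleanly separated list:

import java.util.List;
import java.util.stream.Collectors;

public class ManagedProjectGuard {

    // Throws when the account still administers projects, listing their ids.
    static void ensureNoManagedProjects(long accountId, List<Long> managedProjectIds) {
        if (!managedProjectIds.isEmpty()) {
            String projectIds = managedProjectIds.stream()
                    .map(String::valueOf)
                    .collect(Collectors.joining(", "));
            throw new IllegalArgumentException("The account id=" + accountId
                    + " manages project(s) with ids " + projectIds + " and can't be removed");
        }
    }

    public static void main(String[] args) {
        ensureNoManagedProjects(10L, List.of());        // no managed projects, passes
        ensureNoManagedProjects(10L, List.of(4L, 7L));  // throws, message lists "4, 7"
    }
}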

View File

@ -344,16 +344,8 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
@Override
public boolean deleteDomain(DomainVO domain, Boolean cleanup) {
GlobalLock lock = getGlobalLock("AccountCleanup");
if (lock == null) {
s_logger.debug("Couldn't get the global lock");
return false;
}
if (!lock.lock(30)) {
s_logger.debug("Couldn't lock the db");
return false;
}
GlobalLock lock = getGlobalLock();
if (lock == null) return false;
try {
// mark domain as inactive
@ -361,42 +353,60 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom
domain.setState(Domain.State.Inactive);
_domainDao.update(domain.getId(), domain);
try {
long ownerId = domain.getAccountId();
if (BooleanUtils.toBoolean(cleanup)) {
tryCleanupDomain(domain, ownerId);
} else {
removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources(domain);
}
if (!_configMgr.releaseDomainSpecificVirtualRanges(domain.getId())) {
CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because failed to release domain specific virtual ip ranges");
e.addProxyObject(domain.getUuid(), "domainId");
throw e;
} else {
s_logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup.");
}
cleanupDomainDetails(domain.getId());
cleanupDomainOfferings(domain.getId());
annotationDao.removeByEntityType(AnnotationService.EntityType.DOMAIN.name(), domain.getUuid());
CallContext.current().putContextParameter(Domain.class, domain.getUuid());
return true;
} catch (Exception ex) {
s_logger.error("Exception deleting domain with id " + domain.getId(), ex);
if (ex instanceof CloudRuntimeException) {
rollbackDomainState(domain);
throw (CloudRuntimeException)ex;
}
else
return false;
}
return cleanDomain(domain, cleanup);
}
finally {
lock.unlock();
}
}
private GlobalLock getGlobalLock() {
GlobalLock lock = getGlobalLock("DomainCleanup");
if (lock == null) {
s_logger.debug("Couldn't get the global lock");
return null;
}
if (!lock.lock(30)) {
s_logger.debug("Couldn't lock the db");
return null;
}
return lock;
}
private boolean cleanDomain(DomainVO domain, Boolean cleanup) {
try {
long ownerId = domain.getAccountId();
if (BooleanUtils.toBoolean(cleanup)) {
tryCleanupDomain(domain, ownerId);
} else {
removeDomainWithNoAccountsForCleanupNetworksOrDedicatedResources(domain);
}
if (!_configMgr.releaseDomainSpecificVirtualRanges(domain.getId())) {
CloudRuntimeException e = new CloudRuntimeException("Can't delete the domain yet because failed to release domain specific virtual ip ranges");
e.addProxyObject(domain.getUuid(), "domainId");
throw e;
} else {
s_logger.debug("Domain specific Virtual IP ranges " + " are successfully released as a part of domain id=" + domain.getId() + " cleanup.");
}
cleanupDomainDetails(domain.getId());
cleanupDomainOfferings(domain.getId());
annotationDao.removeByEntityType(AnnotationService.EntityType.DOMAIN.name(), domain.getUuid());
CallContext.current().putContextParameter(Domain.class, domain.getUuid());
return true;
} catch (Exception ex) {
s_logger.error("Exception deleting domain with id " + domain.getId(), ex);
if (ex instanceof CloudRuntimeException) {
rollbackDomainState(domain);
throw (CloudRuntimeException)ex;
}
else
return false;
}
}
/**
* Roll back domain state to Active
* @param domain domain
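
The deleteDomain refactor above extracts lock acquisition into getGlobalLock() and the actual work into cleanDomain(), keeping the unlock in the finally block. A plain-Java sketch of that acquire-with-timeout / finally-unlock discipline using a ReentrantLock (a stand-in only; CloudStack's GlobalLock is a shared, DB-backed lock, and the 30-second timeout mirrors lock.lock(30) above):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class LockedCleanup {

    private static final ReentrantLock CLEANUP_LOCK = new ReentrantLock();

    // Runs the cleanup only if the lock is obtained within 30 seconds.
    static boolean runCleanup(Runnable cleanup) throws InterruptedException {
        if (!CLEANUP_LOCK.tryLock(30, TimeUnit.SECONDS)) {
            return false; // mirrors the "Couldn't lock the db" early return
        }
        try {
            cleanup.run();
            return true;
        } finally {
            CLEANUP_LOCK.unlock();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(runCleanup(() -> System.out.println("cleaning up domain ...")));
    }
}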

View File

@ -58,8 +58,6 @@ public interface UserVmManager extends UserVmService {
"Destroys the VM's root volume when the VM is destroyed.",
true, ConfigKey.Scope.Domain);
static final int MAX_USER_DATA_LENGTH_BYTES = 2048;
public static final String CKS_NODE = "cksnode";
/**

View File

@ -123,6 +123,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.userdata.UserDataManager;
import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.codec.binary.Base64;
@ -598,6 +599,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
protected static long ROOT_DEVICE_ID = 0;
private static final int MAX_USER_DATA_LENGTH_BYTES = 2048;
private static final int MAX_HTTP_GET_LENGTH = 2 * MAX_USER_DATA_LENGTH_BYTES;
private static final int NUM_OF_2K_BLOCKS = 512;
private static final int MAX_HTTP_POST_LENGTH = NUM_OF_2K_BLOCKS * MAX_USER_DATA_LENGTH_BYTES;
@ -611,6 +613,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
@Inject
private ManagementService _mgr;
@Inject
private UserDataManager userDataManager;
private static final ConfigKey<Integer> VmIpFetchWaitInterval = new ConfigKey<Integer>("Advanced", Integer.class, "externaldhcp.vmip.retrieval.interval", "180",
"Wait Interval (in seconds) for shared network vm dhcp ip addr fetch for next iteration ", true);
@ -648,6 +653,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
HypervisorType.Simulator
));
protected static final List<HypervisorType> ROOT_DISK_SIZE_OVERRIDE_SUPPORTING_HYPERVISORS = Arrays.asList(
HypervisorType.KVM,
HypervisorType.XenServer,
HypervisorType.VMware,
HypervisorType.Simulator,
HypervisorType.Custom
);
private static final List<HypervisorType> HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS = Arrays.asList(HypervisorType.KVM, HypervisorType.VMware);
@Override
@ -939,7 +952,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
userDataDetails = cmd.getUserdataDetails().toString();
}
userData = finalizeUserData(userData, userDataId, template);
userData = validateUserData(userData, cmd.getHttpMethod());
userData = userDataManager.validateUserData(userData, cmd.getHttpMethod());
userVm.setUserDataId(userDataId);
userVm.setUserData(userData);
@ -2093,6 +2106,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
for (final VolumeVO rootVolumeOfVm : vols) {
DiskOfferingVO currentRootDiskOffering = _diskOfferingDao.findById(rootVolumeOfVm.getDiskOfferingId());
if (currentRootDiskOffering.getDiskSize() == 0 && newDiskOffering.getDiskSize() == 0) {
s_logger.debug("This change of service offering doesn't involve custom root disk sizes, skipping volume resize for volume: " + rootVolumeOfVm);
continue;
}
Long rootDiskSize= null;
Long rootDiskSizeBytes = null;
if (customParameters.containsKey(ApiConstants.ROOT_DISK_SIZE)) {
@ -2957,7 +2976,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
if (userData != null) {
// check and replace newlines
userData = userData.replace("\\n", "");
userData = validateUserData(userData, httpMethod);
userData = userDataManager.validateUserData(userData, httpMethod);
// update userData on domain router.
updateUserdata = true;
} else {
@ -4091,7 +4110,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
_accountMgr.checkAccess(owner, AccessType.UseEntry, false, template);
// check if the user data is correct
userData = validateUserData(userData, httpmethod);
userData = userDataManager.validateUserData(userData, httpmethod);
// Find an SSH public key corresponding to the key pair name, if one is
// given
@ -4354,7 +4373,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
* @throws InvalidParameterValueException if the hypervisor does not support rootdisksize override
*/
protected void verifyIfHypervisorSupportsRootdiskSizeOverride(HypervisorType hypervisorType) {
if (!(hypervisorType == HypervisorType.KVM || hypervisorType == HypervisorType.XenServer || hypervisorType == HypervisorType.VMware || hypervisorType == HypervisorType.Simulator)) {
if (!ROOT_DISK_SIZE_OVERRIDE_SUPPORTING_HYPERVISORS.contains(hypervisorType)) {
throw new InvalidParameterValueException("Hypervisor " + hypervisorType + " does not support rootdisksize override");
}
}
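
verifyIfHypervisorSupportsRootdiskSizeOverride now delegates to ROOT_DISK_SIZE_OVERRIDE_SUPPORTING_HYPERVISORS.contains(...) instead of chaining == comparisons, which is also what lets the unit test further down iterate over the same list. A compact standalone version of that membership check, using a stand-in enum rather than CloudStack's HypervisorType:

import java.util.List;

public class RootDiskOverrideCheck {

    enum Hv { KVM, XenServer, VMware, Simulator, Custom, LXC, BareMetal }

    // Stand-in for ROOT_DISK_SIZE_OVERRIDE_SUPPORTING_HYPERVISORS; Custom is the entry added in this diff.
    static final List<Hv> SUPPORTING = List.of(Hv.KVM, Hv.XenServer, Hv.VMware, Hv.Simulator, Hv.Custom);

    static void verifyRootDiskSizeOverride(Hv type) {
        if (!SUPPORTING.contains(type)) {
            throw new IllegalArgumentException("Hypervisor " + type + " does not support rootdisksize override");
        }
    }

    public static void main(String[] args) {
        verifyRootDiskSizeOverride(Hv.Custom); // accepted
        try {
            verifyRootDiskSizeOverride(Hv.BareMetal);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // rejected
        }
    }
}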
@ -5070,6 +5089,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
Answer startAnswer = cmds.getAnswer(StartAnswer.class);
String returnedIp = null;
String originalIp = null;
String originalVncPassword = profile.getVirtualMachine().getVncPassword();
String returnedVncPassword = null;
if (startAnswer != null) {
StartAnswer startAns = (StartAnswer)startAnswer;
VirtualMachineTO vmTO = startAns.getVirtualMachine();
@ -5078,6 +5099,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
returnedIp = nicTO.getIp();
}
}
returnedVncPassword = vmTO.getVncPassword();
}
List<NicVO> nics = _nicDao.listByVmId(vm.getId());
@ -5129,6 +5151,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
}
updateVncPasswordIfItHasChanged(originalVncPassword, returnedVncPassword, profile);
// get system ip and create static nat rule for the vm
try {
_rulesMgr.getSystemIpAndEnableStaticNatForVm(profile.getVirtualMachine(), false);
@ -5163,6 +5187,14 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return true;
}
protected void updateVncPasswordIfItHasChanged(String originalVncPassword, String returnedVncPassword, VirtualMachineProfile profile) {
if (returnedVncPassword != null && !originalVncPassword.equals(returnedVncPassword)) {
UserVmVO userVm = _vmDao.findById(profile.getId());
userVm.setVncPassword(returnedVncPassword);
_vmDao.update(userVm.getId(), userVm);
}
}
@Override
public void finalizeExpunge(VirtualMachine vm) {
}
@ -5760,9 +5792,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
if (userDataId != null) {
UserData apiUserDataVO = userDataDao.findById(userDataId);
return doConcateUserDatas(templateUserDataVO.getUserData(), apiUserDataVO.getUserData());
return userDataManager.concatenateUserData(templateUserDataVO.getUserData(), apiUserDataVO.getUserData(), null);
} else if (StringUtils.isNotEmpty(userData)) {
return doConcateUserDatas(templateUserDataVO.getUserData(), userData);
return userDataManager.concatenateUserData(templateUserDataVO.getUserData(), userData, null);
} else {
return templateUserDataVO.getUserData();
}
@ -5780,16 +5812,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
return null;
}
private String doConcateUserDatas(String userdata1, String userdata2) {
byte[] userdata1Bytes = Base64.decodeBase64(userdata1.getBytes());
byte[] userdata2Bytes = Base64.decodeBase64(userdata2.getBytes());
byte[] finalUserDataBytes = new byte[userdata1Bytes.length + userdata2Bytes.length];
System.arraycopy(userdata1Bytes, 0, finalUserDataBytes, 0, userdata1Bytes.length);
System.arraycopy(userdata2Bytes, 0, finalUserDataBytes, userdata1Bytes.length, userdata2Bytes.length);
return Base64.encodeBase64String(finalUserDataBytes);
}
@Override
public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException,
StorageUnavailableException, ResourceAllocationException {
@ -5885,6 +5907,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
}
String userData = cmd.getUserData();
userData = userDataManager.validateUserData(userData, cmd.getHttpMethod());
Long userDataId = cmd.getUserdataId();
String userDataDetails = null;
if (MapUtils.isNotEmpty(cmd.getUserdataDetails())) {
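
Several hunks above replace the private doConcateUserDatas helper (shown removed) with userDataManager.concatenateUserData(templateUserData, userData, null). Judging from the removed code, the operation is a byte-level concatenation of two base64 blobs; the third argument is new in this commit and is passed as null here. A self-contained sketch of that decode-append-reencode step with java.util.Base64 (not the UserDataManager implementation itself):

import java.util.Base64;

public class UserDataConcat {

    // Decodes both base64 userdata blobs, appends the raw bytes, and re-encodes the result.
    static String concat(String base64UserData1, String base64UserData2) {
        byte[] first = Base64.getDecoder().decode(base64UserData1);
        byte[] second = Base64.getDecoder().decode(base64UserData2);
        byte[] merged = new byte[first.length + second.length];
        System.arraycopy(first, 0, merged, 0, first.length);
        System.arraycopy(second, 0, merged, first.length, second.length);
        return Base64.getEncoder().encodeToString(merged);
    }

    public static void main(String[] args) {
        String templatePart = Base64.getEncoder().encodeToString("#cloud-config\n".getBytes());
        String apiPart = Base64.getEncoder().encodeToString("runcmd:\n - echo hello\n".getBytes());
        System.out.println(concat(templatePart, apiPart));
    }
}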

View File

@ -42,6 +42,7 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer;
import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand;
@ -329,6 +330,8 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
Long[] hostsToRetry = getHostsToRetryOn(host, storagePoolVO);
int hostIndex = 0;
Answer answer = null;
String answerDetails = "";
String errorDetails = "";
Long hostToSendDownloadCmd = hostsToRetry[hostIndex];
boolean continueRetrying = true;
while (!downloaded && retry > 0 && continueRetrying) {
@ -349,6 +352,7 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
if (answer != null) {
DirectDownloadAnswer ans = (DirectDownloadAnswer)answer;
downloaded = answer.getResult();
answerDetails = answer.getDetails();
continueRetrying = ans.isRetryOnOtherHosts();
}
hostToSendDownloadCmd = hostsToRetry[(hostIndex + 1) % hostsToRetry.length];
@ -362,7 +366,13 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown
}
if (!downloaded) {
logUsageEvent(template, poolId);
throw new CloudRuntimeException("Template " + template.getId() + " could not be downloaded on pool " + poolId + ", failing after trying on several hosts");
if (!answerDetails.isEmpty()){
Account caller = CallContext.current().getCallingAccount();
if (caller != null && caller.getType() == Account.Type.ADMIN){
errorDetails = String.format(" Details: %s", answerDetails);
}
}
throw new CloudRuntimeException(String.format("Template %d could not be downloaded on pool %d, failing after trying on several hosts%s", template.getId(), poolId, errorDetails));
}
return answer;
}
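
The DirectDownloadManagerImpl hunk above records the last answer's details and appends them to the failure message only when the calling account is an admin, so low-level agent errors are not leaked to regular users. The same decision restated as a small standalone helper (stand-in enum instead of Account.Type, same message format as the diff):

public class DownloadErrorMessages {

    enum AccountType { ADMIN, USER }

    // Appends agent-side details to the error only for admin callers.
    static String downloadFailureMessage(long templateId, long poolId, String details, AccountType caller) {
        String errorDetails = "";
        if (details != null && !details.isEmpty() && caller == AccountType.ADMIN) {
            errorDetails = String.format(" Details: %s", details);
        }
        return String.format(
                "Template %d could not be downloaded on pool %d, failing after trying on several hosts%s",
                templateId, poolId, errorDetails);
    }

    public static void main(String[] args) {
        System.out.println(downloadFailureMessage(42L, 7L, "connection refused", AccountType.USER));
        System.out.println(downloadFailureMessage(42L, 7L, "connection refused", AccountType.ADMIN));
    }
}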

View File

@ -38,6 +38,11 @@
<property name="name" value="Lxc Discover" />
</bean>
<bean id="CustomServerDiscoverer"
class="com.cloud.hypervisor.discoverer.CustomServerDiscoverer">
<property name="name" value="CustomHW Agent" />
</bean>
<bean id="dummyHostDiscoverer" class="com.cloud.resource.DummyHostDiscoverer">
<property name="name" value="dummyHostDiscoverer" />
</bean>

View File

@ -73,6 +73,11 @@ public class MockResourceManagerImpl extends ManagerBase implements ResourceMana
return null;
}
@Override
public Host autoUpdateHostAllocationState(Long hostId, ResourceState.Event resourceEvent) throws NoTransitionException {
return null;
}
/* (non-Javadoc)
* @see com.cloud.resource.ResourceService#cancelMaintenance(com.cloud.api.commands.CancelMaintenanceCmd)
*/

View File

@ -22,6 +22,35 @@ import static org.mockito.Mockito.any;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd;
import org.apache.cloudstack.api.command.user.ssh.RegisterSSHKeyPairCmd;
import org.apache.cloudstack.api.command.user.userdata.DeleteUserDataCmd;
import org.apache.cloudstack.api.command.user.userdata.ListUserDataCmd;
import org.apache.cloudstack.api.command.user.userdata.RegisterUserDataCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.userdata.UserDataManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.reflect.Whitebox;
import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.dc.Vlan.VlanType;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.DetailVO;
@ -49,37 +78,8 @@ import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.UserVmDetailVO;
import com.cloud.vm.UserVmVO;
import com.cloud.vm.dao.UserVmDetailsDao;
import com.cloud.vm.dao.UserVmDao;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseCmd;
import org.apache.cloudstack.api.command.user.address.ListPublicIpAddressesCmd;
import org.apache.cloudstack.api.command.user.ssh.RegisterSSHKeyPairCmd;
import org.apache.cloudstack.api.command.user.userdata.DeleteUserDataCmd;
import org.apache.cloudstack.api.command.user.userdata.ListUserDataCmd;
import org.apache.cloudstack.api.command.user.userdata.RegisterUserDataCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.Spy;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.reflect.Whitebox;
import org.springframework.test.util.ReflectionTestUtils;
import java.util.ArrayList;
import java.util.List;
import com.cloud.vm.dao.UserVmDetailsDao;
@RunWith(PowerMockRunner.class)
@PrepareForTest(CallContext.class)
@ -121,6 +121,9 @@ public class ManagementServerImplTest {
@Mock
UserVmDao _userVmDao;
@Mock
UserDataManager userDataManager;
@Spy
ManagementServerImpl spy = new ManagementServerImpl();
@ -145,6 +148,7 @@ public class ManagementServerImplTest {
spy.annotationDao = annotationDao;
spy._UserVmDetailsDao = userVmDetailsDao;
spy._detailsDao = hostDetailsDao;
spy.userDataManager = userDataManager;
}
@After
@ -304,13 +308,15 @@ public class ManagementServerImplTest {
when(callContextMock.getCallingAccount()).thenReturn(account);
when(_accountMgr.finalizeOwner(nullable(Account.class), nullable(String.class), nullable(Long.class), nullable(Long.class))).thenReturn(account);
String testUserData = "testUserdata";
RegisterUserDataCmd cmd = Mockito.mock(RegisterUserDataCmd.class);
when(cmd.getUserData()).thenReturn("testUserdata");
when(cmd.getUserData()).thenReturn(testUserData);
when(cmd.getName()).thenReturn("testName");
when(cmd.getHttpMethod()).thenReturn(BaseCmd.HTTPMethod.GET);
when(_userDataDao.findByName(account.getAccountId(), account.getDomainId(), "testName")).thenReturn(null);
when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), "testUserdata")).thenReturn(null);
when(_userDataDao.findByUserData(account.getAccountId(), account.getDomainId(), testUserData)).thenReturn(null);
when(userDataManager.validateUserData(testUserData,BaseCmd.HTTPMethod.GET)).thenReturn(testUserData);
UserData userData = spy.registerUserData(cmd);
Assert.assertEquals("testName", userData.getName());

View File

@ -136,7 +136,7 @@ public class DomainManagerImplTest {
public void setup() throws NoSuchFieldException, SecurityException,
IllegalArgumentException, IllegalAccessException {
Mockito.doReturn(adminAccount).when(domainManager).getCaller();
Mockito.doReturn(lock).when(domainManager).getGlobalLock("AccountCleanup");
Mockito.doReturn(lock).when(domainManager).getGlobalLock("DomainCleanup");
Mockito.when(lock.lock(Mockito.anyInt())).thenReturn(true);
Mockito.when(domainDaoMock.findById(DOMAIN_ID)).thenReturn(domain);
Mockito.when(domain.getAccountId()).thenReturn(ACCOUNT_ID);

View File

@ -50,6 +50,7 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd;
import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
import org.apache.cloudstack.userdata.UserDataManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -193,6 +194,12 @@ public class UserVmManagerImplTest {
@Mock
private ServiceOfferingVO serviceOffering;
@Mock
VirtualMachineProfile virtualMachineProfile;
@Mock
UserDataManager userDataManager;
private static final long vmId = 1l;
private static final long zoneId = 2L;
private static final long accountId = 3L;
@ -220,6 +227,8 @@ public class UserVmManagerImplTest {
customParameters.put(VmDetailConstants.ROOT_DISK_SIZE, "123");
lenient().doNothing().when(resourceLimitMgr).incrementResourceCount(anyLong(), any(Resource.ResourceType.class));
lenient().doNothing().when(resourceLimitMgr).decrementResourceCount(anyLong(), any(Resource.ResourceType.class), anyLong());
Mockito.when(virtualMachineProfile.getId()).thenReturn(vmId);
}
@After
@ -548,13 +557,10 @@ public class UserVmManagerImplTest {
public void verifyIfHypervisorSupportRootdiskSizeOverrideTest() {
Hypervisor.HypervisorType[] hypervisorTypeArray = Hypervisor.HypervisorType.values();
int exceptionCounter = 0;
int expectedExceptionCounter = hypervisorTypeArray.length - 4;
int expectedExceptionCounter = hypervisorTypeArray.length - 5;
for(int i = 0; i < hypervisorTypeArray.length; i++) {
if (Hypervisor.HypervisorType.KVM == hypervisorTypeArray[i]
|| Hypervisor.HypervisorType.XenServer == hypervisorTypeArray[i]
|| Hypervisor.HypervisorType.VMware == hypervisorTypeArray[i]
|| Hypervisor.HypervisorType.Simulator == hypervisorTypeArray[i]) {
if (UserVmManagerImpl.ROOT_DISK_SIZE_OVERRIDE_SUPPORTING_HYPERVISORS.contains(hypervisorTypeArray[i])) {
userVmManagerImpl.verifyIfHypervisorSupportsRootdiskSizeOverride(hypervisorTypeArray[i]);
} else {
try {
@ -704,29 +710,6 @@ public class UserVmManagerImplTest {
Assert.assertEquals(finalUserdata, templateUserData);
}
@Test
public void testUserDataAppend() {
String userData = "testUserdata";
String templateUserData = "testTemplateUserdata";
Long userDataId = 1L;
VirtualMachineTemplate template = Mockito.mock(VirtualMachineTemplate.class);
when(template.getUserDataId()).thenReturn(2L);
when(template.getUserDataOverridePolicy()).thenReturn(UserData.UserDataOverridePolicy.APPEND);
UserDataVO templateUserDataVO = Mockito.mock(UserDataVO.class);
doReturn(templateUserDataVO).when(userDataDao).findById(2L);
when(templateUserDataVO.getUserData()).thenReturn(templateUserData);
UserDataVO apiUserDataVO = Mockito.mock(UserDataVO.class);
doReturn(apiUserDataVO).when(userDataDao).findById(userDataId);
when(apiUserDataVO.getUserData()).thenReturn(userData);
String finalUserdata = userVmManagerImpl.finalizeUserData(null, userDataId, template);
Assert.assertEquals(finalUserdata, templateUserData+userData);
}
@Test
public void testUserDataWithoutTemplate() {
String userData = "testUserdata";
@ -846,10 +829,13 @@ public class UserVmManagerImplTest {
when(templateDao.findByIdIncludingRemoved(2L)).thenReturn(template);
when(template.getUserDataId()).thenReturn(null);
when(cmd.getUserData()).thenReturn("testUserdata");
String testUserData = "testUserdata";
when(cmd.getUserData()).thenReturn(testUserData);
when(cmd.getUserdataId()).thenReturn(null);
when(cmd.getHttpMethod()).thenReturn(HTTPMethod.GET);
when(userDataManager.validateUserData(testUserData, HTTPMethod.GET)).thenReturn(testUserData);
try {
doNothing().when(userVmManagerImpl).updateUserData(userVmVO);
userVmManagerImpl.resetVMUserData(cmd);
@ -883,12 +869,15 @@ public class UserVmManagerImplTest {
when(templateDao.findByIdIncludingRemoved(2L)).thenReturn(template);
when(template.getUserDataId()).thenReturn(null);
String testUserData = "testUserdata";
when(cmd.getUserdataId()).thenReturn(1L);
UserDataVO apiUserDataVO = Mockito.mock(UserDataVO.class);
when(userDataDao.findById(1L)).thenReturn(apiUserDataVO);
when(apiUserDataVO.getUserData()).thenReturn("testUserdata");
when(apiUserDataVO.getUserData()).thenReturn(testUserData);
when(cmd.getHttpMethod()).thenReturn(HTTPMethod.GET);
when(userDataManager.validateUserData(testUserData, HTTPMethod.GET)).thenReturn(testUserData);
try {
doNothing().when(userVmManagerImpl).updateUserData(userVmVO);
userVmManagerImpl.resetVMUserData(cmd);
@ -927,4 +916,21 @@ public class UserVmManagerImplTest {
userVmManagerImpl.createVirtualMachine(deployVMCmd);
}
@Test
public void testUpdateVncPasswordIfItHasChanged() {
String vncPassword = "12345678";
userVmManagerImpl.updateVncPasswordIfItHasChanged(vncPassword, vncPassword, virtualMachineProfile);
Mockito.verify(userVmDao, Mockito.never()).update(vmId, userVmVoMock);
}
@Test
public void testUpdateVncPasswordIfItHasChangedNewPassword() {
String vncPassword = "12345678";
String newPassword = "87654321";
Mockito.when(userVmVoMock.getId()).thenReturn(vmId);
userVmManagerImpl.updateVncPasswordIfItHasChanged(vncPassword, newPassword, virtualMachineProfile);
Mockito.verify(userVmDao).findById(vmId);
Mockito.verify(userVmDao).update(vmId, userVmVoMock);
}
}

Some files were not shown because too many files have changed in this diff.