mirror of https://github.com/apache/cloudstack.git
Merge branch '4.20' into 4.20-fiberchannel-patches
commit 86eff0e092
@@ -56,6 +56,7 @@ jobs:
          npm run test:unit
      - uses: codecov/codecov-action@v4
+       if: github.repository == 'apache/cloudstack'
        with:
          working-directory: ui
          files: ./coverage/lcov.info
@@ -823,7 +823,7 @@ public class AgentProperties{
    private T defaultValue;
    private Class<T> typeClass;

-   Property(String name, T value) {
+   public Property(String name, T value) {
        init(name, value);
    }
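With the constructor now public, code outside AgentProperties can build typed property handles directly instead of relying only on the predefined constants; a minimal sketch, assuming Property<T> remains the publicly visible nested generic type declared in this class (the key name and default value are illustrative):

    // hypothetical key/default pair; any typed property can be constructed this way now
    AgentProperties.Property<Integer> workers = new AgentProperties.Property<>("agent.workers", 5);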
@@ -53,9 +53,12 @@ public interface Host extends StateObject<Status>, Identity, Partition, HAResour
            return strs;
        }
    }

-   public static final String HOST_UEFI_ENABLE = "host.uefi.enable";
-   public static final String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
-   public static final String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
+   String HOST_UEFI_ENABLE = "host.uefi.enable";
+   String HOST_VOLUME_ENCRYPTION = "host.volume.encryption";
+   String HOST_INSTANCE_CONVERSION = "host.instance.conversion";
+   String HOST_OVFTOOL_VERSION = "host.ovftool.version";
+   String HOST_VIRTV2V_VERSION = "host.virtv2v.version";

    /**
     * @return name of the machine.
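The dropped modifiers are purely cosmetic: every field declared in a Java interface is implicitly public, static, and final, so both forms compile to the same constant. A small illustration with a hypothetical interface:

    interface HostKeys {
        String UEFI = "host.uefi.enable";                   // implicitly public static final
        public static final String LEGACY = "host.legacy";  // identical semantics, just noisier
    }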
@@ -171,6 +171,13 @@ public interface VolumeApiService {
     * </table>
     */
    boolean doesStoragePoolSupportDiskOffering(StoragePool destPool, DiskOffering diskOffering);

+   /**
+    * Checks if the storage pool supports the required disk offering tags
+    * destPool the storage pool to check the disk offering tags
+    * diskOfferingTags the tags that should be supported
+    * return whether the tags are supported in the storage pool
+    */
+   boolean doesStoragePoolSupportDiskOfferingTags(StoragePool destPool, String diskOfferingTags);

    Volume destroyVolume(long volumeId, Account caller, boolean expunge, boolean forceExpunge);
@@ -87,6 +87,8 @@ public interface AccountService {

    boolean isDomainAdmin(Long accountId);

+   boolean isResourceDomainAdmin(Long accountId);
+
    boolean isNormalUser(long accountId);

    User getActiveUserByRegistrationToken(String registrationToken);
@@ -72,7 +72,7 @@ public class ListProjectRolesCmd extends BaseListCmd {

    @Override
    public void execute() {
-       List<ProjectRole> projectRoles;
+       List<ProjectRole> projectRoles = new ArrayList<>();
        if (getProjectId() != null && getProjectRoleId() != null) {
            projectRoles = Collections.singletonList(projRoleService.findProjectRole(getProjectRoleId(), getProjectId()));
        } else if (StringUtils.isNotBlank(getRoleName())) {
@@ -128,19 +128,19 @@ public class ListClustersCmd extends BaseListCmd {

    protected Pair<List<ClusterResponse>, Integer> getClusterResponses() {
        Pair<List<? extends Cluster>, Integer> result = _mgr.searchForClusters(this);
-       List<ClusterResponse> clusterResponses = new ArrayList<ClusterResponse>();
+       List<ClusterResponse> clusterResponses = new ArrayList<>();
        for (Cluster cluster : result.first()) {
            ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, showCapacities);
            clusterResponse.setObjectName("cluster");
            clusterResponses.add(clusterResponse);
        }
-       return new Pair<List<ClusterResponse>, Integer>(clusterResponses, result.second());
+       return new Pair<>(clusterResponses, result.second());
    }

    @Override
    public void execute() {
        Pair<List<ClusterResponse>, Integer> clusterResponses = getClusterResponses();
-       ListResponse<ClusterResponse> response = new ListResponse<ClusterResponse>();
+       ListResponse<ClusterResponse> response = new ListResponse<>();
        response.setResponses(clusterResponses.first(), clusterResponses.second());
        response.setResponseName(getCommandName());
        this.setResponseObject(response);
@@ -86,8 +86,8 @@ public class ListPodsByCmd extends BaseListCmd {
    @Override
    public void execute() {
        Pair<List<? extends Pod>, Integer> result = _mgr.searchForPods(this);
-       ListResponse<PodResponse> response = new ListResponse<PodResponse>();
-       List<PodResponse> podResponses = new ArrayList<PodResponse>();
+       ListResponse<PodResponse> response = new ListResponse<>();
+       List<PodResponse> podResponses = new ArrayList<>();
        for (Pod pod : result.first()) {
            PodResponse podResponse = _responseGenerator.createPodResponse(pod, showCapacities);
            podResponse.setObjectName("pod");
@@ -132,14 +132,16 @@ public class ListCapacityCmd extends BaseListCmd {
        Collections.sort(capacityResponses, new Comparator<CapacityResponse>() {
            public int compare(CapacityResponse resp1, CapacityResponse resp2) {
                int res = resp1.getZoneName().compareTo(resp2.getZoneName());
+               // Group by zone
                if (res != 0) {
                    return res;
-               } else {
-                   return resp1.getCapacityType().compareTo(resp2.getCapacityType());
                }
+               // Sort by capacity type only if not already sorted by usage
+               return (getSortBy() != null) ? 0 : resp1.getCapacityType().compareTo(resp2.getCapacityType());
            }
        });

        response.setResponses(capacityResponses);
        response.setResponseName(getCommandName());
        this.setResponseObject(response);
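The rewritten comparator still groups rows by zone first, but the capacity-type tie-break now applies only when the caller did not request an explicit ordering; a self-contained sketch of the same rule, with an illustrative record standing in for CapacityResponse:

    import java.util.Comparator;

    class CapacityOrdering {
        record Row(String zone, Short capacityType) {}

        // Mirrors the hunk above: zone grouping always applies; the type
        // comparison only kicks in when no explicit sort key was supplied.
        static Comparator<Row> ordering(String sortBy) {
            return (a, b) -> {
                int res = a.zone().compareTo(b.zone()); // group by zone
                if (res != 0) {
                    return res;
                }
                return (sortBy != null) ? 0 : a.capacityType().compareTo(b.capacityType());
            };
        }
    }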
@@ -74,7 +74,7 @@ public class ScaleSystemVMCmd extends BaseAsyncCmd {
    }

    public Map<String, String> getDetails() {
-       return details;
+       return convertDetailsToMap(details);
    }

    /////////////////////////////////////////////////////
@@ -68,7 +68,7 @@ public class UpgradeSystemVMCmd extends BaseCmd {
    }

    public Map<String, String> getDetails() {
-       return details;
+       return convertDetailsToMap(details);
    }

    /////////////////////////////////////////////////////
@@ -72,6 +72,7 @@ public class ListCapabilitiesCmd extends BaseCmd {
        response.setInstancesDisksStatsRetentionTime((Integer) capabilities.get(ApiConstants.INSTANCES_DISKS_STATS_RETENTION_TIME));
        response.setSharedFsVmMinCpuCount((Integer)capabilities.get(ApiConstants.SHAREDFSVM_MIN_CPU_COUNT));
        response.setSharedFsVmMinRamSize((Integer)capabilities.get(ApiConstants.SHAREDFSVM_MIN_RAM_SIZE));
+       response.setDynamicScalingEnabled((Boolean) capabilities.get(ApiConstants.DYNAMIC_SCALING_ENABLED));
        response.setObjectName("capability");
        response.setResponseName(getCommandName());
        this.setResponseObject(response);
@@ -20,6 +20,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;

+import com.cloud.cpu.CPU;
import org.apache.cloudstack.acl.SecurityChecker;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiCommandResourceType;
@@ -148,6 +149,11 @@ public class CreateTemplateCmd extends BaseAsyncCreateCmd implements UserCmd {
            since = "4.19.0")
    private String accountName;

+   @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING,
+           description = "the CPU arch of the template. Valid options are: x86_64, aarch64. Defaults to x86_64",
+           since = "4.20.2")
+   private String arch;
+
    // ///////////////////////////////////////////////////
    // ///////////////// Accessors ///////////////////////
    // ///////////////////////////////////////////////////
@@ -234,6 +240,10 @@ public class CreateTemplateCmd extends BaseAsyncCreateCmd implements UserCmd {
        return accountName;
    }

+   public CPU.CPUArch getArch() {
+       return CPU.CPUArch.fromType(arch);
+   }
+
    // ///////////////////////////////////////////////////
    // ///////////// API Implementation///////////////////
    // ///////////////////////////////////////////////////
@@ -43,7 +43,7 @@ import com.cloud.network.NetworkModel;
import com.cloud.user.UserData;

@APICommand(name = "registerUserData",
-       description = "Register a new userdata.",
+       description = "Register a new User Data.",
        since = "4.18",
        responseObject = SuccessResponse.class,
        requestHasSensitiveInfo = false,
@@ -56,33 +56,33 @@ public class RegisterUserDataCmd extends BaseCmd {
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////

-   @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "Name of the userdata")
+   @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "Name of the User Data")
    private String name;

    //Owner information
-   @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the userdata. Must be used with domainId.")
+   @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "an optional account for the User Data. Must be used with domainId.")
    private String accountName;

    @Parameter(name = ApiConstants.DOMAIN_ID,
            type = CommandType.UUID,
            entityType = DomainResponse.class,
-           description = "an optional domainId for the userdata. If the account parameter is used, domainId must also be used.")
+           description = "an optional domainId for the User Data. If the account parameter is used, domainId must also be used.")
    private Long domainId;

-   @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "an optional project for the userdata")
+   @Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "an optional project for the User Data")
    private Long projectId;

    @Parameter(name = ApiConstants.USER_DATA,
            type = CommandType.STRING,
            required = true,
-           description = "Base64 encoded userdata content. " +
+           description = "Base64 encoded User Data content. " +
                    "Using HTTP GET (via querystring), you can send up to 4KB of data after base64 encoding. " +
-                   "Using HTTP POST (via POST body), you can send up to 1MB of data after base64 encoding. " +
-                   "You also need to change vm.userdata.max.length value",
+                   "Using HTTP POST (via POST body), you can send up to 32KB of data after base64 encoding, " +
+                   "which can be increased upto 1MB using the vm.userdata.max.length setting",
            length = 1048576)
    private String userData;

-   @Parameter(name = ApiConstants.PARAMS, type = CommandType.STRING, description = "comma separated list of variables declared in userdata content")
+   @Parameter(name = ApiConstants.PARAMS, type = CommandType.STRING, description = "comma separated list of variables declared in the User Data content")
    private String params;
@@ -140,7 +140,8 @@ public class DestroyVMCmd extends BaseAsyncCmd implements UserCmd {
            if (responses != null && !responses.isEmpty()) {
                response = responses.get(0);
            }
-           response.setResponseName("virtualmachine");
+           response.setResponseName(getCommandName());
+           response.setObjectName("virtualmachine");
            setResponseObject(response);
        } else {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to destroy vm");
@@ -34,8 +34,6 @@ import org.apache.cloudstack.api.response.ZoneResponse;
        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListZonesCmd extends BaseListCmd implements UserCmd {

-   private static final String s_name = "listzonesresponse";
-
    /////////////////////////////////////////////////////
    //////////////// API parameters /////////////////////
    /////////////////////////////////////////////////////
@@ -113,11 +111,6 @@ public class ListZonesCmd extends BaseListCmd implements UserCmd {
    /////////////// API Implementation///////////////////
    /////////////////////////////////////////////////////

-   @Override
-   public String getCommandName() {
-       return s_name;
-   }
-
    @Override
    public void execute() {
        ListResponse<ZoneResponse> response = _queryService.listDataCenters(this);
@@ -136,6 +136,10 @@ public class CapabilitiesResponse extends BaseResponse {
    @Param(description = "the min Ram size for the service offering used by the shared filesystem instance", since = "4.20.0")
    private Integer sharedFsVmMinRamSize;

+   @SerializedName(ApiConstants.DYNAMIC_SCALING_ENABLED)
+   @Param(description = "true if dynamically scaling for instances is enabled", since = "4.21.0")
+   private Boolean dynamicScalingEnabled;
+
    public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) {
        this.securityGroupsEnabled = securityGroupsEnabled;
    }
@@ -247,4 +251,8 @@ public class CapabilitiesResponse extends BaseResponse {
    public void setSharedFsVmMinRamSize(Integer sharedFsVmMinRamSize) {
        this.sharedFsVmMinRamSize = sharedFsVmMinRamSize;
    }
+
+   public void setDynamicScalingEnabled(Boolean dynamicScalingEnabled) {
+       this.dynamicScalingEnabled = dynamicScalingEnabled;
+   }
}
@@ -27,7 +27,7 @@ import com.google.gson.annotations.SerializedName;
public class StatsResponse extends BaseResponse {

    @SerializedName("timestamp")
-   @Param(description = "the time when the VM stats were collected. The format is \"yyyy-MM-dd hh:mm:ss\"")
+   @Param(description = "the time when the VM stats were collected. The format is 'yyyy-MM-dd hh:mm:ss'")
    private Date timestamp;

    @SerializedName("cpuused")
@@ -27,41 +27,41 @@ import org.apache.cloudstack.api.EntityReference;
public class UserDataResponse extends BaseResponseWithAnnotations implements ControlledEntityResponse {

    @SerializedName(ApiConstants.ID)
-   @Param(description = "ID of the ssh keypair")
+   @Param(description = "ID of the User Data")
    private String id;

    @SerializedName(ApiConstants.NAME)
-   @Param(description = "Name of the userdata")
+   @Param(description = "Name of the User Data")
    private String name;

-   @SerializedName(ApiConstants.ACCOUNT_ID) @Param(description="the owner id of the userdata")
+   @SerializedName(ApiConstants.ACCOUNT_ID) @Param(description="the owner id of the User Data")
    private String accountId;

-   @SerializedName(ApiConstants.ACCOUNT) @Param(description="the owner of the userdata")
+   @SerializedName(ApiConstants.ACCOUNT) @Param(description="the owner of the User Data")
    private String accountName;

    @SerializedName(ApiConstants.PROJECT_ID)
-   @Param(description = "the project id of the userdata", since = "4.19.1")
+   @Param(description = "the project id of the User Data", since = "4.19.1")
    private String projectId;

    @SerializedName(ApiConstants.PROJECT)
-   @Param(description = "the project name of the userdata", since = "4.19.1")
+   @Param(description = "the project name of the User Data", since = "4.19.1")
    private String projectName;

-   @SerializedName(ApiConstants.DOMAIN_ID) @Param(description="the domain id of the userdata owner")
+   @SerializedName(ApiConstants.DOMAIN_ID) @Param(description="the domain id of the User Data owner")
    private String domainId;

-   @SerializedName(ApiConstants.DOMAIN) @Param(description="the domain name of the userdata owner")
+   @SerializedName(ApiConstants.DOMAIN) @Param(description="the domain name of the User Data owner")
    private String domain;

    @SerializedName(ApiConstants.DOMAIN_PATH)
-   @Param(description = "path of the domain to which the userdata owner belongs", since = "4.19.2.0")
+   @Param(description = "path of the domain to which the User Data owner belongs", since = "4.19.2.0")
    private String domainPath;

-   @SerializedName(ApiConstants.USER_DATA) @Param(description="base64 encoded userdata content")
+   @SerializedName(ApiConstants.USER_DATA) @Param(description="base64 encoded User Data content")
    private String userData;

-   @SerializedName(ApiConstants.PARAMS) @Param(description="list of parameters which contains the list of keys or string parameters that are needed to be passed for any variables declared in userdata")
+   @SerializedName(ApiConstants.PARAMS) @Param(description="list of parameters which contains the list of keys or string parameters that are needed to be passed for any variables declared in the User Data")
    private String params;

    public UserDataResponse() {
@@ -112,11 +112,11 @@ public interface QueryService {
    ConfigKey<Boolean> AllowUserViewDestroyedVM = new ConfigKey<>("Advanced", Boolean.class, "allow.user.view.destroyed.vm", "false",
            "Determines whether users can view their destroyed or expunging vm ", true, ConfigKey.Scope.Account);

-   static final ConfigKey<String> UserVMDeniedDetails = new ConfigKey<>(String.class,
+   ConfigKey<String> UserVMDeniedDetails = new ConfigKey<>(String.class,
            "user.vm.denied.details", "Advanced", "rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag",
            "Determines whether users can view certain VM settings. When set to empty, default value used is: rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag.", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null);

-   static final ConfigKey<String> UserVMReadOnlyDetails = new ConfigKey<>(String.class,
+   ConfigKey<String> UserVMReadOnlyDetails = new ConfigKey<>(String.class,
            "user.vm.readonly.details", "Advanced", "dataDiskController, rootDiskController",
            "List of read-only VM settings/details as comma separated string", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null);
@@ -125,16 +125,20 @@ public interface QueryService {
            "network offering, zones), we use the flag to determine if the entities should be sorted ascending (when flag is true) " +
            "or descending (when flag is false). Within the scope of the config all users see the same result.", true, ConfigKey.Scope.Global);

-   public static final ConfigKey<Boolean> AllowUserViewAllDomainAccounts = new ConfigKey<>("Advanced", Boolean.class,
+   ConfigKey<Boolean> AllowUserViewAllDomainAccounts = new ConfigKey<>("Advanced", Boolean.class,
            "allow.user.view.all.domain.accounts", "false",
            "Determines whether users can view all user accounts within the same domain", true, ConfigKey.Scope.Domain);

-   static final ConfigKey<Boolean> SharePublicTemplatesWithOtherDomains = new ConfigKey<>("Advanced", Boolean.class, "share.public.templates.with.other.domains", "true",
+   ConfigKey<Boolean> AllowUserViewAllDataCenters = new ConfigKey<>("Advanced", Boolean.class, "allow.user.view.all.zones", "true",
+           "Determines whether for instance a Resource Admin can view zones that are not dedicated to them.", true, ConfigKey.Scope.Domain);
+
+   ConfigKey<Boolean> SharePublicTemplatesWithOtherDomains = new ConfigKey<>("Advanced", Boolean.class, "share.public.templates.with.other.domains", "true",
            "If false, templates of this domain will not show up in the list templates of other domains.", true, ConfigKey.Scope.Domain);

    ConfigKey<Boolean> ReturnVmStatsOnVmList = new ConfigKey<>("Advanced", Boolean.class, "list.vm.default.details.stats", "true",
            "Determines whether VM stats should be returned when details are not explicitly specified in listVirtualMachines API request. When false, details default to [group, nics, secgrp, tmpl, servoff, diskoff, backoff, iso, volume, min, affgrp]. When true, all details are returned including 'stats'.", true, ConfigKey.Scope.Global);

    ListResponse<UserResponse> searchForUsers(ResponseObject.ResponseView responseView, ListUsersCmd cmd) throws PermissionDeniedException;

    ListResponse<UserResponse> searchForUsers(Long domainId, boolean recursive) throws PermissionDeniedException;
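Besides dropping the redundant modifiers, this hunk introduces the new allow.user.view.all.zones setting; the surrounding declarations double as the recipe for adding one. A hedged sketch of a new Domain-scoped boolean key following the exact constructor shape used above (the key name is hypothetical):

    ConfigKey<Boolean> AllowSomethingNew = new ConfigKey<>("Advanced", Boolean.class,
            "allow.something.new", "false",
            "Human-readable description surfaced by listConfigurations.", true, ConfigKey.Scope.Domain);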
@@ -17,22 +17,33 @@

package com.cloud.agent.api;

+import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
+
+import java.util.Map;
+
public class CheckVolumeAnswer extends Answer {

    private long size;
+   private Map<VolumeOnStorageTO.Detail, String> volumeDetails;

    CheckVolumeAnswer() {
    }

-   public CheckVolumeAnswer(CheckVolumeCommand cmd, String details, long size) {
-       super(cmd, true, details);
+   public CheckVolumeAnswer(CheckVolumeCommand cmd, final boolean success, String details, long size,
+                            Map<VolumeOnStorageTO.Detail, String> volumeDetails) {
+       super(cmd, success, details);
        this.size = size;
+       this.volumeDetails = volumeDetails;
    }

    public long getSize() {
        return size;
    }

+   public Map<VolumeOnStorageTO.Detail, String> getVolumeDetails() {
+       return volumeDetails;
+   }
+
    public String getString() {
        return "CheckVolumeAnswer [size=" + size + "]";
    }
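The answer can now carry a failure flag and a map of per-volume details instead of always reporting success; a hedged usage sketch from the agent side, assuming VolumeOnStorageTO.Detail is an enum (its constants are not shown in this hunk, so the map is left empty here):

    import java.util.EnumMap;
    import java.util.Map;

    Map<VolumeOnStorageTO.Detail, String> volumeDetails = new EnumMap<>(VolumeOnStorageTO.Detail.class);
    // populate volumeDetails from the storage-side inspection, then report the outcome:
    return new CheckVolumeAnswer(cmd, true, "volume checked", size, volumeDetails);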
@@ -16,8 +16,6 @@
// under the License.
package com.cloud.agent.api;

-import org.apache.cloudstack.vm.UnmanagedInstanceTO;
-
public class ConvertInstanceAnswer extends Answer {

    private String temporaryConvertUuid;
@@ -25,16 +23,6 @@ public class ConvertInstanceAnswer extends Answer {
    public ConvertInstanceAnswer() {
        super();
    }
-   private UnmanagedInstanceTO convertedInstance;
-
-   public ConvertInstanceAnswer(Command command, boolean success, String details) {
-       super(command, success, details);
-   }
-
-   public ConvertInstanceAnswer(Command command, UnmanagedInstanceTO convertedInstance) {
-       super(command, true, "");
-       this.convertedInstance = convertedInstance;
-   }

    public ConvertInstanceAnswer(Command command, String temporaryConvertUuid) {
        super(command, true, "");
@@ -44,8 +32,4 @@ public class ConvertInstanceAnswer extends Answer {
    public String getTemporaryConvertUuid() {
        return temporaryConvertUuid;
    }
-
-   public UnmanagedInstanceTO getConvertedInstance() {
-       return convertedInstance;
-   }
}
@@ -20,13 +20,10 @@ import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.RemoteInstanceTO;
import com.cloud.hypervisor.Hypervisor;

-import java.util.List;
-
public class ConvertInstanceCommand extends Command {

    private RemoteInstanceTO sourceInstance;
    private Hypervisor.HypervisorType destinationHypervisorType;
-   private List<String> destinationStoragePools;
    private DataStoreTO conversionTemporaryLocation;
    private String templateDirOnConversionLocation;
    private boolean checkConversionSupport;
@@ -36,12 +33,10 @@ public class ConvertInstanceCommand extends Command {
    public ConvertInstanceCommand() {
    }

-   public ConvertInstanceCommand(RemoteInstanceTO sourceInstance, Hypervisor.HypervisorType destinationHypervisorType,
-                                 List<String> destinationStoragePools, DataStoreTO conversionTemporaryLocation,
+   public ConvertInstanceCommand(RemoteInstanceTO sourceInstance, Hypervisor.HypervisorType destinationHypervisorType, DataStoreTO conversionTemporaryLocation,
                                  String templateDirOnConversionLocation, boolean checkConversionSupport, boolean exportOvfToConversionLocation) {
        this.sourceInstance = sourceInstance;
        this.destinationHypervisorType = destinationHypervisorType;
-       this.destinationStoragePools = destinationStoragePools;
        this.conversionTemporaryLocation = conversionTemporaryLocation;
        this.templateDirOnConversionLocation = templateDirOnConversionLocation;
        this.checkConversionSupport = checkConversionSupport;
@@ -56,10 +51,6 @@ public class ConvertInstanceCommand extends Command {
        return destinationHypervisorType;
    }

-   public List<String> getDestinationStoragePools() {
-       return destinationStoragePools;
-   }
-
    public DataStoreTO getConversionTemporaryLocation() {
        return conversionTemporaryLocation;
    }
@@ -17,21 +17,28 @@

package com.cloud.agent.api;

+import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
+
+import java.util.Map;
+
public class CopyRemoteVolumeAnswer extends Answer {

    private String remoteIp;
    private String filename;

    private long size;
+   private Map<VolumeOnStorageTO.Detail, String> volumeDetails;

    CopyRemoteVolumeAnswer() {
    }

-   public CopyRemoteVolumeAnswer(CopyRemoteVolumeCommand cmd, String details, String filename, long size) {
-       super(cmd, true, details);
+   public CopyRemoteVolumeAnswer(CopyRemoteVolumeCommand cmd, final boolean success, String details, String filename, long size,
+                                 Map<VolumeOnStorageTO.Detail, String> volumeDetails) {
+       super(cmd, success, details);
        this.remoteIp = cmd.getRemoteIp();
        this.filename = filename;
        this.size = size;
+       this.volumeDetails = volumeDetails;
    }

    public String getRemoteIp() {
@@ -54,6 +61,10 @@ public class CopyRemoteVolumeAnswer extends Answer {
        return size;
    }

+   public Map<VolumeOnStorageTO.Detail, String> getVolumeDetails() {
+       return volumeDetails;
+   }
+
    public String getString() {
        return "CopyRemoteVolumeAnswer [remoteIp=" + remoteIp + "]";
    }
@@ -39,9 +39,7 @@ import java.util.Map;

import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
-import javax.net.ssl.TrustManager;

-import org.apache.cloudstack.utils.security.SSLUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.io.IOUtils;
@@ -55,6 +53,7 @@ import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
+import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils;

import com.cloud.utils.Pair;
@@ -120,10 +119,10 @@ public class HttpsDirectTemplateDownloader extends DirectTemplateDownloaderImpl
            String password = "changeit";
            defaultKeystore.load(is, password.toCharArray());
        }
-       TrustManager[] tm = HttpsMultiTrustManager.getTrustManagersFromKeyStores(customKeystore, defaultKeystore);
-       SSLContext sslContext = SSLUtils.getSSLContext();
-       sslContext.init(null, tm, null);
-       return sslContext;
+       return SSLContexts.custom()
+               .loadTrustMaterial(customKeystore, null)
+               .loadTrustMaterial(defaultKeystore, null)
+               .build();
    } catch (KeyStoreException | NoSuchAlgorithmException | CertificateException | IOException | KeyManagementException e) {
        logger.error(String.format("Failure getting SSL context for HTTPS downloader, using default SSL context: %s", e.getMessage()), e);
        try {
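The hand-rolled multi-keystore TrustManager (deleted in the next hunk) is replaced by HttpClient's SSLContextBuilder, which accumulates trust material across calls; a minimal standalone sketch of the same pattern (keystore loading elided, variables assumed initialized as in the method above):

    import java.security.GeneralSecurityException;
    import java.security.KeyStore;
    import javax.net.ssl.SSLContext;
    import org.apache.http.ssl.SSLContexts;

    static SSLContext trustBoth(KeyStore customKeystore, KeyStore defaultKeystore) throws GeneralSecurityException {
        // each loadTrustMaterial call adds another set of trusted certificates to the builder
        return SSLContexts.custom()
                .loadTrustMaterial(customKeystore, null)   // operator-supplied CA material
                .loadTrustMaterial(defaultKeystore, null)  // the JRE's default cacerts
                .build();
    }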
@@ -1,102 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.cloudstack.direct.download;
-
-import java.security.KeyStore;
-import java.security.KeyStoreException;
-import java.security.NoSuchAlgorithmException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.TrustManagerFactory;
-import javax.net.ssl.X509TrustManager;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-
-public class HttpsMultiTrustManager implements X509TrustManager {
-
-    private final List<X509TrustManager> trustManagers;
-
-    public HttpsMultiTrustManager(KeyStore... keystores) {
-        List<X509TrustManager> trustManagers = new ArrayList<>();
-        trustManagers.add(getTrustManager(null));
-        for (KeyStore keystore : keystores) {
-            trustManagers.add(getTrustManager(keystore));
-        }
-        this.trustManagers = ImmutableList.copyOf(trustManagers);
-    }
-
-    public static TrustManager[] getTrustManagersFromKeyStores(KeyStore... keyStore) {
-        return new TrustManager[] { new HttpsMultiTrustManager(keyStore) };
-    }
-
-    @Override
-    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
-        for (X509TrustManager trustManager : trustManagers) {
-            try {
-                trustManager.checkClientTrusted(chain, authType);
-                return;
-            } catch (CertificateException ignored) {}
-        }
-        throw new CertificateException("None of the TrustManagers trust this certificate chain");
-    }
-
-    @Override
-    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
-        for (X509TrustManager trustManager : trustManagers) {
-            try {
-                trustManager.checkServerTrusted(chain, authType);
-                return;
-            } catch (CertificateException ignored) {}
-        }
-        throw new CertificateException("None of the TrustManagers trust this certificate chain");
-    }
-
-    @Override
-    public X509Certificate[] getAcceptedIssuers() {
-        ImmutableList.Builder<X509Certificate> certificates = ImmutableList.builder();
-        for (X509TrustManager trustManager : trustManagers) {
-            for (X509Certificate cert : trustManager.getAcceptedIssuers()) {
-                certificates.add(cert);
-            }
-        }
-        return Iterables.toArray(certificates.build(), X509Certificate.class);
-    }
-
-    public X509TrustManager getTrustManager(KeyStore keystore) {
-        return getTrustManager(TrustManagerFactory.getDefaultAlgorithm(), keystore);
-    }
-
-    public X509TrustManager getTrustManager(String algorithm, KeyStore keystore) {
-        TrustManagerFactory factory;
-        try {
-            factory = TrustManagerFactory.getInstance(algorithm);
-            factory.init(keystore);
-            return Iterables.getFirst(Iterables.filter(
-                    Arrays.asList(factory.getTrustManagers()), X509TrustManager.class), null);
-        } catch (NoSuchAlgorithmException | KeyStoreException e) {
-            e.printStackTrace();
-        }
-        return null;
-    }
-}
@@ -24,6 +24,7 @@ import com.cloud.utils.PasswordGenerator;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.ca.CAManager;
import org.apache.cloudstack.framework.ca.Certificate;
+import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.utils.security.CertUtils;
import org.apache.cloudstack.utils.security.KeyStoreUtils;
@@ -37,6 +38,9 @@ import java.util.Base64;
 */
public interface VirtualMachineGuru {

+   static final ConfigKey<String> NTPServerConfig = new ConfigKey<String>(String.class, "ntp.server.list", "Advanced", null,
+           "Comma separated list of NTP servers to configure in System VMs", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null);
+
    boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context);

    /**
@@ -54,6 +54,7 @@ import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.BooleanUtils;
+import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.ThreadContext;
@@ -703,11 +704,25 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
        Map<String, String> detailsMap = readyAnswer.getDetailsMap();
        if (detailsMap != null) {
            String uefiEnabled = detailsMap.get(Host.HOST_UEFI_ENABLE);
+           String virtv2vVersion = detailsMap.get(Host.HOST_VIRTV2V_VERSION);
+           String ovftoolVersion = detailsMap.get(Host.HOST_OVFTOOL_VERSION);
            logger.debug("Got HOST_UEFI_ENABLE [{}] for host [{}]:", uefiEnabled, host);
-           if (uefiEnabled != null) {
+           if (ObjectUtils.anyNotNull(uefiEnabled, virtv2vVersion, ovftoolVersion)) {
                _hostDao.loadDetails(host);
+               boolean updateNeeded = false;
                if (!uefiEnabled.equals(host.getDetails().get(Host.HOST_UEFI_ENABLE))) {
                    host.getDetails().put(Host.HOST_UEFI_ENABLE, uefiEnabled);
+                   updateNeeded = true;
                }
+               if (StringUtils.isNotBlank(virtv2vVersion) && !virtv2vVersion.equals(host.getDetails().get(Host.HOST_VIRTV2V_VERSION))) {
+                   host.getDetails().put(Host.HOST_VIRTV2V_VERSION, virtv2vVersion);
+                   updateNeeded = true;
+               }
+               if (StringUtils.isNotBlank(ovftoolVersion) && !ovftoolVersion.equals(host.getDetails().get(Host.HOST_OVFTOOL_VERSION))) {
+                   host.getDetails().put(Host.HOST_OVFTOOL_VERSION, ovftoolVersion);
+                   updateNeeded = true;
+               }
+               if (updateNeeded) {
                    _hostDao.saveDetails(host);
+               }
            }
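ObjectUtils.anyNotNull is the commons-lang3 helper behind the widened guard: it returns true as soon as any argument is non-null, so the host details are now reloaded when any of the three values arrives in the ready answer; a quick illustration:

    import org.apache.commons.lang3.ObjectUtils;

    boolean any = ObjectUtils.anyNotNull(null, "1.42.0", null); // true: one value is non-null
    boolean none = ObjectUtils.anyNotNull(null, null);          // false: every value is null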
@@ -21,6 +21,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
+import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
@@ -1292,6 +1293,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
                IPAddressVO lockedIpVO = _ipAddressDao.acquireInLockTable(ipVO.getId());
                validateLockedRequestedIp(ipVO, lockedIpVO);
                lockedIpVO.setState(IPAddressVO.State.Allocated);
+               lockedIpVO.setAllocatedTime(new Date());
                _ipAddressDao.update(lockedIpVO.getId(), lockedIpVO);
            } finally {
                _ipAddressDao.releaseFromLockTable(ipVO.getId());
@@ -37,4 +37,6 @@ public interface AutoScaleVmGroupVmMapDao extends GenericDao<AutoScaleVmGroupVmM
    public boolean removeByGroup(long vmGroupId);

    int expungeByVmList(List<Long> vmIds, Long batchSize);
+
+   int getErroredInstanceCount(long vmGroupId);
}
@@ -127,4 +127,13 @@ public class AutoScaleVmGroupVmMapDaoImpl extends GenericDaoBase<AutoScaleVmGrou
        sc.setParameters("vmIds", vmIds.toArray());
        return batchExpunge(sc, batchSize);
    }
+
+   @Override
+   public int getErroredInstanceCount(long vmGroupId) {
+       SearchCriteria<Integer> sc = CountBy.create();
+       sc.setParameters("vmGroupId", vmGroupId);
+       sc.setJoinParameters("vmSearch", "states", State.Error);
+       final List<Integer> results = customSearch(sc, null);
+       return results.get(0);
+   }
}
@@ -50,6 +50,9 @@ public class SecurityGroupVMMapVO implements InternalIdentity {
    @Column(name = "ip4_address", table = "nics", insertable = false, updatable = false)
    private String guestIpAddress;

+   @Column(name = "ip6_address", table = "nics", insertable = false, updatable = false)
+   private String guestIpv6Address;
+
    @Column(name = "state", table = "vm_instance", insertable = false, updatable = false)
    private State vmState;
@@ -77,6 +80,10 @@ public class SecurityGroupVMMapVO implements InternalIdentity {
        return guestIpAddress;
    }

+   public String getGuestIpv6Address() {
+       return guestIpv6Address;
+   }
+
    public long getInstanceId() {
        return instanceId;
    }
@@ -72,7 +72,7 @@ public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long>, StateDao<

    VMTemplateVO findSystemVMTemplate(long zoneId);

-   VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType);
+   VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType, String preferredArch);

    List<VMTemplateVO> findSystemVMReadyTemplates(long zoneId, HypervisorType hypervisorType, String preferredArch);
@@ -23,6 +23,7 @@ import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.stream.Collectors;

import javax.inject.Inject;
import javax.naming.ConfigurationException;
@@ -578,11 +579,19 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
    }

    @Override
-   public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) {
+   public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType, String preferredArch) {
        List<VMTemplateVO> templates = listAllReadySystemVMTemplates(zoneId);
        if (CollectionUtils.isEmpty(templates)) {
            return null;
        }
+       if (StringUtils.isNotBlank(preferredArch)) {
+           // Sort the templates by preferred architecture first
+           templates = templates.stream()
+                   .sorted(Comparator.comparing(
+                           x -> !x.getArch().getType().equalsIgnoreCase(preferredArch)
+                   ))
+                   .collect(Collectors.toList());
+       }
        if (hypervisorType == HypervisorType.Any) {
            return templates.get(0);
        }
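The sort relies on Comparator.comparing over a boolean key: Boolean's natural order puts false before true, so templates whose arch matches preferredArch (key false) float to the front while the rest keep their relative order, since the stream sort is stable. A standalone illustration:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class ArchSortDemo {
        public static void main(String[] args) {
            List<String> archs = new ArrayList<>(List.of("x86_64", "aarch64", "x86_64"));
            String preferred = "aarch64";
            // false (a match) sorts before true, so the preferred arch comes first
            archs.sort(Comparator.comparing(a -> !a.equalsIgnoreCase(preferred)));
            System.out.println(archs); // [aarch64, x86_64, x86_64]
        }
    }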
@@ -64,11 +64,14 @@ import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.GuestOSVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.GuestOSDaoImpl;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateDaoImpl;
import com.cloud.storage.dao.VMTemplateZoneDao;
@@ -102,15 +105,13 @@ public class SystemVmTemplateRegistration {
    private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM);
    private static final String storageScriptsDir = "scripts/storage/secondary";
    private static final Integer OTHER_LINUX_ID = 99;
-   private static final Integer LINUX_5_ID = 15;
-   private static final Integer LINUX_7_ID = 183;
+   private static Integer LINUX_12_ID = 363;
    private static final Integer SCRIPT_TIMEOUT = 1800000;
    private static final Integer LOCK_WAIT_TIMEOUT = 1200;
    protected static final List<CPU.CPUArch> DOWNLOADABLE_TEMPLATE_ARCH_TYPES = Arrays.asList(
            CPU.CPUArch.arm64
    );

    public static String CS_MAJOR_VERSION = null;
    public static String CS_TINY_VERSION = null;
@@ -132,6 +133,8 @@ public class SystemVmTemplateRegistration {
    ClusterDao clusterDao;
    @Inject
    ConfigurationDao configurationDao;
+   @Inject
+   private GuestOSDao guestOSDao;

    private String systemVmTemplateVersion;
@@ -147,6 +150,7 @@ public class SystemVmTemplateRegistration {
        imageStoreDetailsDao = new ImageStoreDetailsDaoImpl();
        clusterDao = new ClusterDaoImpl();
        configurationDao = new ConfigurationDaoImpl();
+       guestOSDao = new GuestOSDaoImpl();
        tempDownloadDir = new File(System.getProperty("java.io.tmpdir"));
    }
@@ -320,7 +324,7 @@ public class SystemVmTemplateRegistration {

    public static final Map<String, MetadataTemplateDetails> NewTemplateMap = new HashMap<>();

-   public static final Map<Hypervisor.HypervisorType, String> RouterTemplateConfigurationNames = new HashMap<Hypervisor.HypervisorType, String>() {
+   public static final Map<Hypervisor.HypervisorType, String> RouterTemplateConfigurationNames = new HashMap<>() {
        {
            put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
            put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
@@ -331,14 +335,14 @@ public class SystemVmTemplateRegistration {
        }
    };

-   public static final Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<Hypervisor.HypervisorType, Integer>() {
+   public static Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<>() {
        {
-           put(Hypervisor.HypervisorType.KVM, LINUX_5_ID);
+           put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
            put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
            put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
-           put(Hypervisor.HypervisorType.Hyperv, LINUX_5_ID);
-           put(Hypervisor.HypervisorType.LXC, LINUX_5_ID);
-           put(Hypervisor.HypervisorType.Ovm3, LINUX_7_ID);
+           put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
+           put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
+           put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
        }
    };
@@ -595,6 +599,23 @@ public class SystemVmTemplateRegistration {
        vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType);
    }

+   private void updateSystemVmTemplateGuestOsId() {
+       String systemVmGuestOsName = "Debian GNU/Linux 12 (64-bit)"; // default
+       try {
+           GuestOSVO guestOS = guestOSDao.findOneByDisplayName(systemVmGuestOsName);
+           if (guestOS != null) {
+               LOGGER.debug("Updating SystemVM Template Guest OS [{}] id", systemVmGuestOsName);
+               SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId());
+               hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
+               hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
+               hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
+               hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
+           }
+       } catch (Exception e) {
+           LOGGER.warn("Couldn't update SystemVM Template Guest OS id, due to {}", e.getMessage());
+       }
+   }
+
    public void updateConfigurationParams(Map<String, String> configParams) {
        for (Map.Entry<String, String> config : configParams.entrySet()) {
            boolean updated = configurationDao.update(config.getKey(), config.getValue());
@@ -813,7 +834,8 @@ public class SystemVmTemplateRegistration {
                    section.get("filename"),
                    section.get("downloadurl"),
                    section.get("checksum"),
-                   hypervisorType.second()));
+                   hypervisorType.second(),
+                   section.get("guestos")));
        }
        Ini.Section defaultSection = ini.get("default");
        return defaultSection.get("version").trim();
@@ -965,6 +987,10 @@ public class SystemVmTemplateRegistration {
    private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails) {
        VMTemplateVO templateVO = vmTemplateDao.findById(templateId);
        templateVO.setTemplateType(Storage.TemplateType.SYSTEM);
+       GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs());
+       if (guestOS != null) {
+           templateVO.setGuestOSId(guestOS.getId());
+       }
        boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
        if (!updated) {
            String errMsg = String.format("updateSystemVmTemplates:Exception while updating template with id %s to be marked as 'system'", templateId);
@@ -980,9 +1006,13 @@ public class SystemVmTemplateRegistration {
        updateConfigurationParams(configParams);
    }

-   private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) {
+   private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) {
        templateVO.setUrl(templateDetails.getUrl());
        templateVO.setChecksum(templateDetails.getChecksum());
+       GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs());
+       if (guestOS != null) {
+           templateVO.setGuestOSId(guestOS.getId());
+       }
        boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
        if (!updated) {
            String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", templateDetails.getHypervisorType());
@@ -1020,7 +1050,7 @@ public class SystemVmTemplateRegistration {
            VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch(
                    templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM);
            if (templateVO != null) {
-               updateTemplateUrlAndChecksum(templateVO, templateDetails);
+               updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails);
            }
        }
    }
@@ -1029,6 +1059,7 @@ public class SystemVmTemplateRegistration {

    public void updateSystemVmTemplates(final Connection conn) {
        LOGGER.debug("Updating System Vm template IDs");
+       updateSystemVmTemplateGuestOsId();
        Transaction.execute(new TransactionCallbackNoReturn() {
            @Override
            public void doInTransactionWithoutResult(final TransactionStatus status) {
@@ -1076,15 +1107,17 @@ public class SystemVmTemplateRegistration {
        private final String checksum;
        private final CPU.CPUArch arch;
        private String downloadedFilePath;
+       private final String guestOs;

        MetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType, String name, String filename, String url,
-               String checksum, CPU.CPUArch arch) {
+               String checksum, CPU.CPUArch arch, String guestOs) {
            this.hypervisorType = hypervisorType;
            this.name = name;
            this.filename = filename;
            this.url = url;
            this.checksum = checksum;
            this.arch = arch;
+           this.guestOs = guestOs;
        }

        public Hypervisor.HypervisorType getHypervisorType() {
@@ -1111,6 +1144,10 @@ public class SystemVmTemplateRegistration {
            return arch;
        }

+       public String getGuestOs() {
+           return guestOs;
+       }
+
        public String getDownloadedFilePath() {
            return downloadedFilePath;
        }
@@ -100,8 +100,6 @@ public class Upgrade42000to42010 extends DbUpgradeAbstractImpl implements DbUpgr

        DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name");

-       DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type");
-
        DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu");
        DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed");
        DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size");
@@ -37,4 +37,6 @@ public interface UsageJobDao extends GenericDao<UsageJobVO, Long> {
    UsageJobVO isOwner(String hostname, int pid);

    void updateJobSuccess(Long jobId, long startMillis, long endMillis, long execTime, boolean success);
+
+   void removeLastOpenJobsOwned(String hostname, int pid);
}
@@ -22,6 +22,7 @@ import java.util.Date;
import java.util.List;

+import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Component;

import com.cloud.usage.UsageJobVO;
@@ -114,7 +115,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
    public UsageJobVO isOwner(String hostname, int pid) {
        TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
        try {
-           if ((hostname == null) || (pid <= 0)) {
+           if (hostname == null || pid <= 0) {
                return null;
            }
@@ -174,7 +175,7 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
        SearchCriteria<UsageJobVO> sc = createSearchCriteria();
        sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0));
        sc.addAnd("jobType", SearchCriteria.Op.EQ, Integer.valueOf(UsageJobVO.JOB_TYPE_SINGLE));
-       sc.addAnd("scheduled", SearchCriteria.Op.EQ, Integer.valueOf(0));
+       sc.addAnd("scheduled", SearchCriteria.Op.EQ, Integer.valueOf(UsageJobVO.JOB_NOT_SCHEDULED));
        List<UsageJobVO> jobs = search(sc, filter);

        if ((jobs == null) || jobs.isEmpty()) {
@@ -194,4 +195,36 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
        }
        return jobs.get(0).getHeartbeat();
    }
+
+   private List<UsageJobVO> getLastOpenJobsOwned(String hostname, int pid) {
+       SearchCriteria<UsageJobVO> sc = createSearchCriteria();
+       sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0));
+       sc.addAnd("host", SearchCriteria.Op.EQ, hostname);
+       if (pid > 0) {
+           sc.addAnd("pid", SearchCriteria.Op.EQ, Integer.valueOf(pid));
+       }
+       return listBy(sc);
+   }
+
+   @Override
+   public void removeLastOpenJobsOwned(String hostname, int pid) {
+       if (hostname == null) {
+           return;
+       }
+
+       TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
+       try {
+           List<UsageJobVO> jobs = getLastOpenJobsOwned(hostname, pid);
+           if (CollectionUtils.isNotEmpty(jobs)) {
+               logger.info("Found {} opens job, to remove", jobs.size());
+               for (UsageJobVO job : jobs) {
+                   logger.debug("Removing job - id: {}, pid: {}, job type: {}, scheduled: {}, heartbeat: {}",
+                           job.getId(), job.getPid(), job.getJobType(), job.getScheduled(), job.getHeartbeat());
+                   remove(job.getId());
+               }
+           }
+       } finally {
+           txn.close();
+       }
+   }
}
@@ -55,7 +55,7 @@ UPDATE `cloud`.`service_offering` SET ram_size = 512 WHERE unique_name IN ("Clou
    AND system_use = 1 AND ram_size < 512;

-- NSX Plugin --
-CREATE TABLE `cloud`.`nsx_providers` (
+CREATE TABLE IF NOT EXISTS `cloud`.`nsx_providers` (
    `id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
    `uuid` varchar(40),
    `zone_id` bigint unsigned NOT NULL COMMENT 'Zone ID',
@@ -31,6 +31,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

+import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
@@ -186,4 +187,24 @@ public class VMTemplateDaoImplTest {
        VMTemplateVO result = templateDao.findLatestTemplateByTypeAndHypervisorAndArch(hypervisorType, arch, type);
        assertNull(result);
    }
+
+   @Test
+   public void testFindSystemVMReadyTemplate() {
+       Long zoneId = 1L;
+       VMTemplateVO systemVmTemplate1 = mock(VMTemplateVO.class);
+       Mockito.when(systemVmTemplate1.getArch()).thenReturn(CPU.CPUArch.x86);
+       VMTemplateVO systemVmTemplate2 = mock(VMTemplateVO.class);
+       Mockito.when(systemVmTemplate2.getArch()).thenReturn(CPU.CPUArch.x86);
+       VMTemplateVO systemVmTemplate3 = mock(VMTemplateVO.class);
+       Mockito.when(systemVmTemplate3.getArch()).thenReturn(CPU.CPUArch.arm64);
+       Mockito.when(systemVmTemplate3.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM);
+       List<VMTemplateVO> templates = Arrays.asList(systemVmTemplate1, systemVmTemplate2, systemVmTemplate3);
+       Mockito.when(hostDao.listDistinctHypervisorTypes(zoneId)).thenReturn(Arrays.asList(Hypervisor.HypervisorType.KVM));
+       SearchBuilder<VMTemplateVO> sb = mock(SearchBuilder.class);
+       templateDao.readySystemTemplateSearch = sb;
+       when(sb.create()).thenReturn(mock(SearchCriteria.class));
+       doReturn(templates).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class));
+       VMTemplateVO readyTemplate = templateDao.findSystemVMReadyTemplate(zoneId, Hypervisor.HypervisorType.KVM, CPU.CPUArch.arm64.getType());
+       Assert.assertEquals(CPU.CPUArch.arm64, readyTemplate.getArch());
+   }
}
@@ -192,7 +192,7 @@ public class SystemVmTemplateRegistrationTest {
    public void testValidateTemplateFile_fileNotFound() {
        SystemVmTemplateRegistration.MetadataTemplateDetails details =
                new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM,
-                       "name", "file", "url", "checksum", CPU.CPUArch.amd64);
+                       "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos");
        SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey(
                details.getHypervisorType(), details.getArch()), details);
        doReturn(null).when(systemVmTemplateRegistration).getTemplateFile(details);
@@ -209,7 +209,7 @@ public class SystemVmTemplateRegistrationTest {
    public void testValidateTemplateFile_checksumMismatch() {
        SystemVmTemplateRegistration.MetadataTemplateDetails details =
                new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM,
-                       "name", "file", "url", "checksum", CPU.CPUArch.amd64);
+                       "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos");
        File dummyFile = new File("dummy.txt");
        SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey(
                details.getHypervisorType(), details.getArch()), details);
@@ -228,7 +228,7 @@ public class SystemVmTemplateRegistrationTest {
    public void testValidateTemplateFile_success() {
        SystemVmTemplateRegistration.MetadataTemplateDetails details =
                new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM,
-                       "name", "file", "url", "checksum", CPU.CPUArch.amd64);
+                       "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos");
        File dummyFile = new File("dummy.txt");
        SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey(
                details.getHypervisorType(), details.getArch()), details);
@ -285,7 +285,7 @@ public class SystemVmTemplateRegistrationTest {
|
|||
SystemVmTemplateRegistration.MetadataTemplateDetails details =
|
||||
new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM,
|
||||
"name", "nonexistent.qcow2", "http://example.com/file.qcow2",
|
||||
"", CPU.CPUArch.arm64);
|
||||
"", CPU.CPUArch.arm64, "guestos");
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedStatic<HttpUtils> httpMock = Mockito.mockStatic(HttpUtils.class)) {
|
||||
filesMock.when(() -> Files.isWritable(any(Path.class))).thenReturn(true);
|
||||
|
|
@ -301,7 +301,7 @@ public class SystemVmTemplateRegistrationTest {
|
|||
SystemVmTemplateRegistration.MetadataTemplateDetails details =
|
||||
new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM,
|
||||
"name", "file.qcow2", "http://example.com/file.qcow2",
|
||||
"", CPU.CPUArch.arm64);
|
||||
"", CPU.CPUArch.arm64, "guestos");
|
||||
try (MockedStatic<Files> filesMock = Mockito.mockStatic(Files.class);
|
||||
MockedStatic<HttpUtils> httpMock = Mockito.mockStatic(HttpUtils.class)) {
|
||||
filesMock.when(() -> Files.isWritable(any(Path.class))).thenReturn(false);
|
||||
|
|
|
|||
|
|
@@ -42,6 +42,15 @@ function getGenericName() {
fi
}

function getGuestOS() {
hypervisor=$(echo "$1" | tr "[:upper:]" "[:lower:]")
if [[ "$hypervisor" == "vmware" || "$hypervisor" == "xenserver" ]]; then
echo "Other Linux (64-bit)"
else
echo "Debian GNU/Linux 12 (64-bit)"
fi
}

function getChecksum() {
local fileData="$1"
local hvName=$2

@@ -60,13 +69,14 @@ function createMetadataFile() {
section="${template%%:*}"
sectionHv="${section%%-*}"
hvName=$(getGenericName $sectionHv)
guestos=$(getGuestOS $sectionHv)

downloadurl="${template#*:}"
arch=$(echo ${downloadurl#*"/systemvmtemplate-$VERSION-"} | cut -d'-' -f 1)
templatename="systemvm-${sectionHv%.*}-${VERSION}-${arch}"
checksum=$(getChecksum "$fileData" "$VERSION-${arch}-$hvName")
filename=$(echo ${downloadurl##*'/'})
echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\n" >> $METADATAFILE
echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\narch = $arch\nguestos = $guestos\n" >> $METADATAFILE
done
}
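For illustration, with hypothetical values (VERSION 4.20.0, a KVM x86_64 template; checksum and URL are made up), the updated echo line appends a section of this shape, the new guestos key being the only addition:

[kvm-x86_64]
templatename = systemvm-kvm-4.20.0-x86_64
checksum = 0123456789abcdef0123456789abcdef
downloadurl = https://example.org/systemvmtemplate-4.20.0-x86_64-kvm.qcow2.bz2
filename = systemvmtemplate-4.20.0-x86_64-kvm.qcow2.bz2
arch = x86_64
guestos = Debian GNU/Linux 12 (64-bit)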
@@ -172,10 +172,15 @@ public class SnapshotObject implements SnapshotInfo {
@Override
public long getPhysicalSize() {
long physicalSize = 0;
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, store.getId(), snapshot.getId());
if (snapshotStore != null) {
physicalSize = snapshotStore.getPhysicalSize();
for (DataStoreRole role : List.of(DataStoreRole.Image, DataStoreRole.Primary)) {
logger.trace("Retrieving snapshot [{}] size from {} storage.", snapshot.getUuid(), role);
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findByStoreSnapshot(role, store.getId(), snapshot.getId());
if (snapshotStore != null) {
return snapshotStore.getPhysicalSize();
}
logger.trace("Snapshot [{}] size not found on {} storage.", snapshot.getUuid(), role);
}
logger.warn("Snapshot [{}] reference not found in any storage. There may be an inconsistency on the database.", snapshot.getUuid());
return physicalSize;
}
@@ -102,7 +102,7 @@ The Apache CloudStack files shared between agent and management server
Summary: CloudStack Agent for KVM hypervisors
Requires: (openssh-clients or openssh)
Requires: java-17-openjdk
Requires: tzdata-java
Requires: (tzdata-java or timezone-java)
Requires: %{name}-common = %{_ver}
Requires: libvirt
Requires: libvirt-daemon-driver-storage-rbd

@@ -143,7 +143,7 @@ The CloudStack baremetal agent
%package usage
Summary: CloudStack Usage calculation server
Requires: java-17-openjdk
Requires: tzdata-java
Requires: (tzdata-java or timezone-java)
Group: System Environment/Libraries
%description usage
The CloudStack usage calculation service
@@ -51,6 +51,7 @@ import javax.inject.Inject;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Locale;

@@ -162,6 +163,7 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co

if (VirtualMachine.State.Stopped.equals(vm.getState())) {
List<VolumeVO> vmVolumes = volumeDao.findByInstance(vm.getId());
vmVolumes.sort(Comparator.comparing(Volume::getDeviceId));
List<String> volumePaths = getVolumePaths(vmVolumes);
command.setVolumePaths(volumePaths);
}

@@ -212,7 +214,10 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
@Override
public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) {
List<Backup.VolumeInfo> backedVolumes = backup.getBackedUpVolumes();
List<VolumeVO> volumes = backedVolumes.stream().map(volume -> volumeDao.findByUuid(volume.getUuid())).collect(Collectors.toList());
List<VolumeVO> volumes = backedVolumes.stream()
.map(volume -> volumeDao.findByUuid(volume.getUuid()))
.sorted((v1, v2) -> Long.compare(v1.getDeviceId(), v2.getDeviceId()))
.collect(Collectors.toList());

LOG.debug("Restoring vm {} from backup {} on the NAS Backup Provider", vm, backup);
BackupRepository backupRepository = getBackupRepository(vm, backup);

@@ -246,9 +251,13 @@ public class NASBackupProvider extends AdapterBase implements BackupProvider, Co
if (Objects.isNull(storagePool)) {
throw new CloudRuntimeException("Unable to find storage pool associated to the volume");
}
String volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid());
String volumePathPrefix;
if (ScopeType.HOST.equals(storagePool.getScope())) {
volumePathPrefix = storagePool.getPath();
} else if (Storage.StoragePoolType.SharedMountPoint.equals(storagePool.getPoolType())) {
volumePathPrefix = storagePool.getPath();
} else {
volumePathPrefix = String.format("/mnt/%s", storagePool.getUuid());
}
volumePaths.add(String.format("%s/%s", volumePathPrefix, volume.getPath()));
}
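The prefix selection above boils down to: host-scoped and SharedMountPoint pools are already mounted at their configured path, while other primary pools (e.g. NFS) live under /mnt/<pool-uuid>. A sketch with hypothetical values, not taken from the patch:

// Host-scoped local pool: the configured path is used verbatim.
// storagePool.getPath() = "/var/lib/libvirt/images" -> "/var/lib/libvirt/images/<volume-path>"
// Zone- or cluster-wide NFS pool with UUID "3f2c..." -> "/mnt/3f2c.../<volume-path>"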
@@ -250,6 +250,15 @@ public class BridgeVifDriver extends VifDriverBase {
intf.defBridgeNet(_bridges.get("private"), null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter));
} else if (nic.getType() == Networks.TrafficType.Storage) {
String storageBrName = nic.getName() == null ? _bridges.get("private") : nic.getName();
if (nic.getBroadcastType() == Networks.BroadcastDomainType.Storage) {
vNetId = Networks.BroadcastDomainType.getValue(nic.getBroadcastUri());
protocol = Networks.BroadcastDomainType.Vlan.scheme();
}
if (isValidProtocolAndVnetId(vNetId, protocol)) {
logger.debug(String.format("creating a vNet dev and bridge for %s traffic per traffic label %s",
Networks.TrafficType.Storage.name(), trafficLabel));
storageBrName = createVnetBr(vNetId, storageBrName, protocol);
}
intf.defBridgeNet(storageBrName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter));
}
if (nic.getPxeDisable()) {
@@ -17,6 +17,8 @@
package com.cloud.hypervisor.kvm.resource;

import static com.cloud.host.Host.HOST_INSTANCE_CONVERSION;
import static com.cloud.host.Host.HOST_OVFTOOL_VERSION;
import static com.cloud.host.Host.HOST_VIRTV2V_VERSION;
import static com.cloud.host.Host.HOST_VOLUME_ENCRYPTION;

import java.io.BufferedReader;

@@ -3366,7 +3368,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
if (!meetRequirements) {
return false;
}
return isUbuntuHost() || isIoUringSupportedByQemu();
return isUbuntuOrDebianHost() || isIoUringSupportedByQemu();
}

/**

@@ -3379,13 +3381,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return diskBus != DiskDef.DiskBus.IDE || getHypervisorQemuVersion() >= HYPERVISOR_QEMU_VERSION_IDE_DISCARD_FIXED;
}

public boolean isUbuntuHost() {
public boolean isUbuntuOrDebianHost() {
Map<String, String> versionString = getVersionStrings();
String hostKey = "Host.OS";
if (MapUtils.isEmpty(versionString) || !versionString.containsKey(hostKey) || versionString.get(hostKey) == null) {
return false;
}
return versionString.get(hostKey).equalsIgnoreCase("ubuntu");
return versionString.get(hostKey).equalsIgnoreCase("ubuntu")
|| versionString.get(hostKey).toLowerCase().startsWith("debian");
}

private KVMPhysicalDisk getPhysicalDiskFromNfsStore(String dataStoreUrl, DataTO data) {

@@ -3503,10 +3506,10 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
public synchronized String attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, final Integer diskSeq) throws LibvirtException, URISyntaxException,
InternalErrorException {
final DiskDef iso = new DiskDef();
if (isAttach && StringUtils.isNotBlank(isoPath) && isoPath.lastIndexOf("/") > 0) {
if (isoPath.startsWith(getConfigPath() + "/" + ConfigDrive.CONFIGDRIVEDIR) && isoPath.contains(vmName)) {
if (isAttach && StringUtils.isNotBlank(isoPath)) {
if (isoPath.startsWith(getConfigPath() + "/" + ConfigDrive.CONFIGDRIVEDIR) || isoPath.contains(vmName)) {
iso.defISODisk(isoPath, diskSeq, DiskDef.DiskType.FILE);
} else {
} else if (isoPath.lastIndexOf("/") > 0) {
final int index = isoPath.lastIndexOf("/");
final String path = isoPath.substring(0, index);
final String name = isoPath.substring(index + 1);

@@ -3530,7 +3533,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
cleanupDisk(disk);
}
}

}
return result;
}

@@ -3766,7 +3768,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
cmd.setIqn(getIqn());
cmd.getHostDetails().put(HOST_VOLUME_ENCRYPTION, String.valueOf(hostSupportsVolumeEncryption()));
cmd.setHostTags(getHostTags());
cmd.getHostDetails().put(HOST_INSTANCE_CONVERSION, String.valueOf(hostSupportsInstanceConversion()));
boolean instanceConversionSupported = hostSupportsInstanceConversion();
cmd.getHostDetails().put(HOST_INSTANCE_CONVERSION, String.valueOf(instanceConversionSupported));
if (instanceConversionSupported) {
cmd.getHostDetails().put(HOST_VIRTV2V_VERSION, getHostVirtV2vVersion());
}
if (hostSupportsOvfExport()) {
cmd.getHostDetails().put(HOST_OVFTOOL_VERSION, getHostOvfToolVersion());
}
HealthCheckResult healthCheckResult = getHostHealthCheckResult();
if (healthCheckResult != HealthCheckResult.IGNORE) {
cmd.setHostHealthCheckResult(healthCheckResult == HealthCheckResult.SUCCESS);

@@ -5348,14 +5357,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv

public boolean hostSupportsInstanceConversion() {
int exitValue = Script.runSimpleBashScriptForExitValue(INSTANCE_CONVERSION_SUPPORTED_CHECK_CMD);
if (isUbuntuHost() && exitValue == 0) {
if (isUbuntuOrDebianHost() && exitValue == 0) {
exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_NBDKIT_PKG_CHECK_CMD);
}
return exitValue == 0;
}

public boolean hostSupportsWindowsGuestConversion() {
if (isUbuntuHost()) {
if (isUbuntuOrDebianHost()) {
int exitValue = Script.runSimpleBashScriptForExitValue(UBUNTU_WINDOWS_GUEST_CONVERSION_SUPPORTED_CHECK_CMD);
return exitValue == 0;
}

@@ -5368,8 +5377,24 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return exitValue == 0;
}

public String getHostVirtV2vVersion() {
if (!hostSupportsInstanceConversion()) {
return "";
}
String cmd = String.format("%s | awk '{print $2}'", INSTANCE_CONVERSION_SUPPORTED_CHECK_CMD);
String version = Script.runSimpleBashScript(cmd);
return StringUtils.isNotBlank(version) ? version.split(",")[0] : "";
}

public String getHostOvfToolVersion() {
if (!hostSupportsOvfExport()) {
return "";
}
return Script.runSimpleBashScript(OVF_EXPORT_TOOl_GET_VERSION_CMD);
}

public boolean ovfExportToolSupportsParallelThreads() {
String ovfExportToolVersion = Script.runSimpleBashScript(OVF_EXPORT_TOOl_GET_VERSION_CMD);
String ovfExportToolVersion = getHostOvfToolVersion();
if (StringUtils.isBlank(ovfExportToolVersion)) {
return false;
}
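getHostVirtV2vVersion shells out to the conversion check command and keeps awk's second field up to the first comma. Assuming virt-v2v --version prints something like "virt-v2v 1.42.0" (the exact output shape varies by distro, so this is an assumption), the same parse in plain Java would be:

// Hypothetical re-implementation of the shell pipeline `... | awk '{print $2}'` plus split(",")[0].
static String parseVirtV2vVersion(String rawVersionOutput) {
    if (rawVersionOutput == null || rawVersionOutput.isBlank()) {
        return "";
    }
    String[] fields = rawVersionOutput.trim().split("\\s+"); // awk-style field splitting
    return fields.length > 1 ? fields[1].split(",")[0] : ""; // keep "1.42.0" from "1.42.0,..."
}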
@@ -249,9 +249,7 @@ public class LibvirtVMDef {
guestDef.append("<boot dev='" + bo + "'/>\n");
}
}
if (_arch == null || !_arch.equals("aarch64")) {
guestDef.append("<smbios mode='sysinfo'/>\n");
}
guestDef.append("<smbios mode='sysinfo'/>\n");
guestDef.append("</os>\n");
if (iothreads) {
guestDef.append(String.format("<iothreads>%s</iothreads>", NUMBER_OF_IOTHREADS));
@@ -32,7 +32,7 @@ public class LibvirtCheckConvertInstanceCommandWrapper extends CommandWrapper<Ch
public Answer execute(CheckConvertInstanceCommand cmd, LibvirtComputingResource serverResource) {
if (!serverResource.hostSupportsInstanceConversion()) {
String msg = String.format("Cannot convert the instance from VMware as the virt-v2v binary is not found on host %s. " +
"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuHost()? ", nbdkit" : "");
"Please install virt-v2v%s on the host before attempting the instance conversion.", serverResource.getPrivateIp(), serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
logger.info(msg);
return new CheckConvertInstanceAnswer(cmd, false, msg);
}
@@ -31,16 +31,24 @@ import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ResourceWrapper(handles = CheckVolumeCommand.class)
public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper<CheckVolumeCommand, Answer, LibvirtComputingResource> {

private static final List<Storage.StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED = Arrays.asList(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem);

@Override
public Answer execute(final CheckVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {
String result = null;

@@ -50,34 +58,76 @@ public final class LibvirtCheckVolumeCommandWrapper extends CommandWrapper<Check
KVMStoragePool pool = poolMgr.getStoragePool(storageFilerTO.getType(), storageFilerTO.getUuid());

try {
if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
if (STORAGE_POOL_TYPES_SUPPORTED.contains(storageFilerTO.getType())) {
final KVMPhysicalDisk vol = pool.getPhysicalDisk(srcFile);
final String path = vol.getPath();
long size = getVirtualSizeFromFile(path);
return new CheckVolumeAnswer(command, "", size);
try {
KVMPhysicalDisk.checkQcow2File(path);
} catch (final CloudRuntimeException e) {
return new CheckVolumeAnswer(command, false, "", 0, getVolumeDetails(pool, vol));
}

long size = KVMPhysicalDisk.getVirtualSizeFromFile(path);
return new CheckVolumeAnswer(command, true, "", size, getVolumeDetails(pool, vol));
} else {
return new Answer(command, false, "Unsupported Storage Pool");
}

} catch (final Exception e) {
logger.error("Error while locating disk: "+ e.getMessage());
logger.error("Error while checking the disk: {}", e.getMessage());
return new Answer(command, false, result);
}
}

private long getVirtualSizeFromFile(String path) {
private Map<VolumeOnStorageTO.Detail, String> getVolumeDetails(KVMStoragePool pool, KVMPhysicalDisk disk) {
Map<String, String> info = getDiskFileInfo(pool, disk, true);
if (MapUtils.isEmpty(info)) {
return null;
}

Map<VolumeOnStorageTO.Detail, String> volumeDetails = new HashMap<>();

String backingFilePath = info.get(QemuImg.BACKING_FILE);
if (StringUtils.isNotBlank(backingFilePath)) {
volumeDetails.put(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
}
String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT);
if (StringUtils.isNotBlank(backingFileFormat)) {
volumeDetails.put(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
}
String clusterSize = info.get(QemuImg.CLUSTER_SIZE);
if (StringUtils.isNotBlank(clusterSize)) {
volumeDetails.put(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat)) {
volumeDetails.put(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
}
String encrypted = info.get(QemuImg.ENCRYPTED);
if (StringUtils.isNotBlank(encrypted) && encrypted.equalsIgnoreCase("yes")) {
volumeDetails.put(VolumeOnStorageTO.Detail.IS_ENCRYPTED, String.valueOf(Boolean.TRUE));
}
Boolean isLocked = isDiskFileLocked(pool, disk);
volumeDetails.put(VolumeOnStorageTO.Detail.IS_LOCKED, String.valueOf(isLocked));

return volumeDetails;
}

private Map<String, String> getDiskFileInfo(KVMStoragePool pool, KVMPhysicalDisk disk, boolean secure) {
if (!STORAGE_POOL_TYPES_SUPPORTED.contains(pool.getType())) {
return new HashMap<>(); // unknown
}
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(path);
Map<String, String> info = qemu.info(qemuFile);
if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
} else {
throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
}
QemuImgFile qemuFile = new QemuImgFile(disk.getPath(), disk.getFormat());
return qemu.info(qemuFile, secure);
} catch (QemuImgException | LibvirtException ex) {
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
logger.error("Failed to get info of disk file: " + ex.getMessage());
return null;
}
}

private boolean isDiskFileLocked(KVMStoragePool pool, KVMPhysicalDisk disk) {
Map<String, String> info = getDiskFileInfo(pool, disk, false);
return info == null;
}
}
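The detail map built above comes straight from qemu-img info. For reference, a qcow2 volume typically reports fields like the following (sample output, values made up), which map onto the BACKING_FILE, BACKING_FILE_FORMAT, CLUSTER_SIZE, FILE_FORMAT and IS_ENCRYPTED details:

file format: qcow2
virtual size: 20 GiB (21474836480 bytes)
cluster_size: 65536
backing file: /mnt/3f2c.../base.qcow2
backing file format: qcow2
encrypted: yes

The lock probe reuses the same call: isDiskFileLocked runs getDiskFileInfo with secure=false, and a null result is taken to mean another process holds the image lock, so qemu-img could not open the file.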
@@ -18,22 +18,12 @@
//
package com.cloud.hypervisor.kvm.resource.wrapper;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;

import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;

import com.cloud.agent.api.Answer;

@@ -44,17 +34,12 @@ import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.RemoteInstanceTO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.FileUtil;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.OutputInterpreter;
import com.cloud.utils.script.Script;

@@ -75,9 +60,9 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert

if (cmd.getCheckConversionSupport() && !serverResource.hostSupportsInstanceConversion()) {
String msg = String.format("Cannot convert the instance %s from VMware as the virt-v2v binary is not found. " +
"Please install virt-v2v%s on the host before attempting the instance conversion.", sourceInstanceName, serverResource.isUbuntuHost()? ", nbdkit" : "");
"Please install virt-v2v%s on the host before attempting the instance conversion.", sourceInstanceName, serverResource.isUbuntuOrDebianHost()? ", nbdkit" : "");
logger.info(msg);
return new ConvertInstanceAnswer(cmd, false, msg);
return new Answer(cmd, false, msg);
}

if (!areSourceAndDestinationHypervisorsSupported(sourceHypervisorType, destinationHypervisorType)) {

@@ -85,7 +70,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
String.format("The destination hypervisor type is %s, KVM was expected, cannot handle it", destinationHypervisorType) :
String.format("The source hypervisor type %s is not supported for KVM conversion", sourceHypervisorType);
logger.error(err);
return new ConvertInstanceAnswer(cmd, false, err);
return new Answer(cmd, false, err);
}

final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr();

@@ -103,7 +88,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
if (StringUtils.isBlank(exportInstanceOVAUrl)) {
String err = String.format("Couldn't export OVA for the VM %s, due to empty url", sourceInstanceName);
logger.error(err);
return new ConvertInstanceAnswer(cmd, false, err);
return new Answer(cmd, false, err);
}

int noOfThreads = cmd.getThreadsCountToExportOvf();

@@ -117,7 +102,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
if (!ovfExported) {
String err = String.format("Export OVA for the VM %s failed", sourceInstanceName);
logger.error(err);
return new ConvertInstanceAnswer(cmd, false, err);
return new Answer(cmd, false, err);
}
sourceOVFDirPath = String.format("%s%s/", sourceOVFDirPath, sourceInstanceName);
} else {

@@ -140,7 +125,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
"has a different virt-v2v version.",
ovfTemplateDirOnConversionLocation);
logger.error(err);
return new ConvertInstanceAnswer(cmd, false, err);
return new Answer(cmd, false, err);
}
return new ConvertInstanceAnswer(cmd, temporaryConvertUuid);
} catch (Exception e) {

@@ -148,7 +133,7 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
sourceInstanceName, sourceHypervisorType, e.getMessage());
logger.error(error, e);
cleanupSecondaryStorage = true;
return new ConvertInstanceAnswer(cmd, false, error);
return new Answer(cmd, false, error);
} finally {
if (ovfExported && StringUtils.isNotBlank(ovfTemplateDirOnConversionLocation)) {
String sourceOVFDir = String.format("%s/%s", temporaryConvertPath, ovfTemplateDirOnConversionLocation);

@@ -205,55 +190,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
encodedUsername, encodedPassword, vcenter, datacenter, vm);
}

protected List<KVMPhysicalDisk> getTemporaryDisksFromParsedXml(KVMStoragePool pool, LibvirtDomainXMLParser xmlParser, String convertedBasePath) {
List<LibvirtVMDef.DiskDef> disksDefs = xmlParser.getDisks();
disksDefs = disksDefs.stream().filter(x -> x.getDiskType() == LibvirtVMDef.DiskDef.DiskType.FILE &&
x.getDeviceType() == LibvirtVMDef.DiskDef.DeviceType.DISK).collect(Collectors.toList());
if (CollectionUtils.isEmpty(disksDefs)) {
String err = String.format("Cannot find any disk defined on the converted XML domain %s.xml", convertedBasePath);
logger.error(err);
throw new CloudRuntimeException(err);
}
sanitizeDisksPath(disksDefs);
return getPhysicalDisksFromDefPaths(disksDefs, pool);
}

private List<KVMPhysicalDisk> getPhysicalDisksFromDefPaths(List<LibvirtVMDef.DiskDef> disksDefs, KVMStoragePool pool) {
List<KVMPhysicalDisk> disks = new ArrayList<>();
for (LibvirtVMDef.DiskDef diskDef : disksDefs) {
KVMPhysicalDisk physicalDisk = pool.getPhysicalDisk(diskDef.getDiskPath());
disks.add(physicalDisk);
}
return disks;
}

protected List<KVMPhysicalDisk> getTemporaryDisksWithPrefixFromTemporaryPool(KVMStoragePool pool, String path, String prefix) {
String msg = String.format("Could not parse correctly the converted XML domain, checking for disks on %s with prefix %s", path, prefix);
logger.info(msg);
pool.refresh();
List<KVMPhysicalDisk> disksWithPrefix = pool.listPhysicalDisks()
.stream()
.filter(x -> x.getName().startsWith(prefix) && !x.getName().endsWith(".xml"))
.collect(Collectors.toList());
if (CollectionUtils.isEmpty(disksWithPrefix)) {
msg = String.format("Could not find any converted disk with prefix %s on temporary location %s", prefix, path);
logger.error(msg);
throw new CloudRuntimeException(msg);
}
return disksWithPrefix;
}

private void cleanupDisksAndDomainFromTemporaryLocation(List<KVMPhysicalDisk> disks,
KVMStoragePool temporaryStoragePool,
String temporaryConvertUuid) {
for (KVMPhysicalDisk disk : disks) {
logger.info(String.format("Cleaning up temporary disk %s after conversion from temporary location", disk.getName()));
temporaryStoragePool.deletePhysicalDisk(disk.getName(), Storage.ImageFormat.QCOW2);
}
logger.info(String.format("Cleaning up temporary domain %s after conversion from temporary location", temporaryConvertUuid));
FileUtil.deleteFiles(temporaryStoragePool.getLocalPath(), temporaryConvertUuid, ".xml");
}

protected void sanitizeDisksPath(List<LibvirtVMDef.DiskDef> disks) {
for (LibvirtVMDef.DiskDef disk : disks) {
String[] diskPathParts = disk.getDiskPath().split("/");

@@ -262,114 +198,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
}
}

protected List<KVMPhysicalDisk> moveTemporaryDisksToDestination(List<KVMPhysicalDisk> temporaryDisks,
List<String> destinationStoragePools,
KVMStoragePoolManager storagePoolMgr) {
List<KVMPhysicalDisk> targetDisks = new ArrayList<>();
if (temporaryDisks.size() != destinationStoragePools.size()) {
String warn = String.format("Discrepancy between the converted instance disks (%s) " +
"and the expected number of disks (%s)", temporaryDisks.size(), destinationStoragePools.size());
logger.warn(warn);
}
for (int i = 0; i < temporaryDisks.size(); i++) {
String poolPath = destinationStoragePools.get(i);
KVMStoragePool destinationPool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, poolPath);
if (destinationPool == null) {
String err = String.format("Could not find a storage pool by URI: %s", poolPath);
logger.error(err);
continue;
}
if (destinationPool.getType() != Storage.StoragePoolType.NetworkFilesystem) {
String err = String.format("Storage pool by URI: %s is not an NFS storage", poolPath);
logger.error(err);
continue;
}
KVMPhysicalDisk sourceDisk = temporaryDisks.get(i);
if (logger.isDebugEnabled()) {
String msg = String.format("Trying to copy converted instance disk number %s from the temporary location %s" +
" to destination storage pool %s", i, sourceDisk.getPool().getLocalPath(), destinationPool.getUuid());
logger.debug(msg);
}

String destinationName = UUID.randomUUID().toString();

KVMPhysicalDisk destinationDisk = storagePoolMgr.copyPhysicalDisk(sourceDisk, destinationName, destinationPool, 7200 * 1000);
targetDisks.add(destinationDisk);
}
return targetDisks;
}

private UnmanagedInstanceTO getConvertedUnmanagedInstance(String baseName,
List<KVMPhysicalDisk> vmDisks,
LibvirtDomainXMLParser xmlParser) {
UnmanagedInstanceTO instanceTO = new UnmanagedInstanceTO();
instanceTO.setName(baseName);
instanceTO.setDisks(getUnmanagedInstanceDisks(vmDisks, xmlParser));
instanceTO.setNics(getUnmanagedInstanceNics(xmlParser));
return instanceTO;
}

private List<UnmanagedInstanceTO.Nic> getUnmanagedInstanceNics(LibvirtDomainXMLParser xmlParser) {
List<UnmanagedInstanceTO.Nic> nics = new ArrayList<>();
if (xmlParser != null) {
List<LibvirtVMDef.InterfaceDef> interfaces = xmlParser.getInterfaces();
for (LibvirtVMDef.InterfaceDef interfaceDef : interfaces) {
UnmanagedInstanceTO.Nic nic = new UnmanagedInstanceTO.Nic();
nic.setMacAddress(interfaceDef.getMacAddress());
nic.setNicId(interfaceDef.getBrName());
nic.setAdapterType(interfaceDef.getModel().toString());
nics.add(nic);
}
}
return nics;
}

protected List<UnmanagedInstanceTO.Disk> getUnmanagedInstanceDisks(List<KVMPhysicalDisk> vmDisks, LibvirtDomainXMLParser xmlParser) {
List<UnmanagedInstanceTO.Disk> instanceDisks = new ArrayList<>();
List<LibvirtVMDef.DiskDef> diskDefs = xmlParser != null ? xmlParser.getDisks() : null;
for (int i = 0; i< vmDisks.size(); i++) {
KVMPhysicalDisk physicalDisk = vmDisks.get(i);
KVMStoragePool storagePool = physicalDisk.getPool();
UnmanagedInstanceTO.Disk disk = new UnmanagedInstanceTO.Disk();
disk.setPosition(i);
Pair<String, String> storagePoolHostAndPath = getNfsStoragePoolHostAndPath(storagePool);
disk.setDatastoreHost(storagePoolHostAndPath.first());
disk.setDatastorePath(storagePoolHostAndPath.second());
disk.setDatastoreName(storagePool.getUuid());
disk.setDatastoreType(storagePool.getType().name());
disk.setCapacity(physicalDisk.getVirtualSize());
disk.setFileBaseName(physicalDisk.getName());
if (CollectionUtils.isNotEmpty(diskDefs)) {
LibvirtVMDef.DiskDef diskDef = diskDefs.get(i);
disk.setController(diskDef.getBusType() != null ? diskDef.getBusType().toString() : LibvirtVMDef.DiskDef.DiskBus.VIRTIO.toString());
} else {
// If the job is finished but we cannot parse the XML, the guest VM can use the virtio driver
disk.setController(LibvirtVMDef.DiskDef.DiskBus.VIRTIO.toString());
}
instanceDisks.add(disk);
}
return instanceDisks;
}

protected Pair<String, String> getNfsStoragePoolHostAndPath(KVMStoragePool storagePool) {
String sourceHostIp = null;
String sourcePath = null;
List<String[]> commands = new ArrayList<>();
commands.add(new String[]{Script.getExecutableAbsolutePath("mount")});
commands.add(new String[]{Script.getExecutableAbsolutePath("grep"), storagePool.getLocalPath()});
String storagePoolMountPoint = Script.executePipedCommands(commands, 0).second();
logger.debug(String.format("NFS Storage pool: %s - local path: %s, mount point: %s", storagePool.getUuid(), storagePool.getLocalPath(), storagePoolMountPoint));
if (StringUtils.isNotEmpty(storagePoolMountPoint)) {
String[] res = storagePoolMountPoint.strip().split(" ");
res = res[0].split(":");
if (res.length > 1) {
sourceHostIp = res[0].strip();
sourcePath = res[1].strip();
}
}
return new Pair<>(sourceHostIp, sourcePath);
}

private boolean exportOVAFromVMOnVcenter(String vmExportUrl,
String targetOvfDir,
int noOfThreads,

@@ -412,27 +240,6 @@ public class LibvirtConvertInstanceCommandWrapper extends CommandWrapper<Convert
return exitValue == 0;
}

protected LibvirtDomainXMLParser parseMigratedVMXmlDomain(String installPath) throws IOException {
String xmlPath = String.format("%s.xml", installPath);
if (!new File(xmlPath).exists()) {
String err = String.format("Conversion failed. Unable to find the converted XML domain, expected %s", xmlPath);
logger.error(err);
throw new CloudRuntimeException(err);
}
InputStream is = new BufferedInputStream(new FileInputStream(xmlPath));
String xml = IOUtils.toString(is, Charset.defaultCharset());
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
try {
parser.parseDomainXML(xml);
return parser;
} catch (RuntimeException e) {
String err = String.format("Error parsing the converted instance XML domain at %s: %s", xmlPath, e.getMessage());
logger.error(err, e);
logger.debug(xml);
return null;
}
}

protected String encodeUsername(String username) {
return URLEncoder.encode(username, Charset.defaultCharset());
}
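The removed getNfsStoragePoolHostAndPath helper recovered the NFS server and export from the mount table: the grep-matched line is split on spaces, and its first field on ':'. With a hypothetical mount(8) line the steps look like this:

// Hypothetical mount output line for an NFS primary store (values are examples only).
String line = "10.0.32.5:/export/primary on /mnt/9bf3a61d type nfs4 (rw,relatime)";
String[] fields = line.strip().split(" ");      // fields[0] = "10.0.32.5:/export/primary"
String[] hostAndPath = fields[0].split(":");    // -> "10.0.32.5" and "/export/primary"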
@@ -31,15 +31,22 @@ import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.volume.VolumeOnStorageTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ResourceWrapper(handles = CopyRemoteVolumeCommand.class)
public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<CopyRemoteVolumeCommand, Answer, LibvirtComputingResource> {
private static final List<Storage.StoragePoolType> STORAGE_POOL_TYPES_SUPPORTED = Arrays.asList(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem);

@Override
public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) {

@@ -55,14 +62,19 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<
int timeoutInSecs = command.getWait();

try {
if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem ||
storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) {
if (STORAGE_POOL_TYPES_SUPPORTED.contains(storageFilerTO.getType())) {
String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath, timeoutInSecs);
logger.debug("Volume " + srcFile + " copy successful, copied to file: " + filename);
final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename);
final String path = vol.getPath();
long size = getVirtualSizeFromFile(path);
return new CopyRemoteVolumeAnswer(command, "", filename, size);
try {
KVMPhysicalDisk.checkQcow2File(path);
} catch (final CloudRuntimeException e) {
return new CopyRemoteVolumeAnswer(command, false, "", filename, 0, getVolumeDetails(pool, vol));
}

long size = KVMPhysicalDisk.getVirtualSizeFromFile(path);
return new CopyRemoteVolumeAnswer(command, true, "", filename, size, getVolumeDetails(pool, vol));
} else {
String msg = "Unsupported storage pool type: " + storageFilerTO.getType().toString() + ", only local and NFS pools are supported";
return new Answer(command, false, msg);

@@ -74,18 +86,56 @@ public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<
}
}

private long getVirtualSizeFromFile(String path) {
private Map<VolumeOnStorageTO.Detail, String> getVolumeDetails(KVMStoragePool pool, KVMPhysicalDisk disk) {
Map<String, String> info = getDiskFileInfo(pool, disk, true);
if (MapUtils.isEmpty(info)) {
return null;
}

Map<VolumeOnStorageTO.Detail, String> volumeDetails = new HashMap<>();

String backingFilePath = info.get(QemuImg.BACKING_FILE);
if (StringUtils.isNotBlank(backingFilePath)) {
volumeDetails.put(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
}
String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT);
if (StringUtils.isNotBlank(backingFileFormat)) {
volumeDetails.put(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
}
String clusterSize = info.get(QemuImg.CLUSTER_SIZE);
if (StringUtils.isNotBlank(clusterSize)) {
volumeDetails.put(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat)) {
volumeDetails.put(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
}
String encrypted = info.get(QemuImg.ENCRYPTED);
if (StringUtils.isNotBlank(encrypted) && encrypted.equalsIgnoreCase("yes")) {
volumeDetails.put(VolumeOnStorageTO.Detail.IS_ENCRYPTED, String.valueOf(Boolean.TRUE));
}
Boolean isLocked = isDiskFileLocked(pool, disk);
volumeDetails.put(VolumeOnStorageTO.Detail.IS_LOCKED, String.valueOf(isLocked));

return volumeDetails;
}

private Map<String, String> getDiskFileInfo(KVMStoragePool pool, KVMPhysicalDisk disk, boolean secure) {
if (!STORAGE_POOL_TYPES_SUPPORTED.contains(pool.getType())) {
return new HashMap<>(); // unknown
}
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(path);
Map<String, String> info = qemu.info(qemuFile);
if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
} else {
throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
}
QemuImgFile qemuFile = new QemuImgFile(disk.getPath(), disk.getFormat());
return qemu.info(qemuFile, secure);
} catch (QemuImgException | LibvirtException ex) {
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
logger.error("Failed to get info of disk file: " + ex.getMessage());
return null;
}
}

private boolean isDiskFileLocked(KVMStoragePool pool, KVMPhysicalDisk disk) {
Map<String, String> info = getDiskFileInfo(pool, disk, false);
return info == null;
}
}
@@ -36,6 +36,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;

@@ -91,37 +92,46 @@ public final class LibvirtGetVolumesOnStorageCommandWrapper extends CommandWrapp
if (disk.getQemuEncryptFormat() != null) {
volumeOnStorageTO.setQemuEncryptFormat(disk.getQemuEncryptFormat().toString());
}
String backingFilePath = info.get(QemuImg.BACKING_FILE);
if (StringUtils.isNotBlank(backingFilePath)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
}
String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT);
if (StringUtils.isNotBlank(backingFileFormat)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
}
String clusterSize = info.get(QemuImg.CLUSTER_SIZE);
if (StringUtils.isNotBlank(clusterSize)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat)) {
if (!fileFormat.equalsIgnoreCase(disk.getFormat().toString())) {
return new GetVolumesOnStorageAnswer(command, false, String.format("The file format is %s, but expected to be %s", fileFormat, disk.getFormat()));
}
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
if (StringUtils.isNotBlank(fileFormat) && !fileFormat.equalsIgnoreCase(disk.getFormat().toString())) {
return new GetVolumesOnStorageAnswer(command, false, String.format("The file format is %s, but expected to be %s", fileFormat, disk.getFormat()));
}
String encrypted = info.get(QemuImg.ENCRYPTED);
if (StringUtils.isNotBlank(encrypted) && encrypted.equalsIgnoreCase("yes")) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_ENCRYPTED, String.valueOf(Boolean.TRUE));
}
Boolean isLocked = isDiskFileLocked(storagePool, disk);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_LOCKED, String.valueOf(isLocked));
addDetailsToVolumeOnStorageTO(volumeOnStorageTO, info, storagePool, disk);

volumes.add(volumeOnStorageTO);
}
return new GetVolumesOnStorageAnswer(command, volumes);
}

private void addDetailsToVolumeOnStorageTO(VolumeOnStorageTO volumeOnStorageTO, final Map<String, String> info, final KVMStoragePool storagePool, final KVMPhysicalDisk disk) {
if (MapUtils.isEmpty(info)) {
return;
}

String backingFilePath = info.get(QemuImg.BACKING_FILE);
if (StringUtils.isNotBlank(backingFilePath)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE, backingFilePath);
}
String backingFileFormat = info.get(QemuImg.BACKING_FILE_FORMAT);
if (StringUtils.isNotBlank(backingFileFormat)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.BACKING_FILE_FORMAT, backingFileFormat);
}
String clusterSize = info.get(QemuImg.CLUSTER_SIZE);
if (StringUtils.isNotBlank(clusterSize)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.CLUSTER_SIZE, clusterSize);
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat)) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.FILE_FORMAT, fileFormat);
}
String encrypted = info.get(QemuImg.ENCRYPTED);
if (StringUtils.isNotBlank(encrypted) && encrypted.equalsIgnoreCase("yes")) {
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_ENCRYPTED, String.valueOf(Boolean.TRUE));
}
Boolean isLocked = isDiskFileLocked(storagePool, disk);
volumeOnStorageTO.addDetail(VolumeOnStorageTO.Detail.IS_LOCKED, String.valueOf(isLocked));
}

private GetVolumesOnStorageAnswer addAllVolumes(final GetVolumesOnStorageCommand command, final KVMStoragePool storagePool, String keyword) {
List<VolumeOnStorageTO> volumes = new ArrayList<>();

@@ -134,11 +144,21 @@ public final class LibvirtGetVolumesOnStorageCommandWrapper extends CommandWrapp
if (!isDiskFormatSupported(disk)) {
continue;
}
Map<String, String> info = getDiskFileInfo(storagePool, disk, true);
if (info == null) {
continue;
}
VolumeOnStorageTO volumeOnStorageTO = new VolumeOnStorageTO(Hypervisor.HypervisorType.KVM, disk.getName(), disk.getName(), disk.getPath(),
disk.getFormat().toString(), disk.getSize(), disk.getVirtualSize());
if (disk.getQemuEncryptFormat() != null) {
volumeOnStorageTO.setQemuEncryptFormat(disk.getQemuEncryptFormat().toString());
}
String fileFormat = info.get(QemuImg.FILE_FORMAT);
if (StringUtils.isNotBlank(fileFormat) && !fileFormat.equalsIgnoreCase(disk.getFormat().toString())) {
continue;
}
addDetailsToVolumeOnStorageTO(volumeOnStorageTO, info, storagePool, disk);

volumes.add(volumeOnStorageTO);
}
return new GetVolumesOnStorageAnswer(command, volumes);
@@ -899,7 +899,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
Node sourceNode = diskChildNode;
NamedNodeMap sourceNodeAttributes = sourceNode.getAttributes();
Node sourceNodeAttribute = sourceNodeAttributes.getNamedItem("file");
if ( sourceNodeAttribute.getNodeValue().contains(vmName)) {
if (sourceNodeAttribute != null && sourceNodeAttribute.getNodeValue().contains(vmName)) {
diskNode.removeChild(diskChildNode);
Element newChildSourceNode = doc.createElement("source");
newChildSourceNode.setAttribute("file", isoPath);
@@ -43,17 +43,25 @@ public final class LibvirtReadyCommandWrapper extends CommandWrapper<ReadyComman
public Answer execute(final ReadyCommand command, final LibvirtComputingResource libvirtComputingResource) {
Map<String, String> hostDetails = new HashMap<String, String>();

if (hostSupportsUefi(libvirtComputingResource.isUbuntuHost()) && libvirtComputingResource.isUefiPropertiesFileLoaded()) {
if (hostSupportsUefi(libvirtComputingResource.isUbuntuOrDebianHost()) && libvirtComputingResource.isUefiPropertiesFileLoaded()) {
hostDetails.put(Host.HOST_UEFI_ENABLE, Boolean.TRUE.toString());
}

if (libvirtComputingResource.hostSupportsInstanceConversion()) {
hostDetails.put(Host.HOST_VIRTV2V_VERSION, libvirtComputingResource.getHostVirtV2vVersion());
}

if (libvirtComputingResource.hostSupportsOvfExport()) {
hostDetails.put(Host.HOST_OVFTOOL_VERSION, libvirtComputingResource.getHostOvfToolVersion());
}

return new ReadyAnswer(command, hostDetails);
}

private boolean hostSupportsUefi(boolean isUbuntuHost) {
private boolean hostSupportsUefi(boolean isUbuntuOrDebianHost) {
int timeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.AGENT_SCRIPT_TIMEOUT) * 1000; // Get property value & convert to milliseconds
int result;
if (isUbuntuHost) {
if (isUbuntuOrDebianHost) {
logger.debug("Running command : [dpkg -l ovmf] with timeout : " + timeout + " ms");
result = Script.executeCommandForExitValue(timeout, Script.getExecutableAbsolutePath("dpkg"), "-l", "ovmf");
} else {
@@ -25,7 +25,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import com.cloud.hypervisor.kvm.storage.ScaleIOStorageAdaptor;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;

@@ -33,7 +32,6 @@ import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.libvirt.Connect;
import org.libvirt.Domain;

@@ -100,7 +98,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
newSize = ScaleIOStorageAdaptor.getUsableBytesFromRawBytes(newSize);
} else if (spool.getType().equals(StoragePoolType.PowerFlex)) {
// PowerFlex RAW/LUKS is already resized, we just notify the domain based on new size (considering LUKS overhead)
newSize = getVirtualSizeFromFile(path);
newSize = KVMPhysicalDisk.getVirtualSizeFromFile(path);
}

if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex) {

@@ -214,21 +212,6 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
}
}

private long getVirtualSizeFromFile(String path) {
try {
QemuImg qemu = new QemuImg(0);
QemuImgFile qemuFile = new QemuImgFile(path);
Map<String, String> info = qemu.info(qemuFile);
if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
} else {
throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
}
} catch (QemuImgException | LibvirtException ex) {
throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
}
}

private Answer handleMultipathSCSIResize(ResizeVolumeCommand command, KVMStoragePool pool) {
((MultipathSCSIPool)pool).resize(command.getPath(), command.getInstanceName(), command.getNewSize());
return new ResizeVolumeAnswer(command, true, "");
@ -62,16 +62,25 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
|
|||
String restoreVolumeUuid = command.getRestoreVolumeUUID();
|
||||
|
||||
String newVolumeId = null;
|
||||
if (Objects.isNull(vmExists)) {
|
||||
String volumePath = volumePaths.get(0);
|
||||
int lastIndex = volumePath.lastIndexOf("/");
|
||||
newVolumeId = volumePath.substring(lastIndex + 1);
|
||||
restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid,
|
||||
new Pair<>(vmName, command.getVmState()), mountOptions);
|
||||
} else if (Boolean.TRUE.equals(vmExists)) {
|
||||
restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions);
|
||||
} else {
|
||||
restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions);
|
||||
try {
|
||||
if (Objects.isNull(vmExists)) {
|
||||
String volumePath = volumePaths.get(0);
|
||||
int lastIndex = volumePath.lastIndexOf("/");
|
||||
newVolumeId = volumePath.substring(lastIndex + 1);
|
||||
restoreVolume(backupPath, backupRepoType, backupRepoAddress, volumePath, diskType, restoreVolumeUuid,
|
||||
new Pair<>(vmName, command.getVmState()), mountOptions);
|
||||
} else if (Boolean.TRUE.equals(vmExists)) {
|
||||
restoreVolumesOfExistingVM(volumePaths, backupPath, backupRepoType, backupRepoAddress, mountOptions);
|
||||
} else {
|
||||
restoreVolumesOfDestroyedVMs(volumePaths, vmName, backupPath, backupRepoType, backupRepoAddress, mountOptions);
|
||||
}
|
||||
} catch (CloudRuntimeException e) {
|
||||
String errorMessage = "Failed to restore backup for VM: " + vmName + ".";
|
||||
if (e.getMessage() != null && !e.getMessage().isEmpty()) {
|
||||
errorMessage += " Details: " + e.getMessage();
|
||||
}
|
||||
logger.error(errorMessage);
|
||||
return new BackupAnswer(command, false, errorMessage);
|
||||
}
|
||||
|
||||
return new BackupAnswer(command, true, newVolumeId);
|
||||
|
|
@@ -86,10 +95,8 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
            String volumePath = volumePaths.get(idx);
            Pair<String, String> bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null);
            diskType = "datadisk";
            try {
                replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
            } catch (IOException e) {
                throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
            if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
                throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
            }
        }
    } finally {

@@ -108,10 +115,8 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
            String volumePath = volumePaths.get(i);
            Pair<String, String> bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, null);
            diskType = "datadisk";
            try {
                replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
            } catch (IOException e) {
                throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
            if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
                throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
            }
        }
    } finally {

@@ -126,15 +131,13 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
        Pair<String, String> bkpPathAndVolUuid;
        try {
            bkpPathAndVolUuid = getBackupPath(mountDirectory, volumePath, backupPath, diskType, volumeUUID);
            try {
                replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first());
                if (VirtualMachine.State.Running.equals(vmNameAndState.second())) {
                    if (!attachVolumeToVm(vmNameAndState.first(), volumePath)) {
                        throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first()));
                    }
            if (!replaceVolumeWithBackup(volumePath, bkpPathAndVolUuid.first())) {
                throw new CloudRuntimeException(String.format("Unable to restore backup for volume [%s].", bkpPathAndVolUuid.second()));
            }
            if (VirtualMachine.State.Running.equals(vmNameAndState.second())) {
                if (!attachVolumeToVm(vmNameAndState.first(), volumePath)) {
                    throw new CloudRuntimeException(String.format("Failed to attach volume to VM: %s", vmNameAndState.first()));
                }
            } catch (IOException e) {
                throw new CloudRuntimeException(String.format("Unable to revert backup for volume [%s] due to [%s].", bkpPathAndVolUuid.second(), e.getMessage()), e);
            }
        } catch (Exception e) {
            throw new CloudRuntimeException("Failed to restore volume", e);

@@ -194,8 +197,9 @@ public class LibvirtRestoreBackupCommandWrapper extends CommandWrapper<RestoreBa
        return new Pair<>(bkpPath, volUuid);
    }

    private void replaceVolumeWithBackup(String volumePath, String backupPath) throws IOException {
        Script.runSimpleBashScript(String.format(RSYNC_COMMAND, backupPath, volumePath));
    private boolean replaceVolumeWithBackup(String volumePath, String backupPath) {
        int exitValue = Script.runSimpleBashScriptForExitValue(String.format(RSYNC_COMMAND, backupPath, volumePath));
        return exitValue == 0;
    }

    private boolean attachVolumeToVm(String vmName, String volumePath) {

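The new replaceVolumeWithBackup switches from a fire-and-forget script call to checking the process exit value, so a failed rsync can no longer be reported as success. Outside CloudStack's Script utility, the same pattern looks like this minimal, self-contained sketch (the rsync arguments and paths are illustrative):

    import java.io.IOException;

    public class RsyncExitValueSketch {
        // Returns true only when rsync exits with status 0, mirroring the
        // boolean contract the patch introduces for replaceVolumeWithBackup().
        static boolean replaceVolumeWithBackup(String volumePath, String backupPath)
                throws IOException, InterruptedException {
            Process p = new ProcessBuilder("rsync", "-a", backupPath, volumePath)
                    .inheritIO()
                    .start();
            return p.waitFor() == 0;
        }

        public static void main(String[] args) throws Exception {
            if (!replaceVolumeWithBackup("/tmp/volume.qcow2", "/tmp/backup.qcow2")) {
                throw new RuntimeException("Unable to restore backup for volume");
            }
        }
    }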
@@ -84,7 +84,7 @@ public class LibvirtSetupDirectDownloadCertificateCommandWrapper extends Command
    private void importCertificate(String tempCerFilePath, String keyStoreFile, String certificateName, String privatePassword) {
        logger.debug("Importing certificate from temporary file to keystore");
        String keyToolPath = Script.getExecutableAbsolutePath("keytool");
        int result = Script.executeCommandForExitValue(keyToolPath, "-importcert", "file", tempCerFilePath,
        int result = Script.executeCommandForExitValue(keyToolPath, "-importcert", "-file", tempCerFilePath,
                "-keystore", keyStoreFile, "-alias", sanitizeBashCommandArgument(certificateName), "-storepass",
                privatePassword, "-noprompt");
        if (result != 0) {

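The one-character fix above matters: keytool expects the dash-prefixed flag -file; passing the bare word "file" makes the import fail. For reference, a runnable sketch of the corrected argument list (paths, alias, and password are placeholders):

    public class KeytoolArgsSketch {
        public static void main(String[] args) {
            // Corrected argument list; every value after a flag is a placeholder.
            String[] keytoolArgs = {
                    "keytool", "-importcert",
                    "-file", "/tmp/cert.cer",               // placeholder certificate path
                    "-keystore", "/path/to/keystore.jks",   // placeholder keystore path
                    "-alias", "myCert",                     // placeholder alias
                    "-storepass", "changeit",               // placeholder password
                    "-noprompt"
            };
            System.out.println(String.join(" ", keytoolArgs));
        }
    }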
@@ -16,13 +16,21 @@
// under the License.
package com.cloud.hypervisor.kvm.storage;

import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.formatinspector.Qcow2Inspector;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.lang3.StringUtils;
import org.libvirt.LibvirtException;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class KVMPhysicalDisk {
    private String path;

@@ -71,6 +79,31 @@ public class KVMPhysicalDisk {
        return hostIp;
    }

    public static long getVirtualSizeFromFile(String path) {
        try {
            QemuImg qemu = new QemuImg(0);
            QemuImgFile qemuFile = new QemuImgFile(path);
            Map<String, String> info = qemu.info(qemuFile);
            if (info.containsKey(QemuImg.VIRTUAL_SIZE)) {
                return Long.parseLong(info.get(QemuImg.VIRTUAL_SIZE));
            } else {
                throw new CloudRuntimeException("Unable to determine virtual size of volume at path " + path);
            }
        } catch (QemuImgException | LibvirtException ex) {
            throw new CloudRuntimeException("Error when inspecting volume at path " + path, ex);
        }
    }

    public static void checkQcow2File(String path) {
        if (ImageStoreUtil.isCorrectExtension(path, "qcow2")) {
            try {
                Qcow2Inspector.validateQcow2File(path);
            } catch (RuntimeException e) {
                throw new CloudRuntimeException("The volume file at path " + path + " is not a valid QCOW2. Error: " + e.getMessage());
            }
        }
    }

    private PhysicalDiskFormat format;
    private long size;
    private long virtualSize;

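A short usage sketch for the two helpers added above, assuming the CloudStack agent classpath is available (the disk path is a placeholder):

    import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;

    public class DiskInspectionSketch {
        public static void main(String[] args) {
            String path = "/var/lib/libvirt/images/volume.qcow2"; // placeholder path

            // Rejects files carrying a .qcow2 extension whose header is not valid QCOW2.
            KVMPhysicalDisk.checkQcow2File(path);

            // Reads the virtual size via `qemu-img info`; throws CloudRuntimeException
            // if qemu-img fails or reports no virtual size.
            long virtualSize = KVMPhysicalDisk.getVirtualSizeFromFile(path);
            System.out.println("Virtual size (bytes): " + virtualSize);
        }
    }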
@@ -407,17 +407,19 @@ public class KVMStoragePoolManager {
        String uuid = null;
        String sourceHost = "";
        StoragePoolType protocol = null;
        final String scheme = (storageUri.getScheme() != null) ? storageUri.getScheme().toLowerCase() : "";
        List<String> acceptedSchemes = List.of("nfs", "networkfilesystem", "filesystem");
        if (acceptedSchemes.contains(scheme)) {
            sourcePath = storageUri.getPath();
            sourcePath = sourcePath.replace("//", "/");
            sourceHost = storageUri.getHost();
            uuid = UUID.nameUUIDFromBytes(new String(sourceHost + sourcePath).getBytes()).toString();
            protocol = scheme.equals("filesystem") ? StoragePoolType.Filesystem : StoragePoolType.NetworkFilesystem;
        if (storageUri.getScheme() == null || !acceptedSchemes.contains(storageUri.getScheme().toLowerCase())) {
            throw new CloudRuntimeException("Empty or unsupported storage pool uri scheme");
        }

        // secondary storage registers itself through here
        final String scheme = storageUri.getScheme().toLowerCase();
        sourcePath = storageUri.getPath();
        sourcePath = sourcePath.replace("//", "/");
        sourceHost = storageUri.getHost();
        uuid = UUID.nameUUIDFromBytes(new String(sourceHost + sourcePath).getBytes()).toString();
        protocol = scheme.equals("filesystem") ? StoragePoolType.Filesystem : StoragePoolType.NetworkFilesystem;

        // storage registers itself through here
        return createStoragePool(uuid, sourceHost, 0, sourcePath, "", protocol, null, false);
    }

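Both the old and new branches derive the pool UUID deterministically from host plus path via UUID.nameUUIDFromBytes, so re-registering the same export always yields the same pool ID. A self-contained illustration (host and path are placeholders):

    import java.util.UUID;

    public class PoolUuidSketch {
        public static void main(String[] args) {
            String sourceHost = "192.168.1.10";     // placeholder NFS server
            String sourcePath = "/export/primary";  // placeholder export path

            // Type-3 (MD5, name-based) UUID: identical input always maps to the same UUID.
            UUID poolUuid = UUID.nameUUIDFromBytes((sourceHost + sourcePath).getBytes());
            UUID again = UUID.nameUUIDFromBytes((sourceHost + sourcePath).getBytes());

            System.out.println(poolUuid);
            System.out.println(poolUuid.equals(again)); // true: stable across registrations
        }
    }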
@@ -84,9 +84,8 @@ import org.apache.commons.lang3.BooleanUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

import org.apache.logging.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainInfo;

@@ -2488,7 +2487,9 @@ public class KVMStorageProcessor implements StorageProcessor {
        if (template != null) {
            templatePath = template.getPath();
        }
        if (StringUtils.isEmpty(templatePath)) {
        if (ImageFormat.ISO.equals(cmd.getFormat())) {
            logger.debug("Skipping template validations as image format is {}", cmd.getFormat());
        } else if (StringUtils.isEmpty(templatePath)) {
            logger.warn("Skipped validation whether downloaded file is QCOW2 for template {}, due to downloaded template path is empty", template.getName());
        } else if (!new File(templatePath).exists()) {
            logger.warn("Skipped validation whether downloaded file is QCOW2 for template {}, due to downloaded template path is not valid: {}", template.getName(), templatePath);

@@ -733,10 +733,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {

    @Override
    public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type, Map<String, String> details, boolean isPrimaryStorage) {
        logger.info("Attempting to create storage pool " + name + " (" + type.toString() + ") in libvirt");

        StoragePool sp = null;
        Connect conn = null;
        logger.info("Attempting to create storage pool {} ({}) in libvirt", name, type);
        StoragePool sp;
        Connect conn;
        try {
            conn = LibvirtConnection.getConnection();
        } catch (LibvirtException e) {

@@ -37,6 +37,7 @@ import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.apache.cloudstack.utils.qemu.QemuObject;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.io.filefilter.WildcardFileFilter;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

@@ -581,14 +582,23 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {
        }

        if (!ScaleIOUtil.isSDCServiceActive()) {
            logger.debug("SDC service is not active on host, starting it");
            if (!ScaleIOUtil.startSDCService()) {
                return new Ternary<>(false, null, "Couldn't start SDC service on host");
            }
        } else if (!ScaleIOUtil.restartSDCService()) {
            return new Ternary<>(false, null, "Couldn't restart SDC service on host");
        } else {
            logger.debug("SDC service is active on host, re-starting it");
            if (!ScaleIOUtil.restartSDCService()) {
                return new Ternary<>(false, null, "Couldn't restart SDC service on host");
            }
        }

        return new Ternary<>( true, getSDCDetails(details), "Prepared client successfully");
        Map<String, String> sdcDetails = getSDCDetails(details);
        if (MapUtils.isEmpty(sdcDetails)) {
            return new Ternary<>(false, null, "Couldn't get the SDC details on the host");
        }

        return new Ternary<>( true, sdcDetails, "Prepared client successfully");
    }

    public Pair<Boolean, String> unprepareStorageClient(Storage.StoragePoolType type, String uuid) {

@@ -611,20 +621,40 @@ public class ScaleIOStorageAdaptor implements StorageAdaptor {

    private Map<String, String> getSDCDetails(Map<String, String> details) {
        Map<String, String> sdcDetails = new HashMap<String, String>();
        if (details == null || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) {
        if (MapUtils.isEmpty(details) || !details.containsKey(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID)) {
            return sdcDetails;
        }

        String storageSystemId = details.get(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
        String sdcId = ScaleIOUtil.getSdcId(storageSystemId);
        if (sdcId != null) {
            sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId);
        } else {
            String sdcGuId = ScaleIOUtil.getSdcGuid();
            if (sdcGuId != null) {
                sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId);
            }
        if (StringUtils.isEmpty(storageSystemId)) {
            return sdcDetails;
        }

        int numberOfTries = 5;
        int timeBetweenTries = 1000; // Try more frequently (every sec) and return early when SDC Id or Guid found
        int attempt = 1;
        do {
            logger.debug("Get SDC details, attempt #{}", attempt);
            String sdcId = ScaleIOUtil.getSdcId(storageSystemId);
            if (sdcId != null) {
                sdcDetails.put(ScaleIOGatewayClient.SDC_ID, sdcId);
                return sdcDetails;
            } else {
                String sdcGuId = ScaleIOUtil.getSdcGuid();
                if (sdcGuId != null) {
                    sdcDetails.put(ScaleIOGatewayClient.SDC_GUID, sdcGuId);
                    return sdcDetails;
                }
            }

            try {
                Thread.sleep(timeBetweenTries);
            } catch (Exception ignore) {
            }
            numberOfTries--;
            attempt++;
        } while (numberOfTries > 0);

        return sdcDetails;
    }

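The reworked getSDCDetails polls up to five times at one-second intervals and returns as soon as either identifier shows up. The control flow reduces to a bounded-retry loop like this self-contained sketch (the probe is a stand-in for ScaleIOUtil.getSdcId/getSdcGuid):

    import java.util.Optional;
    import java.util.function.Supplier;

    public class BoundedRetrySketch {
        // Polls `probe` up to maxTries times, sleeping between attempts,
        // and returns early on the first non-empty result.
        static Optional<String> retry(Supplier<Optional<String>> probe, int maxTries, long sleepMillis)
                throws InterruptedException {
            for (int attempt = 1; attempt <= maxTries; attempt++) {
                Optional<String> result = probe.get();
                if (result.isPresent()) {
                    return result;
                }
                Thread.sleep(sleepMillis);
            }
            return Optional.empty();
        }

        public static void main(String[] args) throws InterruptedException {
            // Simulated probe that succeeds on the third call.
            int[] calls = {0};
            Optional<String> sdcId = retry(
                    () -> ++calls[0] >= 3 ? Optional.of("sdc-42") : Optional.empty(),
                    5, 100);
            System.out.println(sdcId.orElse("not found"));
        }
    }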
@@ -22,7 +22,6 @@ import java.util.List;
import java.util.UUID;

import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.vm.UnmanagedInstanceTO;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

@@ -40,13 +39,10 @@ import com.cloud.agent.api.to.NfsTO;
import com.cloud.agent.api.to.RemoteInstanceTO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.storage.Storage;
import com.cloud.utils.Pair;
import com.cloud.utils.script.Script;

@RunWith(MockitoJUnitRunner.class)

@@ -118,72 +114,6 @@ public class LibvirtConvertInstanceCommandWrapperTest {
        Assert.assertEquals(relativePath, diskDef.getDiskPath());
    }

    @Test
    public void testMoveTemporaryDisksToDestination() {
        KVMPhysicalDisk sourceDisk = Mockito.mock(KVMPhysicalDisk.class);
        List<KVMPhysicalDisk> disks = List.of(sourceDisk);
        String destinationPoolUuid = UUID.randomUUID().toString();
        List<String> destinationPools = List.of(destinationPoolUuid);

        KVMPhysicalDisk destDisk = Mockito.mock(KVMPhysicalDisk.class);
        Mockito.when(destDisk.getPath()).thenReturn("xyz");
        Mockito.when(storagePoolManager.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, destinationPoolUuid))
                .thenReturn(destinationPool);
        Mockito.when(destinationPool.getType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
        Mockito.when(storagePoolManager.copyPhysicalDisk(Mockito.eq(sourceDisk), Mockito.anyString(), Mockito.eq(destinationPool), Mockito.anyInt()))
                .thenReturn(destDisk);

        List<KVMPhysicalDisk> movedDisks = convertInstanceCommandWrapper.moveTemporaryDisksToDestination(disks, destinationPools, storagePoolManager);
        Assert.assertEquals(1, movedDisks.size());
        Assert.assertEquals("xyz", movedDisks.get(0).getPath());
    }

    @Test
    public void testGetUnmanagedInstanceDisks() {
        try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
            String relativePath = UUID.randomUUID().toString();
            LibvirtVMDef.DiskDef diskDef = new LibvirtVMDef.DiskDef();
            LibvirtVMDef.DiskDef.DiskBus bus = LibvirtVMDef.DiskDef.DiskBus.IDE;
            LibvirtVMDef.DiskDef.DiskFmtType type = LibvirtVMDef.DiskDef.DiskFmtType.QCOW2;
            diskDef.defFileBasedDisk(relativePath, relativePath, bus, type);

            KVMPhysicalDisk sourceDisk = Mockito.mock(KVMPhysicalDisk.class);
            Mockito.when(sourceDisk.getName()).thenReturn(UUID.randomUUID().toString());
            Mockito.when(sourceDisk.getPool()).thenReturn(destinationPool);
            Mockito.when(destinationPool.getType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
            List<KVMPhysicalDisk> disks = List.of(sourceDisk);

            LibvirtDomainXMLParser parser = Mockito.mock(LibvirtDomainXMLParser.class);
            Mockito.when(parser.getDisks()).thenReturn(List.of(diskDef));
            Mockito.doReturn(new Pair<String, String>(null, null)).when(convertInstanceCommandWrapper).getNfsStoragePoolHostAndPath(destinationPool);

            Mockito.when(Script.executePipedCommands(Mockito.anyList(), Mockito.anyLong()))
                    .thenReturn(new Pair<>(0, null));

            List<UnmanagedInstanceTO.Disk> unmanagedInstanceDisks = convertInstanceCommandWrapper.getUnmanagedInstanceDisks(disks, parser);
            Assert.assertEquals(1, unmanagedInstanceDisks.size());
            UnmanagedInstanceTO.Disk disk = unmanagedInstanceDisks.get(0);
            Assert.assertEquals(LibvirtVMDef.DiskDef.DiskBus.IDE.toString(), disk.getController());
        }
    }

    @Test
    public void testGetNfsStoragePoolHostAndPath() {
        try (MockedStatic<Script> ignored = Mockito.mockStatic(Script.class)) {
            String localMountPoint = "/mnt/xyz";
            String host = "192.168.1.2";
            String path = "/secondary";
            String mountOutput = String.format("%s:%s on %s type nfs (...)", host, path, localMountPoint);
            Mockito.when(temporaryPool.getLocalPath()).thenReturn(localMountPoint);
            Mockito.when(Script.executePipedCommands(Mockito.anyList(), Mockito.anyLong()))
                    .thenReturn(new Pair<>(0, mountOutput));

            Pair<String, String> pair = convertInstanceCommandWrapper.getNfsStoragePoolHostAndPath(temporaryPool);
            Assert.assertEquals(host, pair.first());
            Assert.assertEquals(path, pair.second());
        }
    }

    private RemoteInstanceTO getRemoteInstanceTO(Hypervisor.HypervisorType hypervisorType) {
        RemoteInstanceTO remoteInstanceTO = Mockito.mock(RemoteInstanceTO.class);
        Mockito.when(remoteInstanceTO.getHypervisorType()).thenReturn(hypervisorType);

@@ -116,9 +116,9 @@ public class ScaleIOStorageAdaptorTest {

        Ternary<Boolean, Map<String, String>, String> result = scaleIOStorageAdaptor.prepareStorageClient(Storage.StoragePoolType.PowerFlex, poolUuid, new HashMap<>());

        Assert.assertTrue(result.first());
        Assert.assertNotNull(result.second());
        Assert.assertTrue(result.second().isEmpty());
        Assert.assertFalse(result.first());
        Assert.assertNull(result.second());
        Assert.assertEquals("Couldn't get the SDC details on the host", result.third());
    }

    @Test

@@ -49,9 +49,12 @@ public final class CitrixResizeVolumeCommandWrapper extends CommandWrapper<Resiz

        try {

            if (command.getCurrentSize() >= newSize) {
                logger.info("No need to resize volume: " + volId +", current size " + toHumanReadableSize(command.getCurrentSize()) + " is same as new size " + toHumanReadableSize(newSize));
            if (command.getCurrentSize() == newSize) {
                logger.info("No need to resize volume [{}], current size [{}] is same as new size [{}].", volId, toHumanReadableSize(command.getCurrentSize()), toHumanReadableSize(newSize));
                return new ResizeVolumeAnswer(command, true, "success", newSize);
            } else if (command.getCurrentSize() > newSize) {
                logger.error("XenServer does not support volume shrink. Volume [{}] current size [{}] is smaller than new size [{}]", volId, toHumanReadableSize(command.getCurrentSize()), toHumanReadableSize(newSize));
                return new ResizeVolumeAnswer(command, false, "operation not supported");
            }
            if (command.isManaged()) {
                resizeSr(conn, command);

@@ -433,8 +433,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
        return null;
    }

    public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) {
        VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType);
    public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType,
                                                     KubernetesSupportedVersion clusterKubernetesVersion) {
        String systemVMPreferredArchitecture = ResourceManager.SystemVmPreferredArchitecture.valueIn(dataCenter.getId());
        VMTemplateVO cksIso = clusterKubernetesVersion != null ?
                templateDao.findById(clusterKubernetesVersion.getIsoId()) :
                null;
        String preferredArchitecture = getCksClusterPreferredArch(systemVMPreferredArchitecture, cksIso);
        VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType, preferredArchitecture);
        if (DataCenter.Type.Edge.equals(dataCenter.getType()) && template != null && !template.isDirectDownload()) {
            logger.debug(String.format("Template %s can not be used for edge zone %s", template, dataCenter));
            template = templateDao.findRoutingTemplate(hypervisorType, networkHelper.getHypervisorRouterTemplateConfigMap().get(hypervisorType).valueIn(dataCenter.getId()));

@@ -445,6 +451,14 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
        return template;
    }

    protected String getCksClusterPreferredArch(String systemVMPreferredArchitecture, VMTemplateVO cksIso) {
        if (cksIso == null) {
            return systemVMPreferredArchitecture;
        }
        String cksIsoArchName = cksIso.getArch().name();
        return cksIsoArchName.equals(systemVMPreferredArchitecture) ? systemVMPreferredArchitecture : cksIsoArchName;
    }

    protected void validateIsolatedNetworkIpRules(long ipId, FirewallRule.Purpose purpose, Network network, int clusterTotalNodeCount) {
        List<FirewallRuleVO> rules = firewallRulesDao.listByIpAndPurposeAndNotRevoked(ipId, purpose);
        for (FirewallRuleVO rule : rules) {

@@ -1302,7 +1316,10 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
        }

        final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId());
        final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType());
        final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType(), clusterKubernetesVersion);

        compareKubernetesIsoArchToSelectedTemplateArch(clusterKubernetesVersion, finalTemplate);

        final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize);
        final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize);

@@ -1331,6 +1348,21 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
        return cluster;
    }

    private void compareKubernetesIsoArchToSelectedTemplateArch(KubernetesSupportedVersion clusterKubernetesVersion, VMTemplateVO finalTemplate) {
        VMTemplateVO cksIso = templateDao.findById(clusterKubernetesVersion.getIsoId());
        if (cksIso == null) {
            String err = String.format("Cannot find Kubernetes ISO associated to the Kubernetes version %s (id=%s)",
                    clusterKubernetesVersion.getName(), clusterKubernetesVersion.getUuid());
            throw new CloudRuntimeException(err);
        }
        if (!cksIso.getArch().equals(finalTemplate.getArch())) {
            String err = String.format("The selected Kubernetes ISO %s arch (%s) doesn't match the template %s arch (%s) " +
                    "to deploy the Kubernetes cluster",
                    clusterKubernetesVersion.getName(), cksIso.getArch(), finalTemplate.getName(), finalTemplate.getArch());
            throw new CloudRuntimeException(err);
        }
    }

    private SecurityGroup getOrCreateSecurityGroupForAccount(Account owner) {
        String securityGroupName = String.format("%s-%s", KubernetesClusterActionWorker.CKS_CLUSTER_SECURITY_GROUP_NAME, owner.getUuid());
        String securityGroupDesc = String.format("%s and account %s", KubernetesClusterActionWorker.CKS_SECURITY_GROUP_DESCRIPTION, owner.getName());

@@ -29,6 +29,7 @@ import java.util.stream.Collectors;

import javax.inject.Inject;

import com.cloud.kubernetes.version.KubernetesSupportedVersionVO;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;

@@ -194,7 +195,8 @@ public class KubernetesClusterActionWorker {
        DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId);
        VMTemplateVO template = templateDao.findById(templateId);
        Hypervisor.HypervisorType type = template.getHypervisorType();
        this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type);
        KubernetesSupportedVersionVO kubernetesSupportedVersion = kubernetesSupportedVersionDao.findById(this.kubernetesCluster.getKubernetesVersionId());
        this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type, kubernetesSupportedVersion);
        this.sshKeyFile = getManagementServerSshPublicKeyFile();
    }

@@ -159,6 +159,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu

    protected String kubernetesClusterNodeNamePrefix;

    private static final int MAX_CLUSTER_PREFIX_LENGTH = 43;

    protected KubernetesClusterResourceModifierActionWorker(final KubernetesCluster kubernetesCluster, final KubernetesClusterManagerImpl clusterManager) {
        super(kubernetesCluster, clusterManager);
    }

@@ -248,7 +250,7 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
        for (Map.Entry<String, Pair<HostVO, Integer>> hostEntry : hosts_with_resevered_capacity.entrySet()) {
            Pair<HostVO, Integer> hp = hostEntry.getValue();
            HostVO h = hp.first();
            if (!h.getHypervisorType().equals(clusterTemplate.getHypervisorType())) {
            if (!h.getHypervisorType().equals(clusterTemplate.getHypervisorType()) || !h.getArch().equals(clusterTemplate.getArch())) {
                continue;
            }
            hostDao.loadHostTags(h);

@@ -775,19 +777,35 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu
        }
    }

    /**
     * Generates a valid name prefix for Kubernetes cluster nodes.
     *
     * <p>The prefix must comply with Kubernetes naming constraints:
     * <ul>
     *   <li>Maximum 63 characters total</li>
     *   <li>Only lowercase alphanumeric characters and hyphens</li>
     *   <li>Must start with a letter</li>
     *   <li>Must end with an alphanumeric character</li>
     * </ul>
     *
     * <p>The generated prefix is limited to 43 characters to accommodate the full node naming pattern:
     * <pre>{'prefix'}-{'control' | 'node'}-{'11-digit-hash'}</pre>
     *
     * @return A valid node name prefix, truncated if necessary
     * @see <a href="https://kubernetes.io/docs/concepts/overview/working-with-objects/names/">Kubernetes "Object Names and IDs" documentation</a>
     */
    protected String getKubernetesClusterNodeNamePrefix() {
        String prefix = kubernetesCluster.getName();
        if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
            prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
            if (prefix.length() == 0) {
                prefix = kubernetesCluster.getUuid();
            }
            prefix = "k8s-" + prefix;
        String prefix = kubernetesCluster.getName().toLowerCase();

        if (NetUtils.verifyDomainNameLabel(prefix, true)) {
            return StringUtils.truncate(prefix, MAX_CLUSTER_PREFIX_LENGTH);
        }
        if (prefix.length() > 40) {
            prefix = prefix.substring(0, 40);

        prefix = prefix.replaceAll("[^a-z0-9-]", "");
        if (prefix.isEmpty()) {
            prefix = kubernetesCluster.getUuid();
        }
        return prefix;
        return StringUtils.truncate("k8s-" + prefix, MAX_CLUSTER_PREFIX_LENGTH);
    }

    protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, final Long size,

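Tracing the new normalization on concrete inputs clarifies the 43-character budget from the javadoc above. This is a simplified, self-contained re-implementation; NetUtils.verifyDomainNameLabel is approximated by a regex, which is an assumption:

    public class NodeNamePrefixSketch {
        static final int MAX_CLUSTER_PREFIX_LENGTH = 43;

        // Approximation of NetUtils.verifyDomainNameLabel(prefix, true):
        // a DNS label that starts with a letter and ends alphanumeric, <= 63 chars.
        static boolean looksLikeValidLabel(String s) {
            return s.length() <= 63 && s.matches("[a-z]([a-z0-9-]*[a-z0-9])?");
        }

        static String nodeNamePrefix(String clusterName, String clusterUuid) {
            String prefix = clusterName.toLowerCase();
            if (looksLikeValidLabel(prefix)) {
                return prefix.length() > MAX_CLUSTER_PREFIX_LENGTH
                        ? prefix.substring(0, MAX_CLUSTER_PREFIX_LENGTH) : prefix;
            }
            prefix = prefix.replaceAll("[^a-z0-9-]", "");
            if (prefix.isEmpty()) {
                prefix = clusterUuid;
            }
            String result = "k8s-" + prefix;
            return result.length() > MAX_CLUSTER_PREFIX_LENGTH
                    ? result.substring(0, MAX_CLUSTER_PREFIX_LENGTH) : result;
        }

        public static void main(String[] args) {
            String uuid = "2699b547-cb56-4a59-a2c6-331cfb21d2e4";
            System.out.println(nodeNamePrefix("k8s-CLUSTER-01", uuid)); // k8s-cluster-01
            System.out.println(nodeNamePrefix("1 cluster", uuid));      // k8s-1cluster
            System.out.println(nodeNamePrefix("!@#$", uuid));           // k8s-<cluster uuid>
        }
    }

The expected outputs match the new unit tests for this method further down in the commit.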
@@ -21,6 +21,7 @@ package com.cloud.kubernetes.cluster;

import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.cpu.CPU;
import com.cloud.dc.DataCenter;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.exception.PermissionDeniedException;

@@ -292,4 +293,22 @@ public class KubernetesClusterManagerImplTest {
        Mockito.when(kubernetesClusterDao.findById(Mockito.anyLong())).thenReturn(cluster);
        Assert.assertTrue(kubernetesClusterManager.removeVmsFromCluster(cmd).size() > 0);
    }

    @Test
    public void testGetCksClusterPreferredArchDifferentArchsPreferCKSIsoArch() {
        String systemVMArch = "x86_64";
        VMTemplateVO cksIso = Mockito.mock(VMTemplateVO.class);
        Mockito.when(cksIso.getArch()).thenReturn(CPU.CPUArch.arm64);
        String cksClusterPreferredArch = kubernetesClusterManager.getCksClusterPreferredArch(systemVMArch, cksIso);
        Assert.assertEquals(CPU.CPUArch.arm64.name(), cksClusterPreferredArch);
    }

    @Test
    public void testGetCksClusterPreferredArchSameArch() {
        String systemVMArch = "x86_64";
        VMTemplateVO cksIso = Mockito.mock(VMTemplateVO.class);
        Mockito.when(cksIso.getArch()).thenReturn(CPU.CPUArch.amd64);
        String cksClusterPreferredArch = kubernetesClusterManager.getCksClusterPreferredArch(systemVMArch, cksIso);
        Assert.assertEquals(CPU.CPUArch.amd64.name(), cksClusterPreferredArch);
    }
}

@@ -0,0 +1,138 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.kubernetes.cluster.actionworkers;

import com.cloud.kubernetes.cluster.KubernetesCluster;
import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterDetailsDao;
import com.cloud.kubernetes.cluster.dao.KubernetesClusterVmMapDao;
import com.cloud.kubernetes.version.dao.KubernetesSupportedVersionDao;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

@RunWith(MockitoJUnitRunner.class)
public class KubernetesClusterResourceModifierActionWorkerTest {
    @Mock
    private KubernetesClusterDao kubernetesClusterDaoMock;

    @Mock
    private KubernetesClusterDetailsDao kubernetesClusterDetailsDaoMock;

    @Mock
    private KubernetesClusterVmMapDao kubernetesClusterVmMapDaoMock;

    @Mock
    private KubernetesSupportedVersionDao kubernetesSupportedVersionDaoMock;

    @Mock
    private KubernetesClusterManagerImpl kubernetesClusterManagerMock;

    @Mock
    private KubernetesCluster kubernetesClusterMock;

    private KubernetesClusterResourceModifierActionWorker kubernetesClusterResourceModifierActionWorker;

    @Before
    public void setUp() {
        kubernetesClusterManagerMock.kubernetesClusterDao = kubernetesClusterDaoMock;
        kubernetesClusterManagerMock.kubernetesSupportedVersionDao = kubernetesSupportedVersionDaoMock;
        kubernetesClusterManagerMock.kubernetesClusterDetailsDao = kubernetesClusterDetailsDaoMock;
        kubernetesClusterManagerMock.kubernetesClusterVmMapDao = kubernetesClusterVmMapDaoMock;

        kubernetesClusterResourceModifierActionWorker = new KubernetesClusterResourceModifierActionWorker(kubernetesClusterMock, kubernetesClusterManagerMock);
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestReturnOriginalPrefixWhenNamingAllRequirementsAreMet() {
        String originalPrefix = "k8s-cluster-01";
        String expectedPrefix = "k8s-cluster-01";

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldOnlyContainLowerCaseCharacters() {
        String originalPrefix = "k8s-CLUSTER-01";
        String expectedPrefix = "k8s-cluster-01";

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldBeTruncatedWhenRequired() {
        int maxPrefixLength = 43;

        String originalPrefix = "c".repeat(maxPrefixLength + 1);
        String expectedPrefix = "c".repeat(maxPrefixLength);

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        String normalizedPrefix = kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix();
        Assert.assertEquals(expectedPrefix, normalizedPrefix);
        Assert.assertEquals(maxPrefixLength, normalizedPrefix.length());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldBeTruncatedWhenRequiredAndWhenOriginalPrefixIsInvalid() {
        int maxPrefixLength = 43;

        String originalPrefix = "1!" + "c".repeat(maxPrefixLength);
        String expectedPrefix = "k8s-1" + "c".repeat(maxPrefixLength - 5);

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        String normalizedPrefix = kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix();
        Assert.assertEquals(expectedPrefix, normalizedPrefix);
        Assert.assertEquals(maxPrefixLength, normalizedPrefix.length());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldOnlyIncludeAlphanumericCharactersAndHyphen() {
        String originalPrefix = "Cluster!@#$%^&*()_+?.-01|<>";
        String expectedPrefix = "k8s-cluster-01";

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldContainClusterUuidWhenAllCharactersAreInvalid() {
        String clusterUuid = "2699b547-cb56-4a59-a2c6-331cfb21d2e4";
        String originalPrefix = "!@#$%^&*()_+?.|<>";
        String expectedPrefix = "k8s-" + clusterUuid;

        Mockito.when(kubernetesClusterMock.getUuid()).thenReturn(clusterUuid);
        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
    }

    @Test
    public void getKubernetesClusterNodeNamePrefixTestNormalizedPrefixShouldNotStartWithADigit() {
        String originalPrefix = "1 cluster";
        String expectedPrefix = "k8s-1cluster";

        Mockito.when(kubernetesClusterMock.getName()).thenReturn(originalPrefix);
        Assert.assertEquals(expectedPrefix, kubernetesClusterResourceModifierActionWorker.getKubernetesClusterNodeNamePrefix());
    }
}

@@ -641,7 +641,7 @@ public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements
            metricsResponse.setStorageUsedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageThreshold);
            metricsResponse.setStorageUsedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
            metricsResponse.setStorageAllocatedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageThreshold);
            metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
            metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
            metricsResponses.add(metricsResponse);
        }
        return metricsResponses;

@@ -40,7 +40,7 @@ public class VmMetricsStatsResponse extends BaseResponse {
    private String displayName;

    @SerializedName("stats")
    @Param(description = "the list of VM stats")
    @Param(description = "the list of VM stats", responseObject = StatsResponse.class)
    private List<StatsResponse> stats;

    public void setId(String id) {

@@ -48,6 +48,10 @@ public class EventUtils {

    private static EventDistributor eventDistributor;

    private static final String MODULE_TOP_LEVEL_PACKAGE =
            EventUtils.class.getPackage().getName().substring(0,
                    EventUtils.class.getPackage().getName().lastIndexOf('.'));

    public EventUtils() {
    }

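The constant above strips the last segment off the class's package name, so the interceptor below can check whether an event's target lives in the same module. Assuming EventUtils sits in a package like com.cloud.event (an assumption for illustration), the derivation works like this self-contained sketch:

    public class ModulePackageSketch {
        public static void main(String[] args) {
            String pkg = "com.cloud.event"; // assumed package of EventUtils
            String moduleTopLevelPackage = pkg.substring(0, pkg.lastIndexOf('.'));
            System.out.println(moduleTopLevelPackage); // com.cloud

            // The interceptor's same-module check then reduces to:
            String targetPkg = "com.cloud.vm"; // hypothetical target class package
            boolean sameModule = targetPkg.startsWith(moduleTopLevelPackage);
            System.out.println(sameModule); // true
        }
    }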
@@ -143,6 +147,13 @@ public class EventUtils {
    @Override
    public void interceptComplete(Method method, Object target, Object event) {
        ActionEvent actionEvent = method.getAnnotation(ActionEvent.class);
        boolean sameModule = false;
        if (target != null && target.getClass().getPackage() != null) {
            sameModule = target.getClass().getPackage().getName().startsWith(MODULE_TOP_LEVEL_PACKAGE);
        }
        if (!sameModule) {
            return;
        }
        if (actionEvent != null) {
            CallContext ctx = CallContext.current();
            if (!actionEvent.create()) {

@@ -236,6 +236,12 @@ public class MockAccountManager extends ManagerBase implements AccountManager {
        return false;
    }

    @Override
    public boolean isResourceDomainAdmin(Long accountId) {
        // TODO Auto-generated method stub
        return false;
    }

    @Override
    public boolean isNormalUser(long accountId) {
        // TODO Auto-generated method stub

@@ -24,10 +24,6 @@ import java.util.UUID;

import javax.inject.Inject;

import com.cloud.configuration.Config;
import com.cloud.utils.SwiftUtil;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;

import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;

@@ -37,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager;
import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.storage.command.DownloadCommand;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao;
import org.apache.cloudstack.storage.image.BaseImageStoreDriverImpl;

@@ -47,8 +44,9 @@ import com.cloud.agent.api.storage.DownloadAnswer;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.DataStoreTO;
import com.cloud.agent.api.to.SwiftTO;
import com.cloud.configuration.Config;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.utils.SwiftUtil;
import com.cloud.utils.exception.CloudRuntimeException;

public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl {

@@ -99,8 +97,13 @@ public class SwiftImageStoreDriverImpl extends BaseImageStoreDriverImpl {
    @Override
    public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback<CreateCmdResult> callback) {
        Long maxTemplateSizeInBytes = getMaxTemplateSizeInBytes();
        VirtualMachineTemplate tmpl = _templateDao.findById(data.getId());
        DataStore cacheStore = cacheManager.getCacheStorage(dataStore.getScope());
        if (cacheStore == null) {
            String errMsg = String.format("No cache store found for scope: %s",
                    dataStore.getScope().getScopeType().name());
            logger.error(errMsg);
            throw new CloudRuntimeException(errMsg);
        }
        DownloadCommand dcmd = new DownloadCommand((TemplateObjectTO)(data.getTO()), maxTemplateSizeInBytes);
        dcmd.setCacheStore(cacheStore.getTO());
        dcmd.setProxy(getHttpProxy());

@@ -140,13 +140,18 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle {
        return fsVmConfig;
    }

    private String getStorageVmName(String fileShareName) {
    private String getStorageVmPrefix(String fileShareName) {
        String prefix = String.format("%s-%s", SharedFSVmNamePrefix, fileShareName);
        String suffix = Long.toHexString(System.currentTimeMillis());

        if (!NetUtils.verifyDomainNameLabel(prefix, true)) {
            prefix = prefix.replaceAll("[^a-zA-Z0-9-]", "");
        }
        return prefix;
    }

    private String getStorageVmName(String fileShareName) {
        String prefix = getStorageVmPrefix(fileShareName);
        String suffix = Long.toHexString(System.currentTimeMillis());

        int nameLength = prefix.length() + suffix.length() + SharedFSVmNamePrefix.length();
        if (nameLength > 63) {
            int prefixLength = prefix.length() - (nameLength - 63);

@@ -174,10 +179,11 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle {
            customParameterMap.put("maxIopsDo", maxIops.toString());
        }
        List<String> keypairs = new ArrayList<String>();
        String preferredArchitecture = ResourceManager.SystemVmPreferredArchitecture.valueIn(zoneId);

        for (final Iterator<Hypervisor.HypervisorType> iter = hypervisors.iterator(); iter.hasNext();) {
            final Hypervisor.HypervisorType hypervisor = iter.next();
            VMTemplateVO template = templateDao.findSystemVMReadyTemplate(zoneId, hypervisor);
            VMTemplateVO template = templateDao.findSystemVMReadyTemplate(zoneId, hypervisor, preferredArchitecture);
            if (template == null && !iter.hasNext()) {
                throw new CloudRuntimeException(String.format("Unable to find the systemvm template for %s or it was not downloaded in %s.", hypervisor.toString(), zone.toString()));
            }

@@ -236,8 +242,18 @@ public class StorageVmSharedFSLifeCycle implements SharedFSLifeCycle {
        Account owner = accountMgr.getActiveAccountById(sharedFS.getAccountId());
        UserVm vm = deploySharedFSVM(sharedFS.getDataCenterId(), owner, List.of(networkId), sharedFS.getName(), sharedFS.getServiceOfferingId(), diskOfferingId, sharedFS.getFsType(), size, minIops, maxIops);

        List<VolumeVO> volumes = volumeDao.findByInstanceAndType(vm.getId(), Volume.Type.DATADISK);
        return new Pair<>(volumes.get(0).getId(), vm.getId());
        List<VolumeVO> volumes = volumeDao.findByInstance(vm.getId());
        VolumeVO dataVol = null;
        for (VolumeVO vol : volumes) {
            String volumeName = vol.getName();
            String updatedVolumeName = SharedFSVmNamePrefix + "-" + volumeName;
            vol.setName(updatedVolumeName);
            volumeDao.update(vol.getId(), vol);
            if (vol.getVolumeType() == Volume.Type.DATADISK) {
                dataVol = vol;
            }
        }
        return new Pair<>(dataVol.getId(), vm.getId());
    }

    @Override

@@ -236,7 +236,7 @@ public class StorageVmSharedFSLifeCycleTest {
        when(serviceOfferingDao.findById(s_serviceOfferingId)).thenReturn(serviceOffering);

        VMTemplateVO template = mock(VMTemplateVO.class);
        when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM)).thenReturn(template);
        when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM, ResourceManager.SystemVmPreferredArchitecture.defaultValue())).thenReturn(template);
        when(template.getId()).thenReturn(s_templateId);

        return sharedFS;

@@ -260,9 +260,14 @@ public class StorageVmSharedFSLifeCycleTest {
                anyMap(), isNull(), isNull(), isNull(), isNull(),
                anyBoolean(), anyString(), isNull())).thenReturn(vm);

        VolumeVO volume = mock(VolumeVO.class);
        when(volume.getId()).thenReturn(s_volumeId);
        when(volumeDao.findByInstanceAndType(s_vmId, Volume.Type.DATADISK)).thenReturn(List.of(volume));
        VolumeVO rootVol = mock(VolumeVO.class);
        when(rootVol.getVolumeType()).thenReturn(Volume.Type.ROOT);
        when(rootVol.getName()).thenReturn("ROOT-1");
        VolumeVO dataVol = mock(VolumeVO.class);
        when(dataVol.getId()).thenReturn(s_volumeId);
        when(dataVol.getName()).thenReturn("DATA-1");
        when(dataVol.getVolumeType()).thenReturn(Volume.Type.DATADISK);
        when(volumeDao.findByInstance(s_vmId)).thenReturn(List.of(rootVol, dataVol));

        Pair<Long, Long> result = lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops);
        Assert.assertEquals(Optional.ofNullable(result.first()), Optional.ofNullable(s_volumeId));

@@ -298,7 +303,6 @@ public class StorageVmSharedFSLifeCycleTest {
        when(dataCenterDao.findById(s_zoneId)).thenReturn(zone);
        when(resourceMgr.getSupportedHypervisorTypes(s_zoneId, false, null)).thenReturn(List.of(Hypervisor.HypervisorType.KVM));

        when(templateDao.findSystemVMReadyTemplate(s_zoneId, Hypervisor.HypervisorType.KVM)).thenReturn(null);
        lifeCycle.deploySharedFS(sharedFS, s_networkId, s_diskOfferingId, s_size, s_minIops, s_maxIops);
    }

@@ -5,6 +5,18 @@ All notable changes to Linstor CloudStack plugin will be documented in this file
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2025-08-05]

### Fixed

- getVolumeStats wasn't correctly working if multiple Linstor clusters/primary storages are used.

## [2025-07-01]

### Fixed

- Regression in 4.19.3 and 4.21.0 with templates from snapshots

## [2025-05-07]

### Added

@@ -622,7 +622,7 @@ public class LinstorStorageAdaptor implements StorageAdaptor {
        try {
            templateProps.load(new FileInputStream(propFile.toFile()));
            String desc = templateProps.getProperty("description");
            if (desc.startsWith("SystemVM Template")) {
            if (desc != null && desc.startsWith("SystemVM Template")) {
                return true;
            }
        } catch (IOException e) {

@@ -74,12 +74,14 @@ import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateStoragePoolVO;
import com.cloud.storage.VMTemplateStorageResourceAssoc;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotDetailsDao;
import com.cloud.storage.dao.SnapshotDetailsVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplatePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;

@@ -133,8 +135,9 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
    ConfigurationDao _configDao;
    @Inject
    private HostDao _hostDao;
    @Inject private VMTemplateDao _vmTemplateDao;

    private long volumeStatsLastUpdate = 0L;
    private final Map<String, Long> volumeStatsLastUpdate = new HashMap<>();
    private final Map<String, Pair<Long, Long>> volumeStats = new HashMap<>();

    public LinstorPrimaryDataStoreDriverImpl()

@@ -670,8 +673,15 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
                storagePoolVO.getId(), csCloneId, null);

        if (tmplPoolRef != null) {
            final String templateRscName = LinstorUtil.RSC_PREFIX + tmplPoolRef.getLocalDownloadPath();
            final String templateRscName;
            if (tmplPoolRef.getLocalDownloadPath() == null) {
                VMTemplateVO vmTemplateVO = _vmTemplateDao.findById(tmplPoolRef.getTemplateId());
                templateRscName = LinstorUtil.RSC_PREFIX + vmTemplateVO.getUuid();
            } else {
                templateRscName = LinstorUtil.RSC_PREFIX + tmplPoolRef.getLocalDownloadPath();
            }
            final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();

            final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());

            try {

@@ -1525,11 +1535,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver

    /**
     * Updates the cache map containing current allocated size data.
     * @param api Linstor Developers api object
     * @param linstorAddr Linstor cluster api address
     */
    private void fillVolumeStatsCache(DevelopersApi api) {
    private void fillVolumeStatsCache(String linstorAddr) {
        final DevelopersApi api = LinstorUtil.getLinstorAPI(linstorAddr);
        try {
            logger.trace("Start volume stats cache update");
            logger.trace("Start volume stats cache update for " + linstorAddr);
            List<ResourceWithVolumes> resources = api.viewResources(
                Collections.emptyList(),
                Collections.emptyList(),

@@ -1556,14 +1567,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
                }
            }

            volumeStats.clear();
            volumeStats.keySet().removeIf(key -> key.startsWith(linstorAddr));
            for (Map.Entry<String, Long> entry : allocSizeMap.entrySet()) {
                Long reserved = resSizeMap.getOrDefault(entry.getKey(), 0L);
                Pair<Long, Long> volStat = new Pair<>(entry.getValue(), reserved);
                volumeStats.put(entry.getKey(), volStat);
                volumeStats.put(linstorAddr + "/" + entry.getKey(), volStat);
            }
            volumeStatsLastUpdate = System.currentTimeMillis();
            logger.trace("Done volume stats cache update: {}", volumeStats.size());
            volumeStatsLastUpdate.put(linstorAddr, System.currentTimeMillis());
            logger.debug(String.format("Done volume stats cache update for %s: %d", linstorAddr, volumeStats.size()));
        } catch (ApiException e) {
            logger.error("Unable to fetch Linstor resources: {}", e.getBestMessage());
        }

@@ -1571,14 +1582,19 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver

    @Override
    public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
        final DevelopersApi api = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
        String linstorAddr = storagePool.getHostAddress();
        synchronized (volumeStats) {
            long invalidateCacheTime = volumeStatsLastUpdate +
            long invalidateCacheTime = volumeStatsLastUpdate.getOrDefault(storagePool.getHostAddress(), 0L) +
                LinstorConfigurationManager.VolumeStatsCacheTime.value() * 1000;
            if (invalidateCacheTime < System.currentTimeMillis()) {
                fillVolumeStatsCache(api);
                fillVolumeStatsCache(storagePool.getHostAddress());
            }
            return volumeStats.get(LinstorUtil.RSC_PREFIX + volumeId);
            String volumeKey = linstorAddr + "/" + LinstorUtil.RSC_PREFIX + volumeId;
            Pair<Long, Long> sizePair = volumeStats.get(volumeKey);
            if (sizePair == null) {
                logger.warn(String.format("Volumestats for %s not found in cache", volumeKey));
            }
            return sizePair;
        }
    }
}

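The Linstor fix namespaces every cache entry by the cluster's API address, so one cluster's refresh no longer wipes or shadows another's stats. The keying and selective invalidation reduce to this self-contained sketch (addresses and resource names are placeholders):

    import java.util.HashMap;
    import java.util.Map;

    public class PerClusterStatsCacheSketch {
        // Values are (allocated, reserved) pairs; keys are "<clusterAddr>/<resourceName>".
        private final Map<String, long[]> volumeStats = new HashMap<>();
        private final Map<String, Long> lastUpdate = new HashMap<>();

        void refresh(String clusterAddr, Map<String, long[]> freshStats) {
            // Drop only this cluster's entries, keeping other clusters' stats intact.
            volumeStats.keySet().removeIf(key -> key.startsWith(clusterAddr + "/"));
            freshStats.forEach((rsc, stat) -> volumeStats.put(clusterAddr + "/" + rsc, stat));
            lastUpdate.put(clusterAddr, System.currentTimeMillis());
        }

        long[] get(String clusterAddr, String rscName) {
            return volumeStats.get(clusterAddr + "/" + rscName);
        }

        public static void main(String[] args) {
            PerClusterStatsCacheSketch cache = new PerClusterStatsCacheSketch();
            cache.refresh("10.0.0.1", Map.of("cs-vol1", new long[]{100, 80}));
            cache.refresh("10.0.0.2", Map.of("cs-vol1", new long[]{200, 150}));
            // The same resource name on two clusters no longer collides:
            System.out.println(cache.get("10.0.0.1", "cs-vol1")[0]); // 100
            System.out.println(cache.get("10.0.0.2", "cs-vol1")[0]); // 200
        }
    }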
@@ -1230,13 +1230,13 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
        }

        org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = client.getVolume(scaleIOVolumeId);
        long newSizeInGB = newSizeInBytes / (1024 * 1024 * 1024);
        long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
        double newSizeInGB = newSizeInBytes / (1024.0 * 1024 * 1024);
        long newSizeIn8GBBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);

        if (scaleIOVolume.getSizeInKb() == newSizeIn8gbBoundary << 20) {
        if (scaleIOVolume.getSizeInKb() == newSizeIn8GBBoundary << 20) {
            logger.debug("No resize necessary at API");
        } else {
            scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary);
            scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8GBBoundary);
            if (scaleIOVolume == null) {
                throw new CloudRuntimeException("Failed to resize volume: " + volumeInfo.getName());
            }

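Two things change in the arithmetic above (and again in getVolumeSizeRequiredOnPool in the next hunk): the byte-to-GiB conversion now uses a double, so fractional sizes are no longer truncated before rounding, and the result is rounded up to PowerFlex's 8 GiB allocation granularity. A worked, runnable example:

    public class EightGiBBoundarySketch {
        // New behavior: double division, then round up to the 8 GiB granularity.
        static long roundUpTo8GiB(long sizeInBytes) {
            double sizeInGB = sizeInBytes / (1024.0 * 1024 * 1024);
            return (long) (Math.ceil(sizeInGB / 8.0) * 8.0);
        }

        public static void main(String[] args) {
            long eightAndHalfGiB = 8L * 1024 * 1024 * 1024 + 512L * 1024 * 1024;

            // Old behavior: integer division truncates 8.5 GiB down to 8 before rounding,
            // so the volume would be sized 8 GiB, too small for the requested 8.5 GiB.
            long oldGB = eightAndHalfGiB / (1024 * 1024 * 1024);
            long oldBoundary = (long) (Math.ceil(oldGB / 8.0) * 8.0);
            System.out.println(oldBoundary);                    // 8

            // New behavior rounds the true 8.5 GiB up to the next 8 GiB multiple.
            System.out.println(roundUpTo8GiB(eightAndHalfGiB)); // 16

            // PowerFlex reports sizes in KB; 16 GiB equals 16 << 20 KB,
            // matching the `newSizeIn8GBBoundary << 20` comparison above.
            System.out.println(16L << 20);                      // 16777216
        }
    }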
@@ -1362,12 +1362,12 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {

    @Override
    public long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
        long newSizeInGB = volumeSize / (1024 * 1024 * 1024);
        double newSizeInGB = volumeSize / (1024.0 * 1024 * 1024);
        if (templateSize != null && isEncryptionRequired && needsExpansionForEncryptionHeader(templateSize, volumeSize)) {
            newSizeInGB = (volumeSize + (1<<30)) / (1024 * 1024 * 1024);
            newSizeInGB = (volumeSize + (1<<30)) / (1024.0 * 1024 * 1024);
        }
        long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
        return newSizeIn8gbBoundary * (1024 * 1024 * 1024);
        long newSizeIn8GBBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0);
        return newSizeIn8GBBoundary * (1024 * 1024 * 1024);
    }

    @Override

@@ -183,12 +183,13 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
                storagePoolHost.setLocalPath(sdcId);
                storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
            }

            int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs)
            if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) {
                return sdcId;
            }
        }

        int waitTimeInSecs = 15; // Wait for 15 secs (usual tests with SDC service start took 10-15 secs)
        if (hostSdcConnected(sdcId, dataStore, waitTimeInSecs)) {
            return sdcId;
        }
        return null;
    } finally {
        if (storageSystemIdLock != null) {

@@ -246,7 +247,7 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
        }

        if (StringUtils.isBlank(sdcId)) {
            logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, try (re)install SDC and restart agent", host);
            logger.warn("Couldn't retrieve PowerFlex storage SDC details from the host: {}, add MDMs if not or try (re)install SDC & restart agent", host);
            return null;
        }

@@ -381,6 +382,9 @@ public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {

    private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception {
        StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
        if (storagePool == null) {
            throw new CloudRuntimeException("Unable to find the storage pool with id " + storagePoolId);
        }
        return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePool, storagePoolDetailsDao);
    }

@@ -102,12 +102,12 @@ public class ScaleIOHostListener implements HypervisorHostListener {
         if (systemId == null) {
             throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool " + storagePool.getName());
         }
-        Map<String,String> details = new HashMap<>();
+        Map<String, String> details = new HashMap<>();
         details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, systemId);
 
         ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool, storagePool.getPath(), details);
         ModifyStoragePoolAnswer answer = sendModifyStoragePoolCommand(cmd, storagePool, host);
-        Map<String,String> poolDetails = answer.getPoolInfo().getDetails();
+        Map<String, String> poolDetails = answer.getPoolInfo().getDetails();
         if (MapUtils.isEmpty(poolDetails)) {
             String msg = String.format("PowerFlex storage SDC details not found on the host: %s, (re)install SDC and restart agent", host);
             logger.warn(msg);

@@ -124,7 +124,7 @@ public class ScaleIOHostListener implements HypervisorHostListener {
         }
 
         if (StringUtils.isBlank(sdcId)) {
-            String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, (re)install SDC and restart agent", host);
+            String msg = String.format("Couldn't retrieve PowerFlex storage SDC details from the host: %s, add MDMs if not or try (re)install SDC & restart agent", host);
             logger.warn(msg);
             _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC details not found on host: " + host.getUuid(), msg);
             return null;

@@ -17,6 +17,8 @@
 
 package org.apache.cloudstack.storage.datastore.util;
 
+import com.cloud.agent.properties.AgentProperties;
+import com.cloud.agent.properties.AgentPropertiesFileHandler;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 

@@ -60,6 +62,14 @@ public class ScaleIOUtil {
     private static final String SDC_SERVICE_ENABLE_CMD = "systemctl enable scini";
 
+    public static final String CONNECTED_SDC_COUNT_STAT = "ConnectedSDCCount";
+
+    /**
+     * Time (in seconds) to wait after SDC service 'scini' start/restart/stop.<br>
+     * Data type: Integer.<br>
+     * Default value: <code>3</code>
+     */
+    public static final AgentProperties.Property<Integer> SDC_SERVICE_ACTION_WAIT = new AgentProperties.Property<>("powerflex.sdc.service.wait", 3);
 
     /**
      * Cmd for querying volumes in SDC
      * Sample output for cmd: drv_cfg --query_vols:

@@ -216,16 +226,41 @@ public class ScaleIOUtil {
 
     public static boolean startSDCService() {
         int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_START_CMD);
-        return exitValue == 0;
+        if (exitValue != 0) {
+            return false;
+        }
+        waitForSdcServiceActionToComplete();
+        return true;
     }
 
     public static boolean stopSDCService() {
         int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_STOP_CMD);
-        return exitValue == 0;
+        if (exitValue != 0) {
+            return false;
+        }
+        waitForSdcServiceActionToComplete();
+        return true;
     }
 
     public static boolean restartSDCService() {
         int exitValue = Script.runSimpleBashScriptForExitValue(SDC_SERVICE_RESTART_CMD);
-        return exitValue == 0;
+        if (exitValue != 0) {
+            return false;
+        }
+        waitForSdcServiceActionToComplete();
+        return true;
     }
 
+    private static void waitForSdcServiceActionToComplete() {
+        // Wait for the SDC service to settle after start/restart/stop and reaches a stable state
+        int waitTimeInSecs = AgentPropertiesFileHandler.getPropertyValue(SDC_SERVICE_ACTION_WAIT);
+        if (waitTimeInSecs < 0) {
+            waitTimeInSecs = SDC_SERVICE_ACTION_WAIT.getDefaultValue();
+        }
+        try {
+            LOGGER.debug(String.format("Waiting for %d secs after SDC service action, to reach a stable state", waitTimeInSecs));
+            Thread.sleep(waitTimeInSecs * 1000L);
+        } catch (InterruptedException ignore) {
+        }
+    }
 }

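The new waitForSdcServiceActionToComplete() above gives the scini service a configurable settling delay, falling back to the property default for negative values. A standalone sketch of the same pattern (illustrative names; unlike the hunk, it restores the thread's interrupt flag instead of swallowing the exception):

public class SettleDelaySketch {
    private static final int DEFAULT_WAIT_SECS = 3;

    // configuredWaitSecs would come from a properties file such as agent.properties;
    // negative values fall back to the default, mirroring the hunk above
    static void waitForServiceToSettle(int configuredWaitSecs) {
        int waitSecs = configuredWaitSecs < 0 ? DEFAULT_WAIT_SECS : configuredWaitSecs;
        try {
            Thread.sleep(waitSecs * 1000L);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for callers
        }
    }

    public static void main(String[] args) {
        waitForServiceToSettle(-1); // falls back to the 3s default
        System.out.println("service settled");
    }
}
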
@@ -555,6 +555,18 @@ public class ScaleIOPrimaryDataStoreDriverTest {
 
+    @Test
+    public void testGetVolumeSizeRequiredOnPool() {
+        Assert.assertEquals(8L * (1024 * 1024 * 1024),
+                scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
+                        52428800,
+                        null,
+                        false));
+
+        Assert.assertEquals(8L * (1024 * 1024 * 1024),
+                scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
+                        52428800,
+                        52428800L,
+                        true));
+
+        Assert.assertEquals(16L * (1024 * 1024 * 1024),
+                scaleIOPrimaryDataStoreDriver.getVolumeSizeRequiredOnPool(
+                        10L * (1024 * 1024 * 1024),

@@ -139,6 +139,9 @@ public class StorPoolStorageAdaptor implements StorageAdaptor {
     }
 
     public static String getVolumeNameFromPath(final String volumeUuid, boolean tildeNeeded) {
+        if (volumeUuid == null) {
+            return null;
+        }
         if (volumeUuid.startsWith("/dev/storpool/")) {
             return volumeUuid.split("/")[3];
         } else if (volumeUuid.startsWith("/dev/storpool-byid/")) {

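For illustration, the parsing that the new null guard protects: String.split leaves an empty first element for a leading slash, so index 3 is the volume name. A standalone sketch (not the CloudStack class):

public class PathSketch {
    static String volumeNameFromPath(String path) {
        if (path == null) {
            return null;                // the guard added by the patch
        }
        if (path.startsWith("/dev/storpool/")) {
            return path.split("/")[3];  // ["", "dev", "storpool", "<name>"]
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(volumeNameFromPath("/dev/storpool/vol1")); // vol1
        System.out.println(volumeNameFromPath(null));                 // null, no NPE
    }
}
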
@@ -140,10 +140,11 @@ public class LdapListUsersCmd extends BaseListCmd {
         try {
             final List<LdapUser> users = _ldapManager.getUsers(domainId);
             ldapResponses = createLdapUserResponse(users);
-            // now filter and annotate
+            // now filter and annotate
             ldapResponses = applyUserFilter(ldapResponses);
         } catch (final NoLdapUserMatchingQueryException ex) {
-            // ok, we'll make do with the empty list ldapResponses = new ArrayList<LdapUserResponse>();
+            logger.debug(ex.getMessage());
+            // ok, we'll make do with the empty list
         } finally {
             response.setResponses(ldapResponses);
             response.setResponseName(getCommandName());

@@ -45,6 +45,8 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
     @Inject
     private AccountManager _accountManager;
 
+    private static final String LDAP_READ_TIMED_OUT_MESSAGE = "LDAP response read timed out";
+
     public LdapAuthenticator() {
         super();
     }

@@ -74,8 +76,8 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
         return rc;
     }
     List<LdapTrustMapVO> ldapTrustMapVOs = getLdapTrustMapVOS(domainId);
-    if(ldapTrustMapVOs != null && ldapTrustMapVOs.size() > 0) {
-        if(ldapTrustMapVOs.size() == 1 && ldapTrustMapVOs.get(0).getAccountId() == 0) {
+    if (ldapTrustMapVOs != null && ldapTrustMapVOs.size() > 0) {
+        if (ldapTrustMapVOs.size() == 1 && ldapTrustMapVOs.get(0).getAccountId() == 0) {
             if (logger.isTraceEnabled()) {
                 logger.trace("We have a single mapping of a domain to an ldap group or ou");
             }

@@ -125,11 +127,11 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
         mappedGroups.retainAll(memberships);
         tracelist("actual groups for " + username, mappedGroups);
         // check membership, there must be only one match in this domain
-        if(ldapUser.isDisabled()) {
+        if (ldapUser.isDisabled()) {
             logAndDisable(userAccount, "attempt to log on using disabled ldap user " + userAccount.getUsername(), false);
-        } else if(mappedGroups.size() > 1) {
+        } else if (mappedGroups.size() > 1) {
             logAndDisable(userAccount, "user '" + username + "' is mapped to more then one account in domain and will be disabled.", false);
-        } else if(mappedGroups.size() < 1) {
+        } else if (mappedGroups.size() < 1) {
             logAndDisable(userAccount, "user '" + username + "' is not mapped to an account in domain and will be removed.", true);
         } else {
             // a valid ldap configured user exists

@@ -137,12 +139,12 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
             // we could now assert that ldapTrustMapVOs.contains(mapping);
             // createUser in Account can only be done by account name not by account id;
             Account account = _accountManager.getAccount(mapping.getAccountId());
-            if(null == account) {
+            if (null == account) {
                 throw new CloudRuntimeException(String.format("account for user (%s) not found by id %d", username, mapping.getAccountId()));
             }
             String accountName = account.getAccountName();
             rc.first(_ldapManager.canAuthenticate(ldapUser.getPrincipal(), password, domainId));
-            if (! rc.first()) {
+            if (!rc.first()) {
                 rc.second(ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT);
             }
             // for security reasons we keep processing on faulty login attempt to not give a way information on userid existence

@@ -162,7 +164,7 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
             userAccount = _accountManager.getUserAccountById(user.getId());
         } else {
             // not a new user, check if mapped group has changed
-            if(userAccount.getAccountId() != mapping.getAccountId()) {
+            if (userAccount.getAccountId() != mapping.getAccountId()) {
                 final Account mappedAccount = _accountManager.getAccount(mapping.getAccountId());
                 if (mappedAccount == null || mappedAccount.getRemoved() != null) {
                     throw new CloudRuntimeException("Mapped account for users does not exist. Please contact your administrator.");

@@ -174,12 +176,21 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
             }
         } catch (NoLdapUserMatchingQueryException e) {
             logger.debug(e.getMessage());
-            disableUserInCloudStack(userAccount);
+            processLdapUserErrorMessage(userAccount, e.getMessage(), rc);
         }
 
         return rc;
     }
 
+    private void processLdapUserErrorMessage(UserAccount user, String errorMessage, Pair<Boolean, ActionOnFailedAuthentication> rc) {
+        if (StringUtils.isNotEmpty(errorMessage) && errorMessage.contains(LDAP_READ_TIMED_OUT_MESSAGE) && !rc.first()) {
+            rc.second(ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT);
+        } else {
+            // no user in ldap ==>> disable user in cloudstack
+            disableUserInCloudStack(user);
+        }
+    }
+
     private void tracelist(String msg, List<String> listToTrace) {
         if (logger.isTraceEnabled()) {
             StringBuilder logMsg = new StringBuilder();

@@ -197,7 +208,7 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
         if (logger.isInfoEnabled()) {
             logger.info(msg);
         }
-        if(remove) {
+        if (remove) {
             removeUserInCloudStack(userAccount);
         } else {
             disableUserInCloudStack(userAccount);

@@ -229,23 +240,22 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
             processLdapUser(password, domainId, user, rc, ldapUser, accountType);
         } catch (NoLdapUserMatchingQueryException e) {
             logger.debug(e.getMessage());
-            // no user in ldap ==>> disable user in cloudstack
-            disableUserInCloudStack(user);
+            processLdapUserErrorMessage(user, e.getMessage(), rc);
         }
         return rc;
     }
 
     private void processLdapUser(String password, Long domainId, UserAccount user, Pair<Boolean, ActionOnFailedAuthentication> rc, LdapUser ldapUser, Account.Type accountType) {
-        if(!ldapUser.isDisabled()) {
+        if (!ldapUser.isDisabled()) {
             rc.first(_ldapManager.canAuthenticate(ldapUser.getPrincipal(), password, domainId));
-            if(rc.first()) {
-                if(user == null) {
+            if (rc.first()) {
+                if (user == null) {
                     // import user to cloudstack
                     createCloudStackUserAccount(ldapUser, domainId, accountType);
                 } else {
                     enableUserInCloudStack(user);
                 }
-            } else if(user != null) {
+            } else if (user != null) {
                 rc.second(ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT);
             }
         } else {

@@ -264,30 +274,34 @@ public class LdapAuthenticator extends AdapterBase implements UserAuthenticator
      */
     Pair<Boolean, ActionOnFailedAuthentication> authenticate(String username, String password, Long domainId, UserAccount user) {
         boolean result = false;
+        boolean timedOut = false;
 
-        if(user != null ) {
+        if (user != null ) {
             try {
                 LdapUser ldapUser = _ldapManager.getUser(username, domainId);
-                if(!ldapUser.isDisabled()) {
+                if (!ldapUser.isDisabled()) {
                     result = _ldapManager.canAuthenticate(ldapUser.getPrincipal(), password, domainId);
                 } else {
                     logger.debug("user with principal "+ ldapUser.getPrincipal() + " is disabled in ldap");
                 }
             } catch (NoLdapUserMatchingQueryException e) {
                 logger.debug(e.getMessage());
+                if (e.getMessage().contains(LDAP_READ_TIMED_OUT_MESSAGE)) {
+                    timedOut = true;
+                }
             }
         }
-        return processResultAndAction(user, result);
+        return processResultAndAction(user, result, timedOut);
     }
 
-    private Pair<Boolean, ActionOnFailedAuthentication> processResultAndAction(UserAccount user, boolean result) {
-        return (!result && user != null) ?
+    private Pair<Boolean, ActionOnFailedAuthentication> processResultAndAction(UserAccount user, boolean result, boolean timedOut) {
+        return (!result && (user != null || timedOut)) ?
                 new Pair<Boolean, ActionOnFailedAuthentication>(result, ActionOnFailedAuthentication.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT):
                 new Pair<Boolean, ActionOnFailedAuthentication>(result, null);
     }
 
     private void enableUserInCloudStack(UserAccount user) {
-        if(user != null && (user.getState().equalsIgnoreCase(Account.State.DISABLED.toString()))) {
+        if (user != null && (user.getState().equalsIgnoreCase(Account.State.DISABLED.toString()))) {
             _accountManager.enableUser(user.getId());
         }
     }

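The net effect of the timedOut flag: an LDAP read timeout now counts toward the incorrect-login counter even when no local user exists yet. A minimal sketch of the patched decision (AuthAction stands in for ActionOnFailedAuthentication; purely illustrative):

public class DecisionSketch {
    enum AuthAction { INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT }

    // failed auth is penalized when the user exists locally or the read timed out
    static AuthAction actionFor(boolean result, boolean userExists, boolean timedOut) {
        return (!result && (userExists || timedOut))
                ? AuthAction.INCREMENT_INCORRECT_LOGIN_ATTEMPT_COUNT
                : null;
    }

    public static void main(String[] args) {
        System.out.println(actionFor(false, false, true));  // penalized: new behavior
        System.out.println(actionFor(false, false, false)); // null: no action, as before
        System.out.println(actionFor(true, true, false));   // null: success
    }
}
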
@@ -16,6 +16,7 @@
 // under the License.
 package org.apache.cloudstack.ldap;
 
+import java.io.FileInputStream;
 import java.io.IOException;
 import java.util.Hashtable;
 

|
|||
import javax.naming.NamingException;
|
||||
import javax.naming.ldap.InitialLdapContext;
|
||||
import javax.naming.ldap.LdapContext;
|
||||
import java.security.KeyStore;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
|
@@ -52,14 +54,14 @@ public class LdapContextFactory {
         return createInitialDirContext(bindPrincipal, bindPassword, providerUrl, true, domainId);
     }
 
-    private LdapContext createInitialDirContext(final String principal, final String password, final boolean isSystemContext, Long domainId) throws NamingException, IOException {
+    private LdapContext createInitialDirContext(final String principal, final String password, final boolean isSystemContext, Long domainId) throws NamingException {
         return createInitialDirContext(principal, password, null, isSystemContext, domainId);
     }
 
     private LdapContext createInitialDirContext(final String principal, final String password, final String providerUrl, final boolean isSystemContext, Long domainId)
-            throws NamingException, IOException {
+            throws NamingException {
         Hashtable<String, String> environment = getEnvironment(principal, password, providerUrl, isSystemContext, domainId);
-        logger.debug("initializing ldap with provider url: " + environment.get(Context.PROVIDER_URL));
+        logger.debug("initializing ldap with provider url: {}", environment.get(Context.PROVIDER_URL));
         return new InitialLdapContext(environment, null);
     }
 

@@ -73,8 +75,36 @@ public class LdapContextFactory {
         if (sslStatus) {
             logger.info("LDAP SSL enabled.");
             environment.put(Context.SECURITY_PROTOCOL, "ssl");
-            System.setProperty("javax.net.ssl.trustStore", _ldapConfiguration.getTrustStore(domainId));
-            System.setProperty("javax.net.ssl.trustStorePassword", _ldapConfiguration.getTrustStorePassword(domainId));
+            String trustStore = _ldapConfiguration.getTrustStore(domainId);
+            String trustStorePassword = _ldapConfiguration.getTrustStorePassword(domainId);
+
+            if (!validateTrustStore(trustStore, trustStorePassword)) {
+                throw new RuntimeException("Invalid truststore or truststore password");
+            }
+
+            System.setProperty("javax.net.ssl.trustStore", trustStore);
+            System.setProperty("javax.net.ssl.trustStorePassword", trustStorePassword);
         }
     }
 
+    private boolean validateTrustStore(String trustStore, String trustStorePassword) {
+        if (trustStore == null) {
+            return true;
+        }
+
+        if (trustStorePassword == null) {
+            return false;
+        }
+
+        try {
+            KeyStore.getInstance("JKS").load(
+                    new FileInputStream(trustStore),
+                    trustStorePassword.toCharArray()
+            );
+            return true;
+        } catch (Exception e) {
+            logger.warn("Failed to validate truststore: {}", e.getMessage());
+            return false;
+        }
+    }
+

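The validation above leans on KeyStore.load(), which throws on a missing file or wrong password, making it a cheap up-front truststore check. A standalone sketch of the same probe (path and password are hypothetical; note it closes the stream, which the hunk's bare new FileInputStream(trustStore) does not):

import java.io.FileInputStream;
import java.security.KeyStore;

public class TrustStoreCheck {
    public static void main(String[] args) {
        try (FileInputStream in = new FileInputStream("/etc/cloudstack/ldap.jks")) {
            KeyStore.getInstance("JKS").load(in, "changeit".toCharArray());
            System.out.println("truststore OK");
        } catch (Exception e) {
            System.out.println("invalid truststore or password: " + e.getMessage());
        }
    }
}
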
@@ -166,7 +166,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
 
     private LdapConfigurationResponse addConfigurationInternal(final String hostname, int port, final Long domainId) throws InvalidParameterValueException {
         // TODO evaluate what the right default should be
-        if(port <= 0) {
+        if (port <= 0) {
             port = 389;
         }
 

@@ -184,6 +184,11 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
         } catch (NamingException | IOException e) {
             logger.debug("NamingException while doing an LDAP bind", e);
             throw new InvalidParameterValueException("Unable to bind to the given LDAP server");
+        } catch (RuntimeException e) {
+            if (e.getMessage().contains("Invalid truststore")) {
+                throw new InvalidParameterValueException("Invalid truststore or truststore password");
+            }
+            throw e;
         } finally {
             closeContext(context);
         }

@@ -205,7 +210,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
         // TODO return the right account for this user
         final LdapContext context = _ldapContextFactory.createUserContext(principal, password, domainId);
         closeContext(context);
-        if(logger.isTraceEnabled()) {
+        if (logger.isTraceEnabled()) {
             logger.trace(String.format("User(%s) authenticated for domain(%s)", principal, domainId));
         }
         return true;

@@ -229,7 +234,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
     @Override
     public LdapConfigurationResponse createLdapConfigurationResponse(final LdapConfigurationVO configuration) {
         String domainUuid = null;
-        if(configuration.getDomainId() != null) {
+        if (configuration.getDomainId() != null) {
             DomainVO domain = domainDao.findById(configuration.getDomainId());
             if (domain != null) {
                 domainUuid = domain.getUuid();

@@ -298,8 +303,8 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
             return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(null)).getUser(escapedUsername, context, domainId);
 
         } catch (NamingException | IOException e) {
-            logger.debug("ldap Exception: ",e);
-            throw new NoLdapUserMatchingQueryException("No Ldap User found for username: "+username);
+            logger.debug("LDAP Exception: ", e);
+            throw new NoLdapUserMatchingQueryException("Unable to find LDAP User for username: " + username + ", due to " + e.getMessage());
         } finally {
             closeContext(context);
         }

@@ -319,8 +324,8 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
             LdapUserManager userManagerFactory = _ldapUserManagerFactory.getInstance(ldapProvider);
             return userManagerFactory.getUser(escapedUsername, type, name, context, domainId);
         } catch (NamingException | IOException e) {
-            logger.debug("ldap Exception: ",e);
-            throw new NoLdapUserMatchingQueryException("No Ldap User found for username: "+username + " in group: " + name + " of type: " + type);
+            logger.debug("LDAP Exception: ", e);
+            throw new NoLdapUserMatchingQueryException("Unable to find LDAP User for username: " + username + " in group: " + name + " of type: " + type + ", due to " + e.getMessage());
         } finally {
             closeContext(context);
         }

@@ -333,7 +338,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
             context = _ldapContextFactory.createBindContext(domainId);
             return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(domainId)).getUsers(context, domainId);
         } catch (NamingException | IOException e) {
-            logger.debug("ldap Exception: ",e);
+            logger.debug("LDAP Exception: ", e);
             throw new NoLdapUserMatchingQueryException("*");
         } finally {
             closeContext(context);

@@ -347,7 +352,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
             context = _ldapContextFactory.createBindContext(domainId);
             return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(domainId)).getUsersInGroup(groupName, context, domainId);
         } catch (NamingException | IOException e) {
-            logger.debug("ldap NamingException: ",e);
+            logger.debug("LDAP Exception: ", e);
             throw new NoLdapUserMatchingQueryException("groupName=" + groupName);
         } finally {
             closeContext(context);

@@ -385,7 +390,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
         final String escapedUsername = LdapUtils.escapeLDAPSearchFilter(username);
         return _ldapUserManagerFactory.getInstance(_ldapConfiguration.getLdapProvider(null)).getUsers("*" + escapedUsername + "*", context, null);
     } catch (NamingException | IOException e) {
-        logger.debug("ldap Exception: ",e);
+        logger.debug("LDAP Exception: ",e);
         throw new NoLdapUserMatchingQueryException(username);
     } finally {
         closeContext(context);

@@ -476,7 +481,7 @@ public class LdapManagerImpl extends ComponentLifecycleBase implements LdapManag
     private void clearOldAccountMapping(LinkAccountToLdapCmd cmd) {
         // first find if exists log warning and update
         LdapTrustMapVO oldVo = _ldapTrustMapDao.findGroupInDomain(cmd.getDomainId(), cmd.getLdapDomain());
-        if(oldVo != null) {
+        if (oldVo != null) {
             // deal with edge cases, i.e. check if the old account is indeed deleted etc.
             if (oldVo.getAccountId() != 0l) {
                 AccountVO oldAcount = accountDao.findByIdIncludingRemoved(oldVo.getAccountId());

pom.xml

@@ -50,7 +50,7 @@
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
     <project.systemvm.template.location>https://download.cloudstack.org/systemvm</project.systemvm.template.location>
-    <project.systemvm.template.version>4.20.1.0</project.systemvm.template.version>
+    <project.systemvm.template.version>4.20.2.0</project.systemvm.template.version>
     <sonar.organization>apache</sonar.organization>
     <sonar.host.url>https://sonarcloud.io</sonar.host.url>
 

@@ -89,6 +89,10 @@ import com.cloud.utils.Pair;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.db.Transaction;
+import com.cloud.utils.db.TransactionCallbackNoReturn;
+import com.cloud.utils.db.TransactionStatus;
 
 import org.jetbrains.annotations.Nullable;
 
 public class AlertManagerImpl extends ManagerBase implements AlertManager, Configurable {

@@ -290,8 +294,13 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
                 Math.min(CapacityManager.CapacityCalculateWorkers.value(), hostIds.size())));
         for (Long hostId : hostIds) {
             futures.put(hostId, executorService.submit(() -> {
-                final HostVO host = hostDao.findById(hostId);
-                _capacityMgr.updateCapacityForHost(host);
+                Transaction.execute(new TransactionCallbackNoReturn() {
+                    @Override
+                    public void doInTransactionWithoutResult(TransactionStatus status) {
+                        final HostVO host = hostDao.findById(hostId);
+                        _capacityMgr.updateCapacityForHost(host);
+                    }
+                });
                 return null;
             }));
         }

@@ -316,13 +325,18 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi
                 Math.min(CapacityManager.CapacityCalculateWorkers.value(), storagePoolIds.size())));
         for (Long poolId: storagePoolIds) {
             futures.put(poolId, executorService.submit(() -> {
-                final StoragePoolVO pool = _storagePoolDao.findById(poolId);
-                long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null);
-                if (pool.isShared()) {
-                    _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk);
-                } else {
-                    _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk);
-                }
+                Transaction.execute(new TransactionCallbackNoReturn() {
+                    @Override
+                    public void doInTransactionWithoutResult(TransactionStatus status) {
+                        final StoragePoolVO pool = _storagePoolDao.findById(poolId);
+                        long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null);
+                        if (pool.isShared()) {
+                            _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk);
+                        } else {
+                            _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk);
+                        }
+                    }
+                });
                 return null;
             }));
         }

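Both capacity hunks follow the same pattern: pooled worker threads do not inherit the submitting thread's database transaction context, so each task opens its own via Transaction.execute. A self-contained sketch of the shape (class and method names here are stand-ins, not CloudStack's):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PerTaskTxSketch {
    interface Tx { void run(); }

    // stands in for Transaction.execute(new TransactionCallbackNoReturn() { ... })
    static void inTransaction(Tx work) {
        System.out.println("begin tx on " + Thread.currentThread().getName());
        work.run();
        System.out.println("commit tx");
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (long id : new long[] {1L, 2L, 3L}) {
            pool.submit(() -> {
                inTransaction(() -> System.out.println("update capacity for " + id));
                return null; // Callable<Object>, mirroring the futures above
            });
        }
        pool.shutdown();
    }
}
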
File diff suppressed because it is too large

@@ -476,6 +476,7 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
         isoResponse.setExtractable(iso.isExtractable() && !(iso.getTemplateType() == TemplateType.PERHOST));
         isoResponse.setCreated(iso.getCreatedOnStore());
+        isoResponse.setDynamicallyScalable(iso.isDynamicallyScalable());
         isoResponse.setFormat(iso.getFormat());
         if (iso.getTemplateType() == TemplateType.PERHOST) {
             // for TemplateManager.XS_TOOLS_ISO and TemplateManager.VMWARE_TOOLS_ISO, we didn't download, but is ready to use.
             isoResponse.setReady(true);

@@ -574,10 +575,14 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation<Templa
             isoResponse.setZoneName(iso.getDataCenterName());
         }
 
-        Long isoSize = iso.getSize();
+        long isoSize = iso.getSize();
         if (isoSize > 0) {
             isoResponse.setSize(isoSize);
         }
+        long isoPhysicalSize = iso.getPhysicalSize();
+        if (isoPhysicalSize > 0) {
+            isoResponse.setPhysicalSize(isoPhysicalSize);
+        }
 
         if (iso.getUserDataId() != null) {
             isoResponse.setUserDataId(iso.getUserDataUUid());

@@ -50,6 +50,8 @@ import java.util.stream.Collectors;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import com.cloud.network.as.AutoScaleManager;
+import com.cloud.user.AccountManagerImpl;
 import org.apache.cloudstack.acl.RoleType;
 import org.apache.cloudstack.acl.SecurityChecker;

@@ -582,6 +584,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         configValuesForValidation.add(UserDataManager.VM_USERDATA_MAX_LENGTH_STRING);
         configValuesForValidation.add(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.key());
         configValuesForValidation.add(UnmanagedVMsManager.ConvertVmwareInstanceToKvmTimeout.key());
+        configValuesForValidation.add(AutoScaleManager.AutoScaleErroredInstanceThreshold.key());
     }
 
     protected void weightBasedParametersForValidation() {

@@ -1276,6 +1276,10 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
             buf.append(" vmpassword=").append(configurationDao.getValue("system.vm.password"));
         }
 
+        if (StringUtils.isNotEmpty(NTPServerConfig.value())) {
+            buf.append(" ntpserverlist=").append(NTPServerConfig.value().replaceAll("\\s+",""));
+        }
+
         for (NicProfile nic : profile.getNics()) {
             int deviceId = nic.getDeviceId();
             if (nic.getIPv4Address() == null) {

@@ -1506,7 +1510,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
     public Long[] getScannablePools() {
         List<Long> zoneIds = dataCenterDao.listEnabledNonEdgeZoneIds();
         if (logger.isDebugEnabled()) {
-            logger.debug(String.format("Enabled non-edge zones available for scan: %s", org.apache.commons.lang3.StringUtils.join(zoneIds, ",")));
+            logger.debug(String.format("Enabled non-edge zones available for scan: %s", StringUtils.join(zoneIds, ",")));
         }
         return zoneIds.toArray(Long[]::new);
     }

@@ -796,7 +796,7 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel, Confi
         NetworkVO ret_network = null;
         for (NetworkVO nw : networks) {
             try {
-                checkAccountNetworkPermissions(account, nw);
+                checkNetworkPermissions(account, nw);
             } catch (PermissionDeniedException e) {
                 continue;
             }

@@ -5658,7 +5658,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService, C
             }
 
             addProviderToPhysicalNetwork(physicalNetworkId, Provider.Nsx.getName(), null, null);
-            enableProvider(Provider.Nsx.getName());
         }
         return null;
     }

@@ -45,6 +45,12 @@ public interface AutoScaleManager extends AutoScaleService {
             "If true, the auto scale vm group name will be used as a prefix for the auto scale vm hostnames.",
             true);
 
+    ConfigKey<Integer> AutoScaleErroredInstanceThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
+            "autoscale.errored.instance.threshold",
+            "10",
+            "The number of Error Instances allowed in autoscale vm groups for scale up.",
+            true);
+
     void checkAutoScaleUser(Long autoscaleUserId, long accountId);
 
     boolean deleteAutoScaleVmGroupsByAccount(Account account);

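Because the new ConfigKey is marked dynamic (the trailing true), its value is typically read at use time with value() rather than cached at startup. A self-contained stand-in showing how such a threshold would be consumed (the comparison direction is an assumption based on the key's description):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ThresholdSketch {
    // stands in for the configuration store behind ConfigKey#value()
    static final Map<String, String> CONFIG = new ConcurrentHashMap<>(
            Map.of("autoscale.errored.instance.threshold", "10"));

    static int erroredInstanceThreshold() {
        return Integer.parseInt(CONFIG.get("autoscale.errored.instance.threshold"));
    }

    static boolean canScaleUp(int erroredInstances) {
        return erroredInstances <= erroredInstanceThreshold();
    }

    public static void main(String[] args) {
        System.out.println(canScaleUp(3));  // true
        System.out.println(canScaleUp(11)); // false
    }
}
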
Some files were not shown because too many files have changed in this diff