Merge branch '4.20' of https://github.com/apache/cloudstack into ghi11438-errorprone-fixes

This commit is contained in:
Pearl Dsilva 2026-01-29 19:24:53 -05:00
commit 7e5e80ea06
179 changed files with 4306 additions and 1768 deletions

View File

@ -331,7 +331,7 @@ public class ConsoleProxyResource extends ServerResourceBase implements ServerRe
final Object resource = this;
logger.info("Building class loader for com.cloud.consoleproxy.ConsoleProxy");
if (consoleProxyMain == null) {
logger.info("Running com.cloud.consoleproxy.ConsoleProxy with encryptor password={}", encryptorPassword);
logger.info("Running com.cloud.consoleproxy.ConsoleProxy");
consoleProxyMain = new Thread(new ManagedContextRunnable() {
@Override
protected void runInContext() {

View File

@ -107,6 +107,10 @@ public interface NetworkService {
PhysicalNetwork physicalNetwork, long zoneId, ControlledEntity.ACLType aclType) throws
InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException;
Network createGuestNetwork(long networkOfferingId, String name, String displayText, Account owner,
PhysicalNetwork physicalNetwork, long zoneId, ControlledEntity.ACLType aclType, Pair<Integer, Integer> vrIfaceMTUs) throws
InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException;
Pair<List<? extends Network>, Integer> searchForNetworks(ListNetworksCmd cmd);
boolean deleteNetwork(long networkId, boolean forced);

View File

@ -71,8 +71,8 @@ public interface AlertService {
public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true);
public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true);
public static final AlertType ALERT_TYPE_VM_SNAPSHOT = new AlertType((short)32, "ALERT.VM.SNAPSHOT", true);
public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PUBLIC.IFACE.MTU", true);
public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PRIVATE.IFACE.MTU", true);
public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)33, "ALERT.VR.PUBLIC.IFACE.MTU", true);
public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)34, "ALERT.VR.PRIVATE.IFACE.MTU", true);
public short getType() {
return type;

View File

@ -1097,6 +1097,7 @@ public class ApiConstants {
public static final String DOCKER_REGISTRY_EMAIL = "dockerregistryemail";
public static final String ISO_NAME = "isoname";
public static final String ISO_STATE = "isostate";
public static final String ISO_URL = "isourl";
public static final String SEMANTIC_VERSION = "semanticversion";
public static final String KUBERNETES_VERSION_ID = "kubernetesversionid";
public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname";

View File

@ -42,7 +42,7 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd {
@Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = TemplateResponse.class, required = true, description = "The ID of the image file")
private Long id;
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "The name of the image file")
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, length = 251, description = "The name of the image file")
private String templateName;
@Parameter(name = ApiConstants.OS_TYPE_ID,

View File

@ -29,6 +29,11 @@ import org.apache.cloudstack.api.response.ZoneResponse;
import com.cloud.exception.DiscoveryException;
import com.cloud.storage.ImageStore;
import com.cloud.user.Account;
import org.apache.commons.collections.MapUtils;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@APICommand(name = "addSecondaryStorage", description = "Adds secondary storage.", responseObject = ImageStoreResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
@ -44,6 +49,9 @@ public class AddSecondaryStorageCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "The Zone ID for the secondary storage")
protected Long zoneId;
@Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "Details in key/value pairs using format details[i].keyname=keyvalue. Example: details[0].copytemplatesfromothersecondarystorages=true")
protected Map details;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -56,6 +64,20 @@ public class AddSecondaryStorageCmd extends BaseCmd {
return zoneId;
}
// Flattens the nested "details[i].keyname=value" MAP parameter into a single
// String -> String map. Returns an empty map (never null) when no details were
// supplied on the API call.
public Map<String, String> getDetails() {
Map<String, String> detailsMap = new HashMap<>();
if (MapUtils.isNotEmpty(details)) {
Collection<?> props = details.values();
for (Object prop : props) {
// NOTE(review): assumes the API layer deserializes each indexed detail
// group as a HashMap<String, String> — the unchecked cast relies on that
// framework convention; TODO confirm against the param-processing code.
HashMap<String, String> detail = (HashMap<String, String>) prop;
for (Map.Entry<String, String> entry: detail.entrySet()) {
detailsMap.put(entry.getKey(),entry.getValue());
}
}
}
return detailsMap;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@ -68,7 +90,7 @@ public class AddSecondaryStorageCmd extends BaseCmd {
@Override
public void execute(){
try{
ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), null);
ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), getDetails());
ImageStoreResponse storeResponse = null;
if (result != null ) {
storeResponse = _responseGenerator.createImageStoreResponse(result);

View File

@ -109,6 +109,9 @@ public class ListHostsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, description = "CPU Arch of the host", since = "4.20.1")
private String arch;
@Parameter(name = ApiConstants.VERSION, type = CommandType.STRING, description = "the host version", since = "4.20.3")
private String version;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -197,6 +200,10 @@ public class ListHostsCmd extends BaseListCmd {
return StringUtils.isBlank(arch) ? null : CPU.CPUArch.fromType(arch);
}
// Returns the host-version filter supplied by the caller, or null when the
// "version" parameter was not passed (i.e. no filtering on host version).
public String getVersion() {
return version;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -45,6 +45,10 @@ public class ListMgmtsCmd extends BaseListCmd {
since = "4.20.1.0")
private Boolean peers;
@Parameter(name = ApiConstants.VERSION, type = CommandType.STRING,
description = "the version of the management server", since = "4.20.3")
private String version;
/////////////////////////////////////////////////////
/////////////////// Accessors ///////////////////////
/////////////////////////////////////////////////////
@ -61,6 +65,10 @@ public class ListMgmtsCmd extends BaseListCmd {
return BooleanUtils.toBooleanDefaultIfNull(peers, false);
}
// Returns the management-server-version filter supplied by the caller, or null
// when the "version" parameter was not passed (i.e. no filtering on version).
public String getVersion() {
return version;
}
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////

View File

@ -78,6 +78,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.DOMAIN_ID,
type = CommandType.STRING,
length = 4096,
description = "The ID of the containing domain(s) as comma separated string, public for public offerings")
private String domainIds;

View File

@ -75,6 +75,7 @@ public class UpdateDiskOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -69,6 +69,7 @@ public class UpdateServiceOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -65,6 +65,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -70,7 +70,7 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd {
@Parameter(name = ApiConstants.IS_EXTRACTABLE, type = CommandType.BOOLEAN, description = "True if the ISO or its derivatives are extractable; default is false")
private Boolean extractable;
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, description = "The name of the ISO")
@Parameter(name = ApiConstants.NAME, type = CommandType.STRING, required = true, length = 251, description = "The name of the ISO")
private String isoName;
@Parameter(name = ApiConstants.OS_TYPE_ID,

View File

@ -244,8 +244,7 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd {
}
private Snapshot.LocationType getLocationType() {
if (Snapshot.LocationType.values() == null || Snapshot.LocationType.values().length == 0 || locationType == null) {
if (locationType == null) {
return null;
}

View File

@ -43,7 +43,7 @@ import java.util.List;
public class DeployVnfApplianceCmd extends DeployVMCmd implements UserCmd {
@Parameter(name = ApiConstants.VNF_CONFIGURE_MANAGEMENT, type = CommandType.BOOLEAN, required = false,
description = "True by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. False otherwise. " +
description = "False by default, security group or network rules (source nat and firewall rules) will be configured for VNF management interfaces. True otherwise. " +
"Network rules are configured if management network is an isolated network or shared network with security groups.")
private Boolean vnfConfigureManagement;

View File

@ -118,7 +118,7 @@ public interface QueryService {
ConfigKey<String> UserVMReadOnlyDetails = new ConfigKey<>(String.class,
"user.vm.readonly.details", "Advanced", "dataDiskController, rootDiskController",
"List of read-only VM settings/details as comma separated string", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null);
"List of read-only VM settings/details as comma separated string", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.CSV, null, "");
ConfigKey<Boolean> SortKeyAscending = new ConfigKey<>("Advanced", Boolean.class, "sortkey.algorithm", "true",
"Sort algorithm - ascending or descending - to use. For entities that use sort key(template, disk offering, service offering, " +

View File

@ -29,6 +29,7 @@ import org.apache.cloudstack.api.command.user.template.UpdateVnfTemplateCmd;
import org.apache.cloudstack.api.command.user.vm.DeployVnfApplianceCmd;
import org.apache.cloudstack.framework.config.ConfigKey;
import java.util.List;
import java.util.Map;
public interface VnfTemplateManager {
@ -42,11 +43,12 @@ public interface VnfTemplateManager {
void updateVnfTemplate(long templateId, UpdateVnfTemplateCmd cmd);
void validateVnfApplianceNics(VirtualMachineTemplate template, List<Long> networkIds);
void validateVnfApplianceNics(VirtualMachineTemplate template, List<Long> networkIds, Map<Integer, Long> vmNetworkMap);
SecurityGroup createSecurityGroupForVnfAppliance(DataCenter zone, VirtualMachineTemplate template, Account owner, DeployVnfApplianceCmd cmd);
void createIsolatedNetworkRulesForVnfAppliance(DataCenter zone, VirtualMachineTemplate template, Account owner,
UserVm vm, DeployVnfApplianceCmd cmd)
throws InsufficientAddressCapacityException, ResourceAllocationException, ResourceUnavailableException;
}

View File

@ -16,6 +16,7 @@
// under the License.
package org.apache.cloudstack.storage.template;
import com.cloud.agent.api.to.deployasis.OVFNetworkTO;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.network.VNF;
import com.cloud.storage.Storage;
@ -124,6 +125,9 @@ public class VnfTemplateUtils {
public static void validateApiCommandParams(BaseCmd cmd, VirtualMachineTemplate template) {
if (cmd instanceof RegisterVnfTemplateCmd) {
RegisterVnfTemplateCmd registerCmd = (RegisterVnfTemplateCmd) cmd;
if (registerCmd.isDeployAsIs() && CollectionUtils.isNotEmpty(registerCmd.getVnfNics())) {
throw new InvalidParameterValueException("VNF nics cannot be specified when register a deploy-as-is Template. Please wait until Template settings are read from OVA.");
}
validateApiCommandParams(registerCmd.getVnfDetails(), registerCmd.getVnfNics(), registerCmd.getTemplateType());
} else if (cmd instanceof UpdateVnfTemplateCmd) {
UpdateVnfTemplateCmd updateCmd = (UpdateVnfTemplateCmd) cmd;
@ -149,4 +153,18 @@ public class VnfTemplateUtils {
}
}
}
/**
 * Validates the VNF nic definitions supplied for a deploy-as-is template against
 * the networks read from the OVA descriptor. A no-op when no VNF nics were given.
 *
 * @param ovfNetworks networks parsed from the OVA descriptor
 * @param vnfNics     VNF nic definitions supplied by the caller
 * @throws InvalidParameterValueException when the OVA network list is not yet
 *         available, or when a nic whose device ID maps to an OVA-defined
 *         network is not marked as required
 */
public static void validateDeployAsIsTemplateVnfNics(List<OVFNetworkTO> ovfNetworks, List<VNF.VnfNic> vnfNics) {
    if (CollectionUtils.isEmpty(vnfNics)) {
        return;
    }
    if (CollectionUtils.isEmpty(ovfNetworks)) {
        throw new InvalidParameterValueException("The list of networks read from OVA is empty. Please wait until the template is fully downloaded and processed.");
    }
    final int ovaNetworkCount = ovfNetworks.size();
    for (VNF.VnfNic nic : vnfNics) {
        // A device ID below the OVA network count maps to a network defined in
        // the OVA, so such a nic must be flagged as required.
        final boolean mapsToOvaNetwork = nic.getDeviceId() < ovaNetworkCount;
        if (mapsToOvaNetwork && !nic.isRequired()) {
            throw new InvalidParameterValueException(String.format("The VNF nic [device ID: %s ] is required as it is defined in the OVA template.", nic.getDeviceId()));
        }
    }
}
}

View File

@ -22,7 +22,6 @@ import java.util.concurrent.Future;
import org.apache.cloudstack.api.response.MigrationResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy;
@ -31,5 +30,5 @@ public interface StorageOrchestrationService {
MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List<Long> templateIdList, List<Long> snapshotIdList);
Future<TemplateApiResult> orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore);
Future<TemplateApiResult> orchestrateTemplateCopyFromSecondaryStores(long templateId, DataStore destStore);
}

View File

@ -30,6 +30,8 @@ public interface SnapshotDataFactory {
SnapshotInfo getSnapshot(long snapshotId, long storeId, DataStoreRole role);
SnapshotInfo getSnapshotIncludingRemoved(long snapshotId, long storeId, DataStoreRole role);
SnapshotInfo getSnapshotWithRoleAndZone(long snapshotId, DataStoreRole role, long zoneId);
SnapshotInfo getSnapshotOnPrimaryStore(long snapshotId);

View File

@ -80,4 +80,6 @@ public interface TemplateService {
List<DatadiskTO> getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId);
AsyncCallFuture<TemplateApiResult> copyTemplateToImageStore(DataObject source, DataStore destStore);
}
void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore);
}

View File

@ -54,5 +54,4 @@ public interface AlertManager extends Manager, AlertService {
void recalculateCapacity();
void sendAlert(AlertType alertType, long dataCenterId, Long podId, String subject, String body);
}

View File

@ -51,8 +51,8 @@ public interface ResourceManager extends ResourceService, Configurable {
ConfigKey<Boolean> KvmSshToAgentEnabled = new ConfigKey<>("Advanced", Boolean.class,
"kvm.ssh.to.agent","true",
"Number of retries when preparing a host into Maintenance Mode is faulty before failing",
false);
"True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations",
true);
ConfigKey<String> HOST_MAINTENANCE_LOCAL_STRATEGY = new ConfigKey<>(String.class,
"host.maintenance.local.storage.strategy", "Advanced","Error",

View File

@ -220,8 +220,9 @@ public interface StorageManager extends StorageService {
"storage.pool.host.connect.workers", "1",
"Number of worker threads to be used to connect hosts to a primary storage", true);
ConfigKey<Boolean> COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES = new ConfigKey<>(Boolean.class, "copy.public.templates.from.other.storages",
"Storage", "true", "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.",
ConfigKey<Boolean> COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES = new ConfigKey<>(Boolean.class, "copy.templates.from.other.secondary.storages",
"Storage", "true", "When enabled, this feature allows templates to be copied from existing Secondary Storage servers (within the same zone or across zones) " +
"while adding a new Secondary Storage. If the copy operation fails, the system falls back to downloading the template from the source URL.",
true, ConfigKey.Scope.Zone, null);
/**

View File

@ -152,6 +152,8 @@ public interface TemplateManager {
TemplateType validateTemplateType(BaseCmd cmd, boolean isAdmin, boolean isCrossZones);
DataStore verifyHeuristicRulesForZone(VMTemplateVO template, Long zoneId);
List<DatadiskTO> getTemplateDisksOnImageStore(VirtualMachineTemplate template, DataStoreRole role, String configurationId);
static Boolean getValidateUrlIsResolvableBeforeRegisteringTemplateValue() {

View File

@ -89,7 +89,7 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
return;
}
for (Long vmId : vmIds) {
if (!notUpdated.containsKey(vmId)) {
if (MapUtils.isEmpty(notUpdated) || !notUpdated.containsKey(vmId)) {
logger.debug("VM state report is updated. {}, {}, power state: {}",
() -> hostCache.get(hostId), () -> vmCache.get(vmId), () -> instancePowerStates.get(vmId));
_messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE,
@ -158,8 +158,8 @@ public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStat
// an update might have occurred that we should not override in case of out of band migration
instancePowerStates.put(instance.getId(), VirtualMachine.PowerState.PowerReportMissing);
} else {
logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period yet",
instance.getId(), milliSecondsSinceLastStateUpdate);
logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period ({} ms) yet",
instance.getId(), milliSecondsSinceLastStateUpdate, milliSecondsGracefulPeriod);
}
}
updateAndPublishVmPowerStates(hostId, instancePowerStates, startTime);

View File

@ -3559,8 +3559,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
final HashMap<Long, Long> stillFree = new HashMap<Long, Long>();
final List<Long> networkIds = _networksDao.findNetworksToGarbageCollect();
final int netGcWait = NumbersUtil.parseInt(_configDao.getValue(NetworkGcWait.key()), 60);
logger.info("NetworkGarbageCollector uses '{}' seconds for GC interval.", netGcWait);
final int netGcWait = NetworkGcWait.value();
final int netGcInterval = NetworkGcInterval.value();
logger.info("NetworkGarbageCollector uses '{}' seconds for GC wait and '{}' seconds for GC interval.", netGcWait, netGcInterval);
for (final Long networkId : networkIds) {
if (!_networkModel.isNetworkReadyForGc(networkId)) {
@ -4882,9 +4883,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra
}
public static final ConfigKey<Integer> NetworkGcWait = new ConfigKey<Integer>(Integer.class, "network.gc.wait", "Advanced", "600",
"Time (in seconds) to wait before shutting down a network that's not in used", false, Scope.Global, null);
"Time (in seconds) to wait before shutting down a network that's not in used", true, Scope.Global, null);
public static final ConfigKey<Integer> NetworkGcInterval = new ConfigKey<Integer>(Integer.class, "network.gc.interval", "Advanced", "600",
"Seconds to wait before checking for networks to shutdown", true, Scope.Global, null);
"Seconds to wait before checking for networks to shutdown", false, Scope.Global, null);
@Override
public ConfigKey<?>[] getConfigKeys() {

View File

@ -36,6 +36,9 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.TemplateManager;
import org.apache.cloudstack.api.response.MigrationResponse;
import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
@ -45,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageServic
import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
@ -103,6 +107,15 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
VolumeDataStoreDao volumeDataStoreDao;
@Inject
DataMigrationUtility migrationHelper;
@Inject
TemplateManager templateManager;
@Inject
VMTemplateDao templateDao;
@Inject
TemplateDataFactory templateDataFactory;
@Inject
DataCenterDao dcDao;
ConfigKey<Double> ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class,
"image.store.imbalance.threshold",
@ -304,8 +317,9 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
@Override
public Future<TemplateApiResult> orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore) {
return submit(destStore.getScope().getScopeId(), new CopyTemplateTask(source, destStore));
public Future<TemplateApiResult> orchestrateTemplateCopyFromSecondaryStores(long srcTemplateId, DataStore destStore) {
Long dstZoneId = destStore.getScope().getScopeId();
return submit(dstZoneId, new CopyTemplateFromSecondaryStorageTask(srcTemplateId, destStore));
}
protected Pair<String, Boolean> migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List<DataObject> files, MigrationPolicy migrationPolicy, int skipped) {
@ -624,13 +638,13 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
}
}
private class CopyTemplateTask implements Callable<TemplateApiResult> {
private TemplateInfo sourceTmpl;
private DataStore destStore;
private String logid;
private class CopyTemplateFromSecondaryStorageTask implements Callable<TemplateApiResult> {
private final long srcTemplateId;
private final DataStore destStore;
private final String logid;
public CopyTemplateTask(TemplateInfo sourceTmpl, DataStore destStore) {
this.sourceTmpl = sourceTmpl;
CopyTemplateFromSecondaryStorageTask(long srcTemplateId, DataStore destStore) {
this.srcTemplateId = srcTemplateId;
this.destStore = destStore;
this.logid = ThreadContext.get(LOGCONTEXTID);
}
@ -639,17 +653,16 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra
public TemplateApiResult call() {
ThreadContext.put(LOGCONTEXTID, logid);
TemplateApiResult result;
AsyncCallFuture<TemplateApiResult> future = templateService.copyTemplateToImageStore(sourceTmpl, destStore);
long destZoneId = destStore.getScope().getScopeId();
TemplateInfo sourceTmpl = templateDataFactory.getTemplate(srcTemplateId, DataStoreRole.Image);
try {
result = future.get();
} catch (ExecutionException | InterruptedException e) {
logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}",
sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString());
templateService.handleTemplateCopyFromSecondaryStores(srcTemplateId, destStore);
result = new TemplateApiResult(sourceTmpl);
result.setResult(e.getMessage());
} finally {
tryCleaningUpExecutor(destZoneId);
ThreadContext.clearAll();
}
tryCleaningUpExecutor(destStore.getScope().getScopeId());
ThreadContext.clearAll();
return result;
}
}

View File

@ -137,7 +137,9 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase<PhysicalNe
}
sc.setParameters("physicalNetworkId", physicalNetworkId);
sc.setParameters("trafficType", trafficType);
if (trafficType != null) {
sc.setParameters("trafficType", trafficType);
}
List<String> tag = customSearch(sc, null);
return tag.size() == 0 ? null : tag.get(0);

View File

@ -47,6 +47,8 @@ public interface SnapshotDao extends GenericDao<SnapshotVO, Long>, StateDao<Snap
List<SnapshotVO> listAllByStatus(Snapshot.State... status);
List<SnapshotVO> listAllByStatusIncludingRemoved(Snapshot.State... status);
void updateVolumeIds(long oldVolId, long newVolId);
List<SnapshotVO> listByStatusNotIn(long volumeId, Snapshot.State... status);

View File

@ -252,6 +252,13 @@ public class SnapshotDaoImpl extends GenericDaoBase<SnapshotVO, Long> implements
return listBy(sc, null);
}
/**
 * Lists snapshots whose state matches any of the given states, including rows
 * that have been soft-deleted (removed != null).
 *
 * @param status one or more snapshot states to match
 * @return matching snapshots, removed entries included
 */
@Override
public List<SnapshotVO> listAllByStatusIncludingRemoved(Snapshot.State... status) {
SearchCriteria<SnapshotVO> sc = StatusSearch.create();
// cast to Object[] so the varargs array is expanded into the IN-clause values
sc.setParameters("status", (Object[])status);
return listIncludingRemovedBy(sc, null);
}
@Override
public List<SnapshotVO> listByIds(Object... ids) {
SearchCriteria<SnapshotVO> sc = snapshotIdsSearch.create();

View File

@ -48,7 +48,7 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
List<VolumeVO> findIncludingRemovedByInstanceAndType(long id, Volume.Type vType);
List<VolumeVO> findByInstanceIdAndPoolId(long instanceId, long poolId);
List<VolumeVO> findNonDestroyedVolumesByInstanceIdAndPoolId(long instanceId, long poolId);
List<VolumeVO> findByInstanceIdDestroyed(long vmId);
@ -70,11 +70,11 @@ public interface VolumeDao extends GenericDao<VolumeVO, Long>, StateDao<Volume.S
List<VolumeVO> findCreatedByInstance(long id);
List<VolumeVO> findByPoolId(long poolId);
List<VolumeVO> findNonDestroyedVolumesByPoolId(long poolId);
VolumeVO findByPoolIdName(long poolId, String name);
List<VolumeVO> findByPoolId(long poolId, Volume.Type volumeType);
List<VolumeVO> findNonDestroyedVolumesByPoolId(long poolId, Volume.Type volumeType);
List<VolumeVO> findByPoolIdAndState(long poolid, Volume.State state);

View File

@ -135,7 +135,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
@Override
public List<VolumeVO> findByPoolId(long poolId) {
public List<VolumeVO> findNonDestroyedVolumesByPoolId(long poolId) {
SearchCriteria<VolumeVO> sc = AllFieldsSearch.create();
sc.setParameters("poolId", poolId);
sc.setParameters("notDestroyed", Volume.State.Destroy, Volume.State.Expunged);
@ -144,7 +144,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
@Override
public List<VolumeVO> findByInstanceIdAndPoolId(long instanceId, long poolId) {
public List<VolumeVO> findNonDestroyedVolumesByInstanceIdAndPoolId(long instanceId, long poolId) {
SearchCriteria<VolumeVO> sc = AllFieldsSearch.create();
sc.setParameters("instanceId", instanceId);
sc.setParameters("poolId", poolId);
@ -161,7 +161,7 @@ public class VolumeDaoImpl extends GenericDaoBase<VolumeVO, Long> implements Vol
}
@Override
public List<VolumeVO> findByPoolId(long poolId, Volume.Type volumeType) {
public List<VolumeVO> findNonDestroyedVolumesByPoolId(long poolId, Volume.Type volumeType) {
SearchCriteria<VolumeVO> sc = AllFieldsSearch.create();
sc.setParameters("poolId", poolId);
sc.setParameters("notDestroyed", Volume.State.Destroy, Volume.State.Expunged);

View File

@ -33,6 +33,7 @@ import java.util.List;
import javax.inject.Inject;
import com.cloud.upgrade.dao.Upgrade42020to42030;
import com.cloud.utils.FileUtil;
import org.apache.cloudstack.utils.CloudStackVersion;
import org.apache.commons.lang3.StringUtils;
@ -236,6 +237,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
.next("4.19.0.0", new Upgrade41900to41910())
.next("4.19.1.0", new Upgrade41910to42000())
.next("4.20.0.0", new Upgrade42000to42010())
.next("4.20.2.0", new Upgrade42020to42030())
.build();
}

View File

@ -0,0 +1,64 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.upgrade.dao;
import java.io.InputStream;
import java.sql.Connection;
import com.cloud.utils.exception.CloudRuntimeException;
/**
 * Database upgrade step that moves a deployment from version 4.20.2.0 to
 * 4.20.3.0. The prepare phase runs a single SQL schema script; this step has no
 * data migration, no cleanup script and no system VM template update.
 */
public class Upgrade42020to42030 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate {

    /** Classpath location of the schema script applied during the prepare phase. */
    private static final String SCRIPT_FILE = "META-INF/db/schema-42020to42030.sql";

    @Override
    public String[] getUpgradableVersionRange() {
        return new String[]{"4.20.2.0", "4.20.3.0"};
    }

    @Override
    public String getUpgradedVersion() {
        return "4.20.3.0";
    }

    @Override
    public boolean supportsRollingUpgrade() {
        // Rolling upgrades are not supported for this step.
        return false;
    }

    @Override
    public InputStream[] getPrepareScripts() {
        final InputStream stream = Thread.currentThread().getContextClassLoader().getResourceAsStream(SCRIPT_FILE);
        if (stream == null) {
            throw new CloudRuntimeException("Unable to find " + SCRIPT_FILE);
        }
        return new InputStream[] {stream};
    }

    @Override
    public void performDataMigration(Connection conn) {
        // No data migration is needed for this upgrade step.
    }

    @Override
    public InputStream[] getCleanupScripts() {
        // No cleanup script for this upgrade step.
        return null;
    }

    @Override
    public void updateSystemVmTemplates(Connection conn) {
        // System VM templates are unchanged in this upgrade step.
    }
}

View File

@ -62,7 +62,7 @@ public interface UsageDao extends GenericDao<UsageVO, Long> {
void saveUsageRecords(List<UsageVO> usageRecords);
void removeOldUsageRecords(int days);
void expungeAllOlderThan(int days, long limitPerQuery);
UsageVO persistUsage(final UsageVO usage);

View File

@ -26,15 +26,16 @@ import com.cloud.utils.Pair;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.db.TransactionCallback;
import com.cloud.utils.db.TransactionCallbackNoReturn;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.db.TransactionStatus;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.acl.RoleType;
import org.apache.commons.lang3.time.DateUtils;
import org.springframework.stereotype.Component;
import java.sql.PreparedStatement;
@ -51,8 +52,7 @@ import java.util.TimeZone;
public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements UsageDao {
private static final String DELETE_ALL = "DELETE FROM cloud_usage";
private static final String DELETE_ALL_BY_ACCOUNTID = "DELETE FROM cloud_usage WHERE account_id = ?";
private static final String DELETE_ALL_BY_INTERVAL = "DELETE FROM cloud_usage WHERE end_date < DATE_SUB(CURRENT_DATE(), INTERVAL ? DAY)";
private static final String INSERT_ACCOUNT = "INSERT INTO cloud_usage.account (id, account_name, type, role_id, domain_id, removed, cleanup_needed) VALUES (?,?,?,?,?,?,?)";
private static final String INSERT_ACCOUNT = "INSERT INTO cloud_usage.account (id, account_name, uuid, type, role_id, domain_id, removed, cleanup_needed) VALUES (?,?,?,?,?,?,?,?)";
private static final String INSERT_USER_STATS = "INSERT INTO cloud_usage.user_statistics (id, data_center_id, account_id, public_ip_address, device_id, device_type, network_id, net_bytes_received,"
+ " net_bytes_sent, current_bytes_received, current_bytes_sent, agg_bytes_received, agg_bytes_sent) VALUES (?,?,?,?,?,?,?,?,?,?, ?, ?, ?)";
@ -88,8 +88,12 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
private static final String UPDATE_BUCKET_STATS = "UPDATE cloud_usage.bucket_statistics SET size=? WHERE id=?";
protected SearchBuilder<UsageVO> endDateLessThanSearch;
public UsageDaoImpl() {
endDateLessThanSearch = createSearchBuilder();
endDateLessThanSearch.and("endDate", endDateLessThanSearch.entity().getEndDate(), SearchCriteria.Op.LT);
endDateLessThanSearch.done();
}
@Override
@ -129,25 +133,26 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
for (AccountVO acct : accounts) {
pstmt.setLong(1, acct.getId());
pstmt.setString(2, acct.getAccountName());
pstmt.setInt(3, acct.getType().ordinal());
pstmt.setString(3, acct.getUuid());
pstmt.setInt(4, acct.getType().ordinal());
//prevent autoboxing NPE by defaulting to User role
if(acct.getRoleId() == null){
pstmt.setLong(4, RoleType.User.getId());
pstmt.setLong(5, RoleType.User.getId());
}else{
pstmt.setLong(4, acct.getRoleId());
pstmt.setLong(5, acct.getRoleId());
}
pstmt.setLong(5, acct.getDomainId());
pstmt.setLong(6, acct.getDomainId());
Date removed = acct.getRemoved();
if (removed == null) {
pstmt.setString(6, null);
pstmt.setString(7, null);
} else {
pstmt.setString(6, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), acct.getRemoved()));
pstmt.setString(7, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), acct.getRemoved()));
}
pstmt.setBoolean(7, acct.getNeedsCleanup());
pstmt.setBoolean(8, acct.getNeedsCleanup());
pstmt.addBatch();
}
@ -538,21 +543,20 @@ public class UsageDaoImpl extends GenericDaoBase<UsageVO, Long> implements Usage
}
@Override
public void removeOldUsageRecords(int days) {
Transaction.execute(TransactionLegacy.USAGE_DB, new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(TransactionStatus status) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
try {
pstmt = txn.prepareAutoCloseStatement(DELETE_ALL_BY_INTERVAL);
pstmt.setLong(1, days);
pstmt.executeUpdate();
} catch (Exception ex) {
logger.error("error removing old cloud_usage records for interval: " + days);
}
}
});
public void expungeAllOlderThan(int days, long limitPerQuery) {
SearchCriteria<UsageVO> sc = endDateLessThanSearch.create();
Date limit = DateUtils.addDays(new Date(), -days);
sc.setParameters("endDate", limit);
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
logger.debug("Removing all cloud_usage records older than [{}].", limit);
int totalExpunged = batchExpunge(sc, limitPerQuery);
logger.info("Removed a total of [{}] cloud_usage records older than [{}].", totalExpunged, limit);
} finally {
txn.close();
}
}
public UsageVO persistUsage(final UsageVO usage) {

View File

@ -28,6 +28,8 @@ public interface UsageJobDao extends GenericDao<UsageJobVO, Long> {
UsageJobVO getLastJob();
UsageJobVO getNextRecurringJob();
UsageJobVO getNextImmediateJob();
long getLastJobSuccessDateMillis();

View File

@ -156,7 +156,8 @@ public class UsageJobDaoImpl extends GenericDaoBase<UsageJobVO, Long> implements
return jobs.get(0);
}
private UsageJobVO getNextRecurringJob() {
@Override
public UsageJobVO getNextRecurringJob() {
Filter filter = new Filter(UsageJobVO.class, "id", false, Long.valueOf(0), Long.valueOf(1));
SearchCriteria<UsageJobVO> sc = createSearchCriteria();
sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0));

View File

@ -56,6 +56,8 @@ StateDao<ObjectInDataStoreStateMachine.State, ObjectInDataStoreStateMachine.Even
List<SnapshotDataStoreVO> findBySnapshotId(long snapshotId);
List<SnapshotDataStoreVO> findBySnapshotIdWithNonDestroyedState(long snapshotId);
void duplicateCacheRecordsOnRegionStore(long storeId);
// delete the snapshot entry on primary data store to make sure that next snapshot will be full snapshot
@ -108,4 +110,18 @@ StateDao<ObjectInDataStoreStateMachine.State, ObjectInDataStoreStateMachine.Even
void updateDisplayForSnapshotStoreRole(long snapshotId, long storeId, DataStoreRole role, boolean display);
int expungeBySnapshotList(List<Long> snapshotIds, Long batchSize);
/**
* Returns the total physical size, in bytes, of all snapshots stored on primary
* storage for the specified account that have not yet been backed up to
* secondary storage.
*
* <p>If no such snapshots are found, this method returns {@code 0}.</p>
*
* @param accountId the ID of the account whose snapshots on primary storage
* should be considered
* @return the total physical size in bytes of matching snapshots on primary
* storage, or {@code 0} if none are found
*/
long getSnapshotsPhysicalSizeOnPrimaryStorageByAccountId(long accountId);
}

View File

@ -78,6 +78,15 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
" order by created %s " +
" limit 1";
private static final String GET_PHYSICAL_SIZE_OF_SNAPSHOTS_ON_PRIMARY_BY_ACCOUNT = "SELECT SUM(s.physical_size) " +
"FROM cloud.snapshot_store_ref s " +
"LEFT JOIN cloud.snapshots ON s.snapshot_id = snapshots.id " +
"WHERE snapshots.account_id = ? " +
"AND snapshots.removed IS NULL " +
"AND s.state = 'Ready' " +
"AND s.store_role = 'Primary' " +
"AND NOT EXISTS (SELECT 1 FROM cloud.snapshot_store_ref i WHERE i.snapshot_id = s.snapshot_id AND i.store_role = 'Image')";
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
@ -118,7 +127,6 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
stateSearch.and(STATE, stateSearch.entity().getState(), SearchCriteria.Op.IN);
stateSearch.done();
idStateNeqSearch = createSearchBuilder();
idStateNeqSearch.and(SNAPSHOT_ID, idStateNeqSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NEQ);
@ -340,6 +348,13 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
/**
 * Retrieves all snapshot-store reference entries for the given snapshot id.
 * Only the snapshot-id condition of the shared search builder is set; its other
 * optional conditions are left unset.
 *
 * @param snapshotId id of the snapshot whose store references are wanted
 * @return the matching entries (possibly empty)
 */
@Override
public List<SnapshotDataStoreVO> findBySnapshotId(long snapshotId) {
    SearchCriteria<SnapshotDataStoreVO> sc = searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq.create();
    sc.setParameters(SNAPSHOT_ID, snapshotId);
    return listBy(sc);
}
@Override
public List<SnapshotDataStoreVO> findBySnapshotIdWithNonDestroyedState(long snapshotId) {
SearchCriteria<SnapshotDataStoreVO> sc = idStateNeqSearch.create();
sc.setParameters(SNAPSHOT_ID, snapshotId);
sc.setParameters(STATE, State.Destroyed);
@ -571,4 +586,23 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
sc.setParameters("snapshotIds", snapshotIds.toArray());
return batchExpunge(sc, batchSize);
}
/**
 * Sums the physical size, in bytes, of the account's snapshots that exist only on
 * primary storage (see {@link #GET_PHYSICAL_SIZE_OF_SNAPSHOTS_ON_PRIMARY_BY_ACCOUNT}:
 * state 'Ready', store role 'Primary', and no corresponding 'Image' store entry).
 *
 * @param accountId id of the account whose snapshots are measured
 * @return the summed physical size in bytes, or 0 when none match or the query fails
 */
@Override
public long getSnapshotsPhysicalSizeOnPrimaryStorageByAccountId(long accountId) {
    long snapshotsPhysicalSize = 0;
    // NOTE(review): the current transaction is closed by try-with-resources here — confirm
    // this matches the transaction lifecycle conventions of the other DAO methods.
    try (TransactionLegacy transactionLegacy = TransactionLegacy.currentTxn()) {
        try (PreparedStatement preparedStatement = transactionLegacy.prepareStatement(GET_PHYSICAL_SIZE_OF_SNAPSHOTS_ON_PRIMARY_BY_ACCOUNT)) {
            preparedStatement.setLong(1, accountId);
            try (ResultSet resultSet = preparedStatement.executeQuery()) {
                if (resultSet.next()) {
                    // SUM() yields SQL NULL when no rows match; ResultSet.getLong maps that to 0.
                    snapshotsPhysicalSize = resultSet.getLong(1);
                }
            }
        }
    } catch (SQLException e) {
        // Best-effort: log and fall through to return 0 instead of failing the caller.
        logger.warn("Failed to get the snapshots physical size for the account [{}] due to [{}].", accountId, e.getMessage(), e);
    }
    return snapshotsPhysicalSize;
}
}

View File

@ -31,4 +31,6 @@ public interface VMScheduledJobDao extends GenericDao<VMScheduledJobVO, Long> {
int expungeJobsForSchedules(List<Long> scheduleId, Date dateAfter);
int expungeJobsBefore(Date currentTimestamp);
VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp);
}

View File

@ -39,6 +39,8 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
private final SearchBuilder<VMScheduledJobVO> expungeJobForScheduleSearch;
private final SearchBuilder<VMScheduledJobVO> scheduleAndTimestampSearch;
static final String SCHEDULED_TIMESTAMP = "scheduled_timestamp";
static final String VM_SCHEDULE_ID = "vm_schedule_id";
@ -58,6 +60,11 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
expungeJobForScheduleSearch.and(VM_SCHEDULE_ID, expungeJobForScheduleSearch.entity().getVmScheduleId(), SearchCriteria.Op.IN);
expungeJobForScheduleSearch.and(SCHEDULED_TIMESTAMP, expungeJobForScheduleSearch.entity().getScheduledTime(), SearchCriteria.Op.GTEQ);
expungeJobForScheduleSearch.done();
scheduleAndTimestampSearch = createSearchBuilder();
scheduleAndTimestampSearch.and(VM_SCHEDULE_ID, scheduleAndTimestampSearch.entity().getVmScheduleId(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.and(SCHEDULED_TIMESTAMP, scheduleAndTimestampSearch.entity().getScheduledTime(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.done();
}
/**
@ -92,4 +99,12 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
sc.setParameters(SCHEDULED_TIMESTAMP, date);
return expunge(sc);
}
/**
 * Looks up the scheduled-job entry matching both the given VM schedule and the exact
 * scheduled execution time.
 *
 * @param scheduleId         id of the VM schedule the job belongs to
 * @param scheduledTimestamp exact timestamp the job was scheduled for
 * @return the matching job, or null when none exists
 */
@Override
public VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp) {
    SearchCriteria<VMScheduledJobVO> criteria = scheduleAndTimestampSearch.create();
    criteria.setParameters(VM_SCHEDULE_ID, scheduleId);
    criteria.setParameters(SCHEDULED_TIMESTAMP, scheduledTimestamp);
    return findOneBy(criteria);
}
}

View File

@ -0,0 +1,28 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements.  See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership.  The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License.  You may obtain a copy of the License at
--
--   http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied.  See the License for the
-- specific language governing permissions and limitations
-- under the License.

--;
-- Schema upgrade from 4.20.2.0 to 4.20.3.0
--;

-- Widen download_url, presumably to accommodate longer (e.g. pre-signed) URLs — confirm against callers.
ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(2048);

-- Re-number the VR interface MTU alert types, whose constants previously collided with type 32.
UPDATE `cloud`.`alert` SET type = 33 WHERE name = 'ALERT.VR.PUBLIC.IFACE.MTU';
UPDATE `cloud`.`alert` SET type = 34 WHERE name = 'ALERT.VR.PRIVATE.IFACE.MTU';

-- Update configuration 'kvm.ssh.to.agent' description and is_dynamic fields
UPDATE `cloud`.`configuration` SET description = 'True if the management server will restart the agent service via SSH into the KVM hosts after or during maintenance operations', is_dynamic = 1 WHERE name = 'kvm.ssh.to.agent';

View File

@ -231,9 +231,9 @@ public class ConfigDriveBuilder {
throw new CloudRuntimeException("Cannot create ISO for config drive using any know tool. Known paths [/usr/bin/genisoimage, /usr/bin/mkisofs, /usr/local/bin/mkisofs]");
}
if (!isoCreator.canExecute()) {
throw new CloudRuntimeException("Cannot create ISO for config drive using: " + isoCreator.getCanonicalPath());
throw new CloudRuntimeException("Cannot create ISO for config drive using: " + isoCreator.getAbsolutePath());
}
return isoCreator.getCanonicalPath();
return isoCreator.getAbsolutePath();
}
/**

View File

@ -435,7 +435,7 @@ public class ConfigDriveBuilderTest {
Mockito.verify(genIsoFileMock, Mockito.times(2)).exists();
Mockito.verify(genIsoFileMock).canExecute();
Mockito.verify(genIsoFileMock).getCanonicalPath();
Mockito.verify(genIsoFileMock).getAbsolutePath();
}
}
@ -475,11 +475,11 @@ public class ConfigDriveBuilderTest {
Mockito.verify(genIsoFileMock, Mockito.times(1)).exists();
Mockito.verify(genIsoFileMock, Mockito.times(0)).canExecute();
Mockito.verify(genIsoFileMock, Mockito.times(0)).getCanonicalPath();
Mockito.verify(genIsoFileMock, Mockito.times(0)).getAbsolutePath();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(2)).exists();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).canExecute();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).getCanonicalPath();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).getAbsolutePath();
}
}
@ -509,15 +509,15 @@ public class ConfigDriveBuilderTest {
Mockito.verify(genIsoFileMock, Mockito.times(1)).exists();
Mockito.verify(genIsoFileMock, Mockito.times(0)).canExecute();
Mockito.verify(genIsoFileMock, Mockito.times(0)).getCanonicalPath();
Mockito.verify(genIsoFileMock, Mockito.times(0)).getAbsolutePath();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(1)).exists();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).canExecute();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).getCanonicalPath();
Mockito.verify(mkIsoProgramInLinuxFileMock, Mockito.times(0)).getAbsolutePath();
Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).exists();
Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).canExecute();
Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).getCanonicalPath();
Mockito.verify(mkIsoProgramInMacOsFileMock, Mockito.times(1)).getAbsolutePath();
}
}

View File

@ -45,10 +45,12 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.secstorage.heuristics.HeuristicType;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CopyCommand;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.logging.log4j.Logger;
@ -104,6 +106,9 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
@Inject
SnapshotDao snapshotDao;
@Inject
HeuristicRuleHelper heuristicRuleHelper;
@Override
public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
return StrategyPriority.DEFAULT;
@ -374,7 +379,13 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
// need to find a nfs or cifs image store, assuming that can't copy volume
// directly to s3
ImageStoreEntity imageStore = (ImageStoreEntity)dataStoreMgr.getImageStoreWithFreeCapacity(destScope.getScopeId());
Long zoneId = destScope.getScopeId();
ImageStoreEntity imageStore = (ImageStoreEntity) heuristicRuleHelper.getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.VOLUME, destData);
if (imageStore == null) {
logger.debug("Secondary storage selector did not direct volume migration to a specific secondary storage; using secondary storage with the most free capacity.");
imageStore = (ImageStoreEntity) dataStoreMgr.getImageStoreWithFreeCapacity(zoneId);
}
if (imageStore == null || !imageStore.getProtocol().equalsIgnoreCase("nfs") && !imageStore.getProtocol().equalsIgnoreCase("cifs")) {
String errMsg = "can't find a nfs (or cifs) image store to satisfy the need for a staging store";
Answer answer = new Answer(null, false, errMsg);

View File

@ -280,7 +280,7 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService {
private void updateDataObject(DataObject srcData, DataObject destData) {
if (destData instanceof SnapshotInfo) {
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySourceSnapshot(srcData.getId(), DataStoreRole.Image);
SnapshotDataStoreVO destSnapshotStore = snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, srcData.getDataStore().getId(), srcData.getId());
SnapshotDataStoreVO destSnapshotStore = snapshotStoreDao.findByStoreSnapshot(DataStoreRole.Image, destData.getDataStore().getId(), destData.getId());
if (snapshotStore != null && destSnapshotStore != null) {
destSnapshotStore.setPhysicalSize(snapshotStore.getPhysicalSize());
destSnapshotStore.setCreated(snapshotStore.getCreated());

View File

@ -296,6 +296,9 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory {
/**
 * Checks whether the template with the given id is flagged for direct download.
 *
 * @param templateId id of the template to inspect
 * @return true when the template record carries the direct-download flag
 * @throws CloudRuntimeException when no template exists with the given id
 */
@Override
public boolean isTemplateMarkedForDirectDownload(long templateId) {
    final VMTemplateVO template = imageDataDao.findById(templateId);
    if (template == null) {
        throw new CloudRuntimeException(String.format("Template not found with ID: %s", templateId));
    }
    return template.isDirectDownload();
}
}

View File

@ -31,6 +31,8 @@ import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import com.cloud.exception.StorageUnavailableException;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
@ -67,9 +69,11 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.storage.image.store.TemplateObject;
import org.apache.cloudstack.storage.to.TemplateObjectTO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.ThreadContext;
import org.springframework.stereotype.Component;
import com.cloud.agent.api.Answer;
@ -290,21 +294,41 @@ public class TemplateServiceImpl implements TemplateService {
}
}
protected boolean isSkipTemplateStoreDownload(VMTemplateVO template, Long zoneId) {
protected boolean shouldDownloadTemplateToStore(VMTemplateVO template, DataStore store) {
Long zoneId = store.getScope().getScopeId();
DataStore directedStore = _tmpltMgr.verifyHeuristicRulesForZone(template, zoneId);
if (directedStore != null && store.getId() != directedStore.getId()) {
logger.info("Template [{}] will not be download to image store [{}], as a heuristic rule is directing it to another store.",
template.getUniqueName(), store.getName());
return false;
}
if (template.isPublicTemplate()) {
return false;
logger.debug("Download of template [{}] to image store [{}] cannot be skipped, as it is public.", template.getUniqueName(),
store.getName());
return true;
}
if (template.isFeatured()) {
return false;
logger.debug("Download of template [{}] to image store [{}] cannot be skipped, as it is featured.", template.getUniqueName(),
store.getName());
return true;
}
if (TemplateType.SYSTEM.equals(template.getTemplateType())) {
return false;
logger.debug("Download of template [{}] to image store [{}] cannot be skipped, as it is a system VM template.",
template.getUniqueName(),store.getName());
return true;
}
if (zoneId != null && _vmTemplateStoreDao.findByTemplateZone(template.getId(), zoneId, DataStoreRole.Image) == null) {
logger.debug("Template {} is not present on any image store for the zone ID: {}, its download cannot be skipped", template, zoneId);
return false;
logger.debug("Download of template [{}] to image store [{}] cannot be skipped, as it is not present on any image store of zone [{}].",
template.getUniqueName(), store.getName(), zoneId);
return true;
}
return true;
logger.info("Skipping download of template [{}] to image store [{}].", template.getUniqueName(), store.getName());
return false;
}
@Override
@ -531,8 +555,7 @@ public class TemplateServiceImpl implements TemplateService {
// download.
for (VMTemplateVO tmplt : toBeDownloaded) {
// if this is private template, skip sync to a new image store
if (isSkipTemplateStoreDownload(tmplt, zoneId)) {
logger.info("Skip sync downloading private template {} to a new image store", tmplt);
if (!shouldDownloadTemplateToStore(tmplt, store)) {
continue;
}
@ -548,10 +571,7 @@ public class TemplateServiceImpl implements TemplateService {
}
if (availHypers.contains(tmplt.getHypervisorType())) {
boolean copied = isCopyFromOtherStoragesEnabled(zoneId) && tryCopyingTemplateToImageStore(tmplt, store);
if (!copied) {
tryDownloadingTemplateToImageStore(tmplt, store);
}
storageOrchestrator.orchestrateTemplateCopyFromSecondaryStores(tmplt.getId(), store);
} else {
logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType());
}
@ -598,6 +618,16 @@ public class TemplateServiceImpl implements TemplateService {
}
/**
 * Ensures the given template ends up on the destination image store: first tries to copy
 * it from another secondary store (when enabled for the store/zone), and falls back to
 * downloading it otherwise.
 *
 * @param templateId id of the template to place on the destination store
 * @param destStore  image store that should hold the template
 * @throws CloudRuntimeException when no template exists with the given id
 */
@Override
public void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore) {
    VMTemplateVO template = _templateDao.findById(templateId);
    if (template == null) {
        // Fail fast instead of hitting an NPE deeper in the copy/download path.
        throw new CloudRuntimeException(String.format("Template not found with ID: %s", templateId));
    }
    // NOTE(review): getScope().getScopeId() may be null for non-zone scopes — confirm destStore is always zone-scoped here.
    long zoneId = destStore.getScope().getScopeId();
    boolean copied = imageStoreDetailsUtil.isCopyTemplatesFromOtherStoragesEnabled(destStore.getId(), zoneId)
            && tryCopyingTemplateToImageStore(template, destStore);
    if (!copied) {
        tryDownloadingTemplateToImageStore(template, destStore);
    }
}
protected void tryDownloadingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) {
if (tmplt.getUrl() == null) {
logger.info("Not downloading template [{}] to image store [{}], as it has no URL.", tmplt.getUniqueName(),
@ -615,28 +645,134 @@ public class TemplateServiceImpl implements TemplateService {
}
protected boolean tryCopyingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) {
Long zoneId = destStore.getScope().getScopeId();
List<DataStore> storesInZone = _storeMgr.getImageStoresByZoneIds(zoneId);
for (DataStore sourceStore : storesInZone) {
Map<String, TemplateProp> existingTemplatesInSourceStore = listTemplate(sourceStore);
if (existingTemplatesInSourceStore == null || !existingTemplatesInSourceStore.containsKey(tmplt.getUniqueName())) {
logger.debug("Template [{}] does not exist on image store [{}]; searching on another one.",
tmplt.getUniqueName(), sourceStore.getName());
continue;
}
TemplateObject sourceTmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), sourceStore);
if (sourceTmpl.getInstallPath() == null) {
logger.warn("Can not copy template [{}] from image store [{}], as it returned a null install path.", tmplt.getUniqueName(),
sourceStore.getName());
continue;
}
storageOrchestrator.orchestrateTemplateCopyToImageStore(sourceTmpl, destStore);
if (searchAndCopyWithinZone(tmplt, destStore)) {
return true;
}
logger.debug("Can't copy template [{}] from another image store.", tmplt.getUniqueName());
Long destZoneId = destStore.getScope().getScopeId();
logger.debug("Template [{}] not found in any image store of zone [{}]. Checking other zones.",
tmplt.getUniqueName(), destZoneId);
return searchAndCopyAcrossZones(tmplt, destStore, destZoneId);
}
/**
 * Scans every other zone's image stores for a usable copy of the given template and,
 * when one is found, triggers a cross-zone copy to the destination store.
 *
 * @param tmplt      template to locate
 * @param destStore  store the template should be copied to
 * @param destZoneId zone of the destination store (skipped during the scan)
 * @return the outcome of the cross-zone copy for the first zone holding a usable copy,
 *         or false when no zone holds one
 */
private boolean searchAndCopyAcrossZones(VMTemplateVO tmplt, DataStore destStore, Long destZoneId) {
    for (Long candidateZoneId : _dcDao.listAllIds()) {
        if (candidateZoneId.equals(destZoneId)) {
            continue;
        }
        List<DataStore> candidateStores = _storeMgr.getImageStoresByZoneIds(candidateZoneId);
        logger.debug("Checking zone [{}] for template [{}]...", candidateZoneId, tmplt.getUniqueName());
        if (CollectionUtils.isEmpty(candidateStores)) {
            logger.debug("Zone [{}] has no image stores. Skipping.", candidateZoneId);
            continue;
        }
        TemplateObject source = findUsableTemplate(tmplt, candidateStores);
        if (source == null) {
            logger.debug("Template [{}] not found with a valid install path in any image store of zone [{}].",
                    tmplt.getUniqueName(), candidateZoneId);
            continue;
        }
        logger.info("Template [{}] found in zone [{}]. Initiating cross-zone copy to zone [{}].",
                tmplt.getUniqueName(), candidateZoneId, destZoneId);
        return copyTemplateAcrossZones(destStore, source);
    }
    logger.debug("Template [{}] was not found in any zone. Cannot perform zone-to-zone copy.", tmplt.getUniqueName());
    return false;
}
/**
 * Returns the first template object, taken from the given image stores in order, that is
 * both listed on the store (by unique name) and reports a non-null install path.
 *
 * @param tmplt       template to look for
 * @param imageStores stores to search, in order
 * @return a usable template object, or null when none of the stores has one
 */
protected TemplateObject findUsableTemplate(VMTemplateVO tmplt, List<DataStore> imageStores) {
    for (DataStore candidate : imageStores) {
        Map<String, TemplateProp> knownTemplates = listTemplate(candidate);
        boolean presentOnStore = knownTemplates != null && knownTemplates.containsKey(tmplt.getUniqueName());
        if (!presentOnStore) {
            continue;
        }
        TemplateObject found = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), candidate);
        if (found.getInstallPath() != null) {
            return found;
        }
        logger.debug("Template [{}] found in image store [{}] but install path is null. Skipping.",
                tmplt.getUniqueName(), candidate.getName());
    }
    return null;
}
/**
 * Tries to copy the given template to the destination store from another image store in
 * the same zone, waiting synchronously for the copy to finish.
 *
 * @param tmplt     template to copy
 * @param destStore destination image store
 * @return true when a usable source was found and the copy succeeded; false otherwise
 */
private boolean searchAndCopyWithinZone(VMTemplateVO tmplt, DataStore destStore) {
    Long destZoneId = destStore.getScope().getScopeId();
    List<DataStore> storesInSameZone = _storeMgr.getImageStoresByZoneIds(destZoneId);
    TemplateObject sourceTmpl = findUsableTemplate(tmplt, storesInSameZone);
    if (sourceTmpl == null) {
        return false;
    }
    TemplateApiResult result;
    AsyncCallFuture<TemplateApiResult> future = copyTemplateToImageStore(sourceTmpl, destStore);
    try {
        result = future.get();
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            // Restore the interrupt flag so callers up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
        }
        logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}",
                sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString());
        result = new TemplateApiResult(sourceTmpl);
        result.setResult(e.getMessage());
    }
    return result.isSuccess();
}
/**
 * Copies the given source template object to the destination store's zone using the
 * template manager's cross-zone copy, on behalf of the current calling user.
 *
 * @param destStore  destination image store (its zone is resolved and validated first)
 * @param sourceTmpl template object on a source store in another zone
 * @return true when the cross-zone copy reports success; false on any failure
 */
private boolean copyTemplateAcrossZones(DataStore destStore, TemplateObject sourceTmpl) {
    Long dstZoneId = destStore.getScope().getScopeId();
    DataCenterVO dstZone = _dcDao.findById(dstZoneId);
    if (dstZone == null) {
        logger.warn("Destination zone [{}] not found for template [{}].", dstZoneId, sourceTmpl.getUniqueName());
        return false;
    }
    TemplateApiResult result;
    try {
        VMTemplateVO template = _templateDao.findById(sourceTmpl.getId());
        try {
            DataStore sourceStore = sourceTmpl.getDataStore();
            long userId = CallContext.current().getCallingUserId();
            // _tmpltMgr.copy performs the actual cross-zone transfer and returns success/failure.
            boolean success = _tmpltMgr.copy(userId, template, sourceStore, dstZone);
            result = new TemplateApiResult(sourceTmpl);
            if (!success) {
                result.setResult("Cross-zone template copy failed");
            }
        } catch (StorageUnavailableException | ResourceAllocationException e) {
            logger.error("Exception while copying template [{}] from zone [{}] to zone [{}]",
                    template,
                    sourceTmpl.getDataStore().getScope().getScopeId(),
                    dstZone.getId(),
                    e);
            result = new TemplateApiResult(sourceTmpl);
            result.setResult(e.getMessage());
        } finally {
            // Clear log4j2 thread context populated during the copy; only covers the inner try.
            ThreadContext.clearAll();
        }
    } catch (Exception e) {
        // NOTE(review): broad catch — also absorbs runtime errors from findById above; confirm intended.
        logger.error("Failed to copy template [{}] from zone [{}] to zone [{}].",
                sourceTmpl.getUniqueName(),
                sourceTmpl.getDataStore().getScope().getScopeId(),
                dstZoneId,
                e);
        return false;
    }
    return result.isSuccess();
}
@Override
public AsyncCallFuture<TemplateApiResult> copyTemplateToImageStore(DataObject source, DataStore destStore) {
TemplateObject sourceTmpl = (TemplateObject) source;
@ -680,10 +816,6 @@ public class TemplateServiceImpl implements TemplateService {
return null;
}
protected boolean isCopyFromOtherStoragesEnabled(Long zoneId) {
return StorageManager.COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES.valueIn(zoneId);
}
protected void publishTemplateCreation(TemplateInfo tmplt) {
VMTemplateVO tmpltVo = _templateDao.findById(tmplt.getId());
@ -1318,9 +1450,10 @@ public class TemplateServiceImpl implements TemplateService {
if (_vmTemplateStoreDao.isTemplateMarkedForDirectDownload(tmplt.getId())) {
continue;
}
tmpltStore =
new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED, null, null, null,
TemplateConstants.DEFAULT_SYSTEM_VM_TEMPLATE_PATH + tmplt.getId() + '/', tmplt.getUrl());
String templateDirectoryPath = TemplateConstants.DEFAULT_TMPLT_ROOT_DIR + File.separator + TemplateConstants.DEFAULT_TMPLT_FIRST_LEVEL_DIR;
String installPath = templateDirectoryPath + tmplt.getAccountId() + File.separator + tmplt.getId() + File.separator;
tmpltStore = new TemplateDataStoreVO(storeId, tmplt.getId(), new Date(), 100, Status.DOWNLOADED,
null, null, null, installPath, tmplt.getUrl());
tmpltStore.setSize(0L);
tmpltStore.setPhysicalSize(0); // no size information for
// pre-seeded system vm templates

View File

@ -18,12 +18,20 @@
*/
package org.apache.cloudstack.storage.image;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.StorageUnavailableException;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.template.TemplateProp;
import com.cloud.template.TemplateManager;
import com.cloud.user.Account;
import com.cloud.user.User;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
import org.apache.cloudstack.storage.image.store.TemplateObject;
@ -45,6 +53,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Mockito.mock;
@RunWith(MockitoJUnitRunner.class)
public class TemplateServiceImplTest {
@ -70,6 +80,9 @@ public class TemplateServiceImplTest {
@Mock
TemplateObject templateInfoMock;
@Mock
DataStore dataStoreMock;
@Mock
DataStore sourceStoreMock;
@ -82,6 +95,15 @@ public class TemplateServiceImplTest {
@Mock
StorageOrchestrationService storageOrchestrator;
@Mock
TemplateManager templateManagerMock;
@Mock
VMTemplateDao templateDao;
@Mock
DataCenterDao _dcDao;
Map<String, TemplateProp> templatesInSourceStore = new HashMap<>();
@Before
@ -94,47 +116,46 @@ public class TemplateServiceImplTest {
Mockito.doReturn(List.of(sourceStoreMock, destStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(zoneId);
Mockito.doReturn(templatesInSourceStore).when(templateService).listTemplate(sourceStoreMock);
Mockito.doReturn(null).when(templateService).listTemplate(destStoreMock);
Mockito.doReturn("install-path").when(templateInfoMock).getInstallPath();
Mockito.doReturn(templateInfoMock).when(templateDataFactoryMock).getTemplate(2L, sourceStoreMock);
Mockito.doReturn(3L).when(dataStoreMock).getId();
Mockito.doReturn(zoneScopeMock).when(dataStoreMock).getScope();
}
@Test
public void testIsSkipTemplateStoreDownloadPublicTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
Mockito.when(templateVO.isPublicTemplate()).thenReturn(true);
Assert.assertFalse(templateService.isSkipTemplateStoreDownload(templateVO, 1L));
public void shouldDownloadTemplateToStoreTestSkipsTemplateDirectedToAnotherStorage() {
DataStore destinedStore = Mockito.mock(DataStore.class);
Mockito.doReturn(dataStoreMock.getId() + 1L).when(destinedStore).getId();
Mockito.when(templateManagerMock.verifyHeuristicRulesForZone(tmpltMock, zoneScopeMock.getScopeId())).thenReturn(destinedStore);
Assert.assertFalse(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
public void testIsSkipTemplateStoreDownloadFeaturedTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
Mockito.when(templateVO.isFeatured()).thenReturn(true);
Assert.assertFalse(templateService.isSkipTemplateStoreDownload(templateVO, 1L));
public void shouldDownloadTemplateToStoreTestDownloadsPublicTemplate() {
Mockito.when(tmpltMock.isPublicTemplate()).thenReturn(true);
Assert.assertTrue(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
public void testIsSkipTemplateStoreDownloadSystemTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
Mockito.when(templateVO.getTemplateType()).thenReturn(Storage.TemplateType.SYSTEM);
Assert.assertFalse(templateService.isSkipTemplateStoreDownload(templateVO, 1L));
public void shouldDownloadTemplateToStoreTestDownloadsFeaturedTemplate() {
Mockito.when(tmpltMock.isFeatured()).thenReturn(true);
Assert.assertTrue(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
public void testIsSkipTemplateStoreDownloadPrivateNoRefTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
long id = 1L;
Mockito.when(templateVO.getId()).thenReturn(id);
Mockito.when(templateDataStoreDao.findByTemplateZone(id, id, DataStoreRole.Image)).thenReturn(null);
Assert.assertFalse(templateService.isSkipTemplateStoreDownload(templateVO, id));
public void shouldDownloadTemplateToStoreTestDownloadsSystemTemplate() {
Mockito.when(tmpltMock.getTemplateType()).thenReturn(Storage.TemplateType.SYSTEM);
Assert.assertTrue(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
public void testIsSkipTemplateStoreDownloadPrivateExistingTemplate() {
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);
long id = 1L;
Mockito.when(templateVO.getId()).thenReturn(id);
Mockito.when(templateDataStoreDao.findByTemplateZone(id, id, DataStoreRole.Image)).thenReturn(Mockito.mock(TemplateDataStoreVO.class));
Assert.assertTrue(templateService.isSkipTemplateStoreDownload(templateVO, id));
public void shouldDownloadTemplateToStoreTestDownloadsPrivateNoRefTemplate() {
Assert.assertTrue(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
public void shouldDownloadTemplateToStoreTestSkipsPrivateExistingTemplate() {
Mockito.when(templateDataStoreDao.findByTemplateZone(tmpltMock.getId(), zoneScopeMock.getScopeId(), DataStoreRole.Image)).thenReturn(Mockito.mock(TemplateDataStoreVO.class));
Assert.assertFalse(templateService.shouldDownloadTemplateToStore(tmpltMock, dataStoreMock));
}
@Test
@ -159,7 +180,7 @@ public class TemplateServiceImplTest {
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyFromSecondaryStores(Mockito.anyLong(), Mockito.any());
}
@Test
@ -167,20 +188,161 @@ public class TemplateServiceImplTest {
templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock);
Mockito.doReturn(null).when(templateInfoMock).getInstallPath();
Scope scopeMock = Mockito.mock(Scope.class);
Mockito.doReturn(scopeMock).when(destStoreMock).getScope();
Mockito.doReturn(1L).when(scopeMock).getScopeId();
Mockito.doReturn(List.of(1L)).when(_dcDao).listAllIds();
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
Mockito.verify(storageOrchestrator, Mockito.never()).orchestrateTemplateCopyFromSecondaryStores(Mockito.anyLong(), Mockito.any());
}
@Test
public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenTemplateExistsInAnotherStorageAndTaskWasScheduled() {
templatesInSourceStore.put(tmpltMock.getUniqueName(), tmpltPropMock);
Mockito.doReturn(new AsyncCallFuture<>()).when(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenTemplateExistsInAnotherZone() throws StorageUnavailableException, ResourceAllocationException {
Scope scopeMock = Mockito.mock(Scope.class);
Mockito.doReturn(scopeMock).when(destStoreMock).getScope();
Mockito.doReturn(1L).when(scopeMock).getScopeId();
Mockito.doReturn(100L).when(tmpltMock).getId();
Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName();
Mockito.doReturn(List.of(sourceStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(1L);
Mockito.doReturn(null).when(templateService).listTemplate(sourceStoreMock);
Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds();
DataStore otherZoneStoreMock = Mockito.mock(DataStore.class);
Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L);
Map<String, TemplateProp> templatesInOtherZone = new HashMap<>();
templatesInOtherZone.put("unique-name", tmpltPropMock);
Mockito.doReturn(templatesInOtherZone).when(templateService).listTemplate(otherZoneStoreMock);
TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class);
Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock);
Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath();
DataCenterVO dstZoneMock = Mockito.mock(DataCenterVO.class);
Mockito.doReturn(dstZoneMock).when(_dcDao).findById(1L);
Mockito.doReturn(true).when(templateManagerMock).copy(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any());
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertTrue(result);
Mockito.verify(storageOrchestrator).orchestrateTemplateCopyToImageStore(Mockito.any(), Mockito.any());
}
@Test
// Verifies that the copy is aborted (returns false) when the template IS found in another
// zone's image store, but the destination zone record itself cannot be loaded from the DAO.
public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenDestinationZoneIsMissing() {
// Destination store is scoped to zone 1; zone 1 has no image stores of its own.
Scope scopeMock = Mockito.mock(Scope.class);
Mockito.doReturn(scopeMock).when(destStoreMock).getScope();
Mockito.doReturn(1L).when(scopeMock).getScopeId();
Mockito.doReturn(100L).when(tmpltMock).getId();
Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName();
Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds();
Mockito.doReturn(List.of()).when(dataStoreManagerMock).getImageStoresByZoneIds(1L);
// Zone 2 holds a usable copy of the template (listed under its unique name,
// with a non-null install path), so the cross-zone path is exercised.
DataStore otherZoneStoreMock = Mockito.mock(DataStore.class);
Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L);
Map<String, TemplateProp> templates = new HashMap<>();
templates.put("unique-name", tmpltPropMock);
Mockito.doReturn(templates).when(templateService).listTemplate(otherZoneStoreMock);
TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class);
Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock);
Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath();
// The failure under test: the destination zone lookup yields null.
Mockito.doReturn(null).when(_dcDao).findById(1L);
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
}
@Test
// Happy-path cross-zone copy: the template is absent from the destination zone (zone 1)
// but present in zone 2; the template manager accepts the copy, so the call returns true.
public void tryCopyingTemplateToImageStoreTestReturnsTrueWhenCrossZoneCopyTaskIsScheduled() throws StorageUnavailableException, ResourceAllocationException {
// Destination store is scoped to zone 1, which has no image stores of its own.
Scope scopeMock = Mockito.mock(Scope.class);
Mockito.doReturn(scopeMock).when(destStoreMock).getScope();
Mockito.doReturn(1L).when(scopeMock).getScopeId();
Mockito.doReturn(100L).when(tmpltMock).getId();
Mockito.doReturn("unique-name").when(tmpltMock).getUniqueName();
Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds();
Mockito.doReturn(List.of()).when(dataStoreManagerMock).getImageStoresByZoneIds(1L);
// Zone 2 holds a usable copy of the template with a non-null install path.
DataStore otherZoneStoreMock = Mockito.mock(DataStore.class);
Mockito.doReturn(List.of(otherZoneStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(2L);
Map<String, TemplateProp> templates = new HashMap<>();
templates.put("unique-name", tmpltPropMock);
Mockito.doReturn(templates).when(templateService).listTemplate(otherZoneStoreMock);
TemplateObject sourceTmplMock = Mockito.mock(TemplateObject.class);
Mockito.doReturn(sourceTmplMock).when(templateDataFactoryMock).getTemplate(100L, otherZoneStoreMock);
Mockito.doReturn("/mnt/secondary/template.qcow2").when(sourceTmplMock).getInstallPath();
Mockito.doReturn(100L).when(sourceTmplMock).getId();
// Renamed from "sourceStoreMock" to avoid shadowing the test-class field of that name.
DataStore crossZoneSourceStoreMock = Mockito.mock(DataStore.class);
Mockito.doReturn(crossZoneSourceStoreMock).when(sourceTmplMock).getDataStore();
DataCenterVO dstZoneMock = Mockito.mock(DataCenterVO.class);
Mockito.doReturn(dstZoneMock).when(_dcDao).findById(1L);
VMTemplateVO templateVoMock = Mockito.mock(VMTemplateVO.class);
Mockito.doReturn(templateVoMock).when(templateDao).findById(100L);
// The copy itself is stubbed to succeed, which is what makes the method return true.
Mockito.doReturn(true).when(templateManagerMock).copy(Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.any());
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertTrue(result);
}
@Test
// Verifies that the copy is skipped (returns false) when no image store in ANY zone
// lists the template — listTemplate() is stubbed to return null for every store.
public void tryCopyingTemplateToImageStoreTestReturnsFalseWhenTemplateNotFoundInAnyZone() {
Scope scopeMock = Mockito.mock(Scope.class);
Mockito.doReturn(scopeMock).when(destStoreMock).getScope();
Mockito.doReturn(1L).when(scopeMock).getScopeId();
Mockito.doReturn(List.of(1L, 2L)).when(_dcDao).listAllIds();
// Broad matchers: every zone resolves to the same store, and every store reports no templates.
Mockito.doReturn(List.of(sourceStoreMock)).when(dataStoreManagerMock).getImageStoresByZoneIds(Mockito.anyLong());
Mockito.doReturn(null).when(templateService).listTemplate(Mockito.any());
boolean result = templateService.tryCopyingTemplateToImageStore(tmpltMock, destStoreMock);
Assert.assertFalse(result);
}
@Test
// findUsableTemplate must skip stores whose template ref has a null install path and
// return the first template object that actually has a usable (non-null) install path.
public void testFindUsableTemplateReturnsTemplateWithNonNullInstallPath() {
VMTemplateVO template = Mockito.mock(VMTemplateVO.class);
Mockito.when(template.getId()).thenReturn(10L);
Mockito.when(template.getUniqueName()).thenReturn("test-template");
// First store resolves the template but with a null install path (unusable).
DataStore storeWithNullPath = Mockito.mock(DataStore.class);
Mockito.when(storeWithNullPath.getName()).thenReturn("store-null");
// Second store holds a fully usable copy.
DataStore storeWithValidPath = Mockito.mock(DataStore.class);
TemplateObject tmplWithNullPath = Mockito.mock(TemplateObject.class);
Mockito.when(tmplWithNullPath.getInstallPath()).thenReturn(null);
TemplateObject tmplWithValidPath = Mockito.mock(TemplateObject.class);
Mockito.when(tmplWithValidPath.getInstallPath()).thenReturn("/mnt/secondary/template.qcow2");
Mockito.doReturn(tmplWithNullPath).when(templateDataFactoryMock).getTemplate(10L, storeWithNullPath);
Mockito.doReturn(tmplWithValidPath).when(templateDataFactoryMock).getTemplate(10L, storeWithValidPath);
// Both stores list the template under its unique name, so selection is decided
// purely by the install-path check, not by listing.
Map<String, TemplateProp> templates = new HashMap<>();
templates.put("test-template", Mockito.mock(TemplateProp.class));
Mockito.doReturn(templates).when(templateService).listTemplate(storeWithNullPath);
Mockito.doReturn(templates).when(templateService).listTemplate(storeWithValidPath);
// Order matters: the null-path store comes first and must be passed over.
List<DataStore> imageStores = List.of(storeWithNullPath, storeWithValidPath);
TemplateObject result = templateService.findUsableTemplate(template, imageStores);
Assert.assertNotNull(result);
Assert.assertEquals(tmplWithValidPath, result);
}
}

View File

@ -268,7 +268,7 @@ public class SnapshotTest extends CloudStackTestNGBase {
to.setSize(1000L);
CopyCmdAnswer answer = new CopyCmdAnswer(to);
templateOnStore.processEvent(Event.CreateOnlyRequested);
templateOnStore.processEvent(Event.OperationSuccessed, answer);
templateOnStore.processEvent(Event.OperationSucceeded, answer);
}

View File

@ -244,7 +244,7 @@ public class VolumeTest extends CloudStackTestNGBase {
to.setSize(100L);
CopyCmdAnswer answer = new CopyCmdAnswer(to);
templateOnStore.processEvent(Event.CreateOnlyRequested);
templateOnStore.processEvent(Event.OperationSuccessed, answer);
templateOnStore.processEvent(Event.OperationSucceeded, answer);
}

View File

@ -246,7 +246,7 @@ public class VolumeTestVmware extends CloudStackTestNGBase {
to.setPath(this.getImageInstallPath());
CopyCmdAnswer answer = new CopyCmdAnswer(to);
templateOnStore.processEvent(Event.CreateOnlyRequested);
templateOnStore.processEvent(Event.OperationSuccessed, answer);
templateOnStore.processEvent(Event.OperationSucceeded, answer);
}

View File

@ -270,7 +270,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
}
if (Snapshot.State.Error.equals(snapshotVO.getState())) {
List<SnapshotDataStoreVO> storeRefs = snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> storeRefs = snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
List<Long> deletedRefs = new ArrayList<>();
for (SnapshotDataStoreVO ref : storeRefs) {
boolean refZoneIdMatch = false;
@ -351,7 +351,7 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
protected Boolean deleteSnapshotInfo(SnapshotInfo snapshotInfo, SnapshotVO snapshotVo) {
DataStore dataStore = snapshotInfo.getDataStore();
String storageToString = String.format("%s {uuid: \"%s\", name: \"%s\"}", dataStore.getRole().name(), dataStore.getUuid(), dataStore.getName());
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotId(snapshotVo.getId());
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotVo.getId());
boolean isLastSnapshotRef = CollectionUtils.isEmpty(snapshotStoreRefs) || snapshotStoreRefs.size() == 1;
try {
SnapshotObject snapshotObject = castSnapshotInfoToSnapshotObject(snapshotInfo);

View File

@ -94,7 +94,7 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory {
if (snapshot == null) { //snapshot may have been removed;
return new ArrayList<>();
}
List<SnapshotDataStoreVO> allSnapshotsAndDataStore = snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> allSnapshotsAndDataStore = snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
if (CollectionUtils.isEmpty(allSnapshotsAndDataStore)) {
return new ArrayList<>();
}
@ -118,7 +118,23 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory {
if (snapshot == null) {
return null;
}
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findByStoreSnapshot(role, storeId, snapshotId);
return getSnapshotOnStore(snapshot, storeId, role);
}
@Override
public SnapshotInfo getSnapshotIncludingRemoved(long snapshotId, long storeId, DataStoreRole role) {
SnapshotVO snapshot = snapshotDao.findByIdIncludingRemoved(snapshotId);
if (snapshot == null) {
return null;
}
return getSnapshotOnStore(snapshot, storeId, role);
}
private SnapshotInfo getSnapshotOnStore(SnapshotVO snapshot, long storeId, DataStoreRole role) {
if (snapshot == null) {
return null;
}
SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findByStoreSnapshot(role, storeId, snapshot.getId());
if (snapshotStore == null) {
return null;
}
@ -207,7 +223,7 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory {
@Override
public void updateOperationFailed(long snapshotId) throws NoTransitionException {
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> snapshotStoreRefs = snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
for (SnapshotDataStoreVO snapshotStoreRef : snapshotStoreRefs) {
SnapshotInfo snapshotInfo = getSnapshot(snapshotStoreRef.getSnapshotId(), snapshotStoreRef.getDataStoreId(), snapshotStoreRef.getRole());
if (snapshotInfo != null) {

View File

@ -382,8 +382,7 @@ public class SnapshotServiceImpl implements SnapshotService {
if (res.isFailed()) {
throw new CloudRuntimeException(res.getResult());
}
SnapshotInfo destSnapshot = res.getSnapshot();
return destSnapshot;
return res.getSnapshot();
} catch (InterruptedException e) {
logger.debug("failed copy snapshot", e);
throw new CloudRuntimeException("Failed to copy snapshot", e);
@ -391,7 +390,6 @@ public class SnapshotServiceImpl implements SnapshotService {
logger.debug("Failed to copy snapshot", e);
throw new CloudRuntimeException("Failed to copy snapshot", e);
}
}
protected Void copySnapshotAsyncCallback(AsyncCallbackDispatcher<SnapshotServiceImpl, CopyCommandResult> callback, CopySnapshotContext<CommandResult> context) {
@ -479,7 +477,6 @@ public class SnapshotServiceImpl implements SnapshotService {
}
protected Void deleteSnapshotCallback(AsyncCallbackDispatcher<SnapshotServiceImpl, CommandResult> callback, DeleteSnapshotContext<CommandResult> context) {
CommandResult result = callback.getResult();
AsyncCallFuture<SnapshotResult> future = context.future;
SnapshotInfo snapshot = context.snapshot;
@ -607,7 +604,7 @@ public class SnapshotServiceImpl implements SnapshotService {
if (snapshot != null) {
if (snapshot.getState() != Snapshot.State.BackedUp) {
List<SnapshotDataStoreVO> snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> snapshotDataStoreVOs = _snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
for (SnapshotDataStoreVO snapshotDataStoreVO : snapshotDataStoreVOs) {
logger.debug("Remove snapshot {}, status {} on snapshot_store_ref table with id: {}", snapshot, snapshotDataStoreVO.getState(), snapshotDataStoreVO.getId());
@ -712,7 +709,6 @@ public class SnapshotServiceImpl implements SnapshotService {
SnapshotObject srcSnapshot = (SnapshotObject)snapshot;
srcSnapshot.processEvent(Event.DestroyRequested);
srcSnapshot.processEvent(Event.OperationSucceeded);
srcSnapshot.processEvent(Snapshot.Event.OperationFailed);
_snapshotDetailsDao.removeDetail(srcSnapshot.getId(), AsyncJob.Constants.MS_ID);
@ -723,7 +719,6 @@ public class SnapshotServiceImpl implements SnapshotService {
}
}
});
}
@Override

View File

@ -540,7 +540,6 @@ public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase {
logger.warn("Failed to clean up snapshot '" + snapshot.getId() + "' on primary storage: " + e.getMessage());
}
}
}
private VMSnapshot takeHypervisorSnapshot(VolumeInfo volumeInfo) {

View File

@ -101,6 +101,9 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager {
stateMachines.addTransition(State.Destroying, Event.DestroyRequested, State.Destroying);
stateMachines.addTransition(State.Destroying, Event.OperationSucceeded, State.Destroyed);
stateMachines.addTransition(State.Destroying, Event.OperationFailed, State.Destroying);
stateMachines.addTransition(State.Destroyed, Event.DestroyRequested, State.Destroyed);
stateMachines.addTransition(State.Destroyed, Event.OperationSucceeded, State.Destroyed);
stateMachines.addTransition(State.Destroyed, Event.OperationFailed, State.Destroyed);
stateMachines.addTransition(State.Failed, Event.DestroyRequested, State.Destroying);
// TODO: further investigate why an extra event is sent when it is
// already Ready for DownloadListener

View File

@ -126,7 +126,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore {
@Override
public List<VolumeInfo> getVolumes() {
List<VolumeVO> volumes = volumeDao.findByPoolId(getId());
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(getId());
List<VolumeInfo> volumeInfos = new ArrayList<VolumeInfo>();
for (VolumeVO volume : volumes) {
volumeInfos.add(VolumeObject.getVolumeObject(this, volume));

View File

@ -704,7 +704,7 @@ public class VolumeServiceImpl implements VolumeService {
VolumeApiResult res = new VolumeApiResult(volumeInfo);
if (result.isSuccess()) {
// volumeInfo.processEvent(Event.OperationSuccessed, result.getAnswer());
// volumeInfo.processEvent(Event.OperationSucceeded, result.getAnswer());
VolumeVO volume = volDao.findById(volumeInfo.getId());
CopyCmdAnswer answer = (CopyCmdAnswer)result.getAnswer();

View File

@ -120,10 +120,18 @@ public class ConfigKey<T> {
static ConfigDepotImpl s_depot = null;
static public void init(ConfigDepotImpl depot) {
private String _defaultValueIfEmpty = null;
public static void init(ConfigDepotImpl depot) {
s_depot = depot;
}
/**
 * Full-parameter constructor that additionally accepts {@code defaultValueIfEmpty}:
 * the value to substitute when the stored configuration value is present but empty.
 * All other parameters are delegated unchanged to the existing 14-argument constructor.
 */
public ConfigKey(Class<T> type, String name, String category, String defaultValue, String description, boolean isDynamic, Scope scope, T multiplier,
String displayText, String parent, Ternary<String, String, Long> group, Pair<String, Long> subGroup, Kind kind, String options, String defaultValueIfEmpty) {
this(type, name, category, defaultValue, description, isDynamic, scope, multiplier, displayText, parent, group, subGroup, kind, options);
this._defaultValueIfEmpty = defaultValueIfEmpty;
}
public ConfigKey(String category, Class<T> type, String name, String defaultValue, String description, boolean isDynamic, Scope scope) {
this(type, name, category, defaultValue, description, isDynamic, scope, null);
}
@ -216,7 +224,19 @@ public class ConfigKey<T> {
public T value() {
if (_value == null || isDynamic()) {
String value = s_depot != null ? s_depot.getConfigStringValue(_name, Scope.Global, null) : null;
_value = valueOf((value == null) ? defaultValue() : value);
String effective;
if (value != null) {
if (value.isEmpty() && _defaultValueIfEmpty != null) {
effective = _defaultValueIfEmpty;
} else {
effective = value;
}
} else {
effective = _defaultValueIfEmpty != null ? _defaultValueIfEmpty : defaultValue();
}
_value = valueOf(effective);
}
return _value;
@ -231,6 +251,10 @@ public class ConfigKey<T> {
if (value == null) {
return value();
}
if (value.isEmpty() && _defaultValueIfEmpty != null) {
return valueOf(_defaultValueIfEmpty);
}
return valueOf(value);
}

View File

@ -57,7 +57,8 @@ public class Filter {
}
public Filter(long limit) {
_orderBy = " ORDER BY RAND() LIMIT " + limit;
_orderBy = " ORDER BY RAND()";
_limit = limit;
}
public Filter(Long offset, Long limit) {

View File

@ -89,6 +89,7 @@ import net.sf.cglib.proxy.NoOp;
import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;
import org.springframework.util.ClassUtils;
/**
* GenericDaoBase is a simple way to implement DAOs. It DOES NOT
@ -1160,6 +1161,8 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
if (filter.getLimit() != null) {
sql.append(", ").append(filter.getLimit());
}
} else if (filter.getLimit() != null) {
sql.append(" LIMIT ").append(filter.getLimit());
}
}
}
@ -1321,7 +1324,7 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
Filter filter = null;
final long batchSizeFinal = ObjectUtils.defaultIfNull(batchSize, 0L);
if (batchSizeFinal > 0) {
filter = new Filter(batchSizeFinal);
filter = new Filter(null, batchSizeFinal);
}
int expunged = 0;
int currentExpunged = 0;
@ -2047,16 +2050,22 @@ public abstract class GenericDaoBase<T, ID extends Serializable> extends Compone
@DB()
/**
 * Maps the result-set column at {@code index} onto the corresponding attribute of
 * {@code entity}. Falls back to the superclass {@link Table} annotation's table name
 * (MySQL reports the base table instead of the view name for DB views), and logs a
 * warning instead of asserting when no attribute can be resolved.
 */
protected void setField(final Object entity, final ResultSet rs, ResultSetMetaData meta, final int index) throws SQLException {
    String tableName = meta.getTableName(index);
    String columnName = meta.getColumnName(index);
    Attribute attr = _allColumns.get(new Pair<>(tableName, columnName));
    if (attr == null) {
        // work around for mysql bug to return original table name instead of view name in db view case
        Table tbl = entity.getClass().getSuperclass().getAnnotation(Table.class);
        if (tbl != null) {
            attr = _allColumns.get(new Pair<>(tbl.name(), meta.getColumnLabel(index)));
        }
    }
    if (attr == null) {
        // Three format specifiers for three arguments; the original string had a fourth
        // "(%s)" specifier with no argument, which would make String.format throw
        // MissingFormatArgumentException on this very error path.
        logger.warn(String.format("Failed to find attribute in the entity %s to map column %s.%s",
                ClassUtils.getUserClass(entity).getSimpleName(), tableName, columnName));
    } else {
        setField(entity, attr.field, rs, index);
    }
}
@Override

View File

@ -76,7 +76,7 @@ Requires: sudo
Requires: /sbin/service
Requires: /sbin/chkconfig
Requires: /usr/bin/ssh-keygen
Requires: (genisoimage or mkisofs)
Requires: (genisoimage or mkisofs or xorrisofs)
Requires: ipmitool
Requires: %{name}-common = %{_ver}
Requires: (iptables-services or iptables)

View File

@ -123,7 +123,7 @@ public class SiocManagerImpl implements SiocManager {
int limitIopsTotal = 0;
List<VolumeVO> volumes = volumeDao.findByPoolId(storagePoolId, null);
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null);
if (volumes != null && volumes.size() > 0) {
Set<Long> instanceIds = new HashSet<>();

View File

@ -46,10 +46,10 @@ public class BaremetalDnsmasqResource extends BaremetalDhcpResourceBase {
com.trilead.ssh2.Connection sshConnection = null;
try {
super.configure(name, params);
logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s)", _ip, _username, _password));
logger.debug(String.format("Trying to connect to DHCP server(IP=%1$s, username=%2$s", _ip, _username));
sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password);
if (sshConnection == null) {
throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
throw new ConfigurationException(String.format("Cannot connect to DHCP server(IP=%1$s, username=%2$s", _ip, _username));
}
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "[ -f '/usr/sbin/dnsmasq' ]")) {

View File

@ -130,8 +130,8 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
logger.debug("SSH Failed to authenticate with user {} credentials", _username);
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username));
}
String script = String.format("python /usr/bin/baremetal_user_data.py '%s'", arg);
@ -167,7 +167,7 @@ public class BaremetalKickStartPxeResource extends BaremetalPxeResourceBase {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username));
}
String copyTo = String.format("%s/%s", _tftpDir, cmd.getTemplateUuid());

View File

@ -101,7 +101,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, "******"));
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=******", _ip, _username));
}
String cmd = String.format("[ -f /%1$s/pxelinux.0 ] && [ -f /%2$s/kernel ] && [ -f /%3$s/initrd.gz ] ", _tftpDir, _tftpDir, _tftpDir);
@ -150,8 +150,8 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
try {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
logger.debug("SSH Failed to authenticate with user {} credentials", _username);
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username));
}
String script =
@ -179,7 +179,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username));
}
String script =
@ -237,7 +237,7 @@ public class BaremetalPingPxeResource extends BaremetalPxeResourceBase {
sshConnection.connect(null, 60000, 60000);
if (!sshConnection.authenticateWithPassword(_username, _password)) {
logger.debug("SSH Failed to authenticate");
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
throw new ConfigurationException(String.format("Cannot connect to PING PXE server(IP=%1$s, username=%2$s", _ip, _username));
}
String script = String.format("python /usr/bin/baremetal_user_data.py '%s'", arg);

View File

@ -232,7 +232,7 @@ public class BridgeVifDriver extends VifDriverBase {
String brName = createVnetBr(vNetId, trafficLabel, protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
} else {
String brName = createVnetBr(vNetId, _bridges.get("private"), protocol);
String brName = createVnetBr(vNetId, _bridges.get("guest"), protocol);
intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType, nicAdapter), networkRateKBps);
}
} else {

View File

@ -4371,12 +4371,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
String dataDiskController = details.get(VmDetailConstants.DATA_DISK_CONTROLLER);
if (StringUtils.isNotBlank(dataDiskController)) {
LOGGER.debug("Passed custom disk controller for DATA disk " + dataDiskController);
for (DiskDef.DiskBus bus : DiskDef.DiskBus.values()) {
if (bus.toString().equalsIgnoreCase(dataDiskController)) {
LOGGER.debug("Found matching enum for disk controller for DATA disk " + dataDiskController);
return bus;
}
LOGGER.debug("Passed custom disk controller for DATA disk {}", dataDiskController);
DiskDef.DiskBus bus = DiskDef.DiskBus.fromValue(dataDiskController);
if (bus != null) {
LOGGER.debug("Found matching enum for disk controller for DATA disk {}", dataDiskController);
return bus;
}
}
return null;

View File

@ -111,7 +111,9 @@ public class LibvirtDomainXMLParser {
def.defNetworkBasedDisk(diskPath, host, port, authUserName, poolUuid, diskLabel,
DiskDef.DiskBus.valueOf(bus.toUpperCase()),
DiskDef.DiskProtocol.valueOf(protocol.toUpperCase()), fmt);
def.setCacheMode(DiskDef.DiskCacheMode.valueOf(diskCacheMode.toUpperCase()));
if (StringUtils.isNotBlank(diskCacheMode)) {
def.setCacheMode(DiskDef.DiskCacheMode.valueOf(diskCacheMode.toUpperCase()));
}
} else {
String diskFmtType = getAttrValue("driver", "type", disk);
String diskCacheMode = getAttrValue("driver", "cache", disk);

View File

@ -686,6 +686,15 @@ public class LibvirtVMDef {
_bus = bus;
}
/**
 * Resolves a {@code DiskBus} constant from its string form, ignoring case.
 *
 * @param bus the bus name to look up (a {@code null} value matches nothing)
 * @return the matching constant, or {@code null} when no bus matches
 */
public static DiskBus fromValue(String bus) {
    DiskBus match = null;
    for (DiskBus candidate : values()) {
        if (candidate.toString().equalsIgnoreCase(bus)) {
            match = candidate;
            break;
        }
    }
    return match;
}
@Override
public String toString() {
return _bus;

View File

@ -158,7 +158,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
final String target = command.getDestinationIp();
xmlDesc = dm.getXMLDesc(xmlFlag);
if (logger.isDebugEnabled()) {
logger.debug(String.format("VM [%s] with XML configuration [%s] will be migrated to host [%s].", vmName, xmlDesc, target));
logger.debug("VM {} with XML configuration {} will be migrated to host {}.", vmName, maskSensitiveInfoInXML(xmlDesc), target);
}
// Limit the VNC password in case the length is greater than 8 characters
@ -173,7 +173,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
logger.debug(String.format("Editing mount path of ISO from %s to %s", oldIsoVolumePath, newIsoVolumePath));
xmlDesc = replaceDiskSourceFile(xmlDesc, newIsoVolumePath, vmName);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Replaced disk mount point [%s] with [%s] in Instance [%s] XML configuration. New XML configuration is [%s].", oldIsoVolumePath, newIsoVolumePath, vmName, xmlDesc));
logger.debug("Replaced disk mount point {} with {} in Instance {} XML configuration. New XML configuration is {}.", oldIsoVolumePath, newIsoVolumePath, vmName, maskSensitiveInfoInXML(xmlDesc));
}
}
@ -204,11 +204,11 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
if (migrateStorage) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Changing VM [%s] volumes during migration to host: [%s].", vmName, target));
logger.debug("Changing VM {} volumes during migration to host: {}.", vmName, target);
}
xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage, migrateStorageManaged);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc));
logger.debug("Changed VM {} XML configuration of used storage. New XML configuration is {}.", vmName, maskSensitiveInfoInXML(xmlDesc));
}
migrateDiskLabels = getMigrateStorageDeviceLabels(disks, mapMigrateStorage);
}
@ -216,11 +216,11 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
Map<String, DpdkTO> dpdkPortsMapping = command.getDpdkInterfaceMapping();
if (MapUtils.isNotEmpty(dpdkPortsMapping)) {
if (logger.isTraceEnabled()) {
logger.trace(String.format("Changing VM [%s] DPDK interfaces during migration to host: [%s].", vmName, target));
logger.trace("Changing VM {} DPDK interfaces during migration to host: {}.", vmName, target);
}
xmlDesc = replaceDpdkInterfaces(xmlDesc, dpdkPortsMapping);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Changed VM [%s] XML configuration of DPDK interfaces. New XML configuration is [%s].", vmName, xmlDesc));
logger.debug("Changed VM {} XML configuration of DPDK interfaces. New XML configuration is {}.", vmName, maskSensitiveInfoInXML(xmlDesc));
}
}
@ -233,7 +233,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
}
//run migration in thread so we can monitor it
logger.info(String.format("Starting live migration of instance [%s] to destination host [%s] having the final XML configuration: [%s].", vmName, dconn.getURI(), xmlDesc));
logger.info("Starting live migration of instance {} to destination host {} having the final XML configuration: {}.", vmName, dconn.getURI(), maskSensitiveInfoInXML(xmlDesc));
final ExecutorService executor = Executors.newFixedThreadPool(1);
boolean migrateNonSharedInc = command.isMigrateNonSharedInc() && !migrateStorageManaged;
@ -243,20 +243,21 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
final Future<Domain> migrateThread = executor.submit(worker);
executor.shutdown();
long sleeptime = 0;
final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
boolean isMigrateDowntimeSet = false;
while (!executor.isTerminated()) {
Thread.sleep(100);
sleeptime += 100;
if (sleeptime == 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
if (migrateDowntime > 0 ) {
try {
final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
if (setDowntime == 0 ) {
logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
}
} catch (final LibvirtException e) {
logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
if (!isMigrateDowntimeSet && migrateDowntime > 0 && sleeptime >= 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
try {
final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
if (setDowntime == 0 ) {
isMigrateDowntimeSet = true;
logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
}
} catch (final LibvirtException e) {
logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
}
}
if (sleeptime % 1000 == 0) {
@ -272,7 +273,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
} catch (final LibvirtException e) {
logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
}
if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
if (state != null && (state == DomainState.VIR_DOMAIN_RUNNING || state == DomainState.VIR_DOMAIN_PAUSED)) {
try {
DomainJobInfo job = dm.getJobInfo();
logger.info(String.format("Aborting migration of VM [%s] with domain job [%s] due to time out after %d seconds.", vmName, job, migrateWait));
@ -314,6 +315,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
if (logger.isDebugEnabled()) {
logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName));
}
resumeDomainIfPaused(destDomain, vmName);
deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks);
libvirtComputingResource.cleanOldSecretsByDiskDef(conn, disks);
}
@ -378,6 +380,28 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
return new MigrateAnswer(command, result == null, result, null);
}
/**
 * Queries the current libvirt state of the destination domain.
 *
 * @param destDomain destination domain handle to inspect
 * @param vmName VM name, used only for log context
 * @return the domain state, or {@code null} when libvirt refuses the query
 */
private DomainState getDestDomainState(Domain destDomain, String vmName) {
    try {
        return destDomain.getInfo().state;
    } catch (final LibvirtException e) {
        logger.info("Failed to get domain state for VM: " + vmName + " due to: " + e.getMessage());
        return null;
    }
}
/**
 * Resumes the destination domain when a migration has left it paused.
 * A no-op when the state cannot be read or the domain is not paused.
 *
 * @param destDomain destination domain handle after migration
 * @param vmName VM name, used only for log context
 */
private void resumeDomainIfPaused(Domain destDomain, String vmName) {
    DomainState dmState = getDestDomainState(destDomain, vmName);
    if (dmState != DomainState.VIR_DOMAIN_PAUSED) {
        return;
    }
    logger.info("Resuming VM " + vmName + " on destination after migration");
    try {
        destDomain.resume();
    } catch (final LibvirtException e) {
        // Catch only the libvirt failure (was a broad catch (Exception)) and keep
        // the stack trace: a paused-but-unresumable domain needs operator attention.
        logger.error("Failed to resume vm " + vmName + " on destination after migration due to : " + e.getMessage(), e);
    }
}
/**
* Gets the disk labels (vda, vdb...) of the disks mapped for migration on mapMigrateStorage.
* @param diskDefinitions list of all the disksDefinitions of the VM.
@ -575,9 +599,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
graphElem = graphElem.replaceAll("passwd='([^\\s]+)'", "passwd='" + vncPassword + "'");
}
xmlDesc = xmlDesc.replaceAll(GRAPHICS_ELEM_START + CONTENTS_WILDCARD + GRAPHICS_ELEM_END, graphElem);
if (logger.isDebugEnabled()) {
logger.debug(String.format("Replaced the VNC IP address [%s] with [%s] in VM [%s].", originalGraphElem, graphElem, vmName));
}
logger.debug("Replaced the VNC IP address {} with {} in VM {}.", maskSensitiveInfoInXML(originalGraphElem), maskSensitiveInfoInXML(graphElem), vmName);
}
}
return xmlDesc;
@ -910,4 +932,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
}
return false;
}
/**
 * Masks the VNC password in a libvirt domain XML description so the XML
 * can be logged safely. Only {@code passwd} attributes of
 * {@code <graphics type='vnc' .../>} elements are replaced (single or
 * double quoted); other graphics types (e.g. spice) are left untouched.
 *
 * @param xmlDesc domain XML, may be {@code null}
 * @return the XML with every VNC password replaced by {@code *****}, or
 *         {@code null} when the input is {@code null}
 */
public static String maskSensitiveInfoInXML(String xmlDesc) {
    if (xmlDesc == null) {
        return null;
    }
    final String vncPasswdRegex = "(graphics\\s+[^>]*type=['\"]vnc['\"][^>]*passwd=['\"])([^'\"]*)(['\"])";
    return xmlDesc.replaceAll(vncPasswdRegex, "$1*****$3");
}
}

View File

@ -80,8 +80,9 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
}
libvirtComputingResource.createVifs(vmSpec, vm);
logger.debug("starting " + vmName + ": " + vm.toString());
if (logger.isDebugEnabled()) {
logger.debug("Starting {} : {}", vmName, LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML(vm.toString()));
}
String vmInitialSpecification = vm.toString();
String vmFinalSpecification = performXmlTransformHook(vmInitialSpecification, libvirtComputingResource);
libvirtComputingResource.startVM(conn, vmName, vmFinalSpecification);

View File

@ -37,7 +37,8 @@ public final class LibvirtUpdateHostPasswordCommandWrapper extends CommandWrappe
final String newPassword = command.getNewPassword();
final Script script = libvirtUtilitiesHelper.buildScript(libvirtComputingResource.getUpdateHostPasswdPath());
script.add(username, newPassword);
script.add(username);
script.addSensitive(newPassword);
final String result = script.execute();
if (result != null) {

View File

@ -1320,26 +1320,27 @@ public class KVMStorageProcessor implements StorageProcessor {
/**
* Attaches or detaches a disk to an instance.
* @param conn libvirt connection
* @param attach boolean that determines whether the device will be attached or detached
* @param vmName instance name
* @param attachingDisk kvm physical disk
* @param devId device id in instance
* @param conn libvirt connection
* @param attach boolean that determines whether the device will be attached or detached
* @param vmName instance name
* @param attachingDisk kvm physical disk
* @param devId device id in instance
* @param serial
* @param bytesReadRate bytes read rate
* @param bytesReadRateMax bytes read rate max
* @param bytesReadRateMaxLength bytes read rate max length
* @param bytesWriteRate bytes write rate
* @param bytesWriteRateMax bytes write rate max
* @param bytesReadRate bytes read rate
* @param bytesReadRateMax bytes read rate max
* @param bytesReadRateMaxLength bytes read rate max length
* @param bytesWriteRate bytes write rate
* @param bytesWriteRateMax bytes write rate max
* @param bytesWriteRateMaxLength bytes write rate max length
* @param iopsReadRate iops read rate
* @param iopsReadRateMax iops read rate max
* @param iopsReadRateMaxLength iops read rate max length
* @param iopsWriteRate iops write rate
* @param iopsWriteRateMax iops write rate max
* @param iopsWriteRateMaxLength iops write rate max length
* @param cacheMode cache mode
* @param encryptDetails encrypt details
* @param iopsReadRate iops read rate
* @param iopsReadRateMax iops read rate max
* @param iopsReadRateMaxLength iops read rate max length
* @param iopsWriteRate iops write rate
* @param iopsWriteRateMax iops write rate max
* @param iopsWriteRateMaxLength iops write rate max length
* @param cacheMode cache mode
* @param encryptDetails encrypt details
* @param controllerInfo
* @throws LibvirtException
* @throws InternalErrorException
*/
@ -1347,37 +1348,38 @@ public class KVMStorageProcessor implements StorageProcessor {
final String serial, final Long bytesReadRate, final Long bytesReadRateMax, final Long bytesReadRateMaxLength,
final Long bytesWriteRate, final Long bytesWriteRateMax, final Long bytesWriteRateMaxLength, final Long iopsReadRate,
final Long iopsReadRateMax, final Long iopsReadRateMaxLength, final Long iopsWriteRate, final Long iopsWriteRateMax,
final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails, Map<String, String> details)
final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails, Map<String, String> details, Map<String, String> controllerInfo)
throws LibvirtException, InternalErrorException {
attachOrDetachDisk(conn, attach, vmName, attachingDisk, devId, serial, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength,
bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate,
iopsWriteRateMax, iopsWriteRateMaxLength, cacheMode, encryptDetails, 0l, details);
iopsWriteRateMax, iopsWriteRateMaxLength, cacheMode, encryptDetails, 0l, details, controllerInfo);
}
/**
*
* Attaches or detaches a disk to an instance.
* @param conn libvirt connection
* @param attach boolean that determines whether the device will be attached or detached
* @param vmName instance name
* @param attachingDisk kvm physical disk
* @param devId device id in instance
* @param conn libvirt connection
* @param attach boolean that determines whether the device will be attached or detached
* @param vmName instance name
* @param attachingDisk kvm physical disk
* @param devId device id in instance
* @param serial
* @param bytesReadRate bytes read rate
* @param bytesReadRateMax bytes read rate max
* @param bytesReadRateMaxLength bytes read rate max length
* @param bytesWriteRate bytes write rate
* @param bytesWriteRateMax bytes write rate max
* @param bytesReadRate bytes read rate
* @param bytesReadRateMax bytes read rate max
* @param bytesReadRateMaxLength bytes read rate max length
* @param bytesWriteRate bytes write rate
* @param bytesWriteRateMax bytes write rate max
* @param bytesWriteRateMaxLength bytes write rate max length
* @param iopsReadRate iops read rate
* @param iopsReadRateMax iops read rate max
* @param iopsReadRateMaxLength iops read rate max length
* @param iopsWriteRate iops write rate
* @param iopsWriteRateMax iops write rate max
* @param iopsWriteRateMaxLength iops write rate max length
* @param cacheMode cache mode
* @param encryptDetails encrypt details
* @param waitDetachDevice value set in milliseconds to wait before assuming device removal failed
* @param iopsReadRate iops read rate
* @param iopsReadRateMax iops read rate max
* @param iopsReadRateMaxLength iops read rate max length
* @param iopsWriteRate iops write rate
* @param iopsWriteRateMax iops write rate max
* @param iopsWriteRateMaxLength iops write rate max length
* @param cacheMode cache mode
* @param encryptDetails encrypt details
* @param waitDetachDevice value set in milliseconds to wait before assuming device removal failed
* @param controllerInfo
* @throws LibvirtException
* @throws InternalErrorException
*/
@ -1386,7 +1388,7 @@ public class KVMStorageProcessor implements StorageProcessor {
final Long bytesWriteRate, final Long bytesWriteRateMax, final Long bytesWriteRateMaxLength, final Long iopsReadRate,
final Long iopsReadRateMax, final Long iopsReadRateMaxLength, final Long iopsWriteRate, final Long iopsWriteRateMax,
final Long iopsWriteRateMaxLength, final String cacheMode, final DiskDef.LibvirtDiskEncryptDetails encryptDetails,
long waitDetachDevice, Map<String, String> details)
long waitDetachDevice, Map<String, String> details, Map<String, String> controllerInfo)
throws LibvirtException, InternalErrorException {
List<DiskDef> disks = null;
@ -1423,17 +1425,7 @@ public class KVMStorageProcessor implements StorageProcessor {
return;
}
} else {
DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO;
for (final DiskDef disk : disks) {
if (disk.getDeviceType() == DeviceType.DISK) {
if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
busT = DiskDef.DiskBus.SCSI;
} else if (disk.getBusType() == DiskDef.DiskBus.VIRTIOBLK) {
busT = DiskDef.DiskBus.VIRTIOBLK;
}
break;
}
}
DiskDef.DiskBus busT = getAttachDiskBusType(devId, disks, controllerInfo);
diskdef = new DiskDef();
if (busT == DiskDef.DiskBus.SCSI || busT == DiskDef.DiskBus.VIRTIOBLK) {
diskdef.setQemuDriver(true);
@ -1538,6 +1530,28 @@ public class KVMStorageProcessor implements StorageProcessor {
}
}
/**
 * Determines the disk bus to use when attaching a disk.
 * <p>
 * Priority: (1) the controller explicitly requested in {@code controllerInfo}
 * (root-disk key for device 0, data-disk key otherwise) when it names a known
 * bus; (2) the bus of an existing DISK device (SCSI or VIRTIO-BLK); (3) VIRTIO.
 *
 * @param deviceId device id the disk will get in the instance (0 = root)
 * @param disks existing disk definitions of the instance
 * @param controllerInfo optional controller hints from the command, may be null
 * @return the bus type to attach with; never {@code null}
 */
protected DiskDef.DiskBus getAttachDiskBusType(int deviceId, List<DiskDef> disks, Map<String, String> controllerInfo) {
    String controllerKey = deviceId == 0 ? VmDetailConstants.ROOT_DISK_CONTROLLER : VmDetailConstants.DATA_DISK_CONTROLLER;
    String diskController = MapUtils.getString(controllerInfo, controllerKey);
    DiskDef.DiskBus busType = DiskDef.DiskBus.fromValue(diskController);
    // Bug fix: guard on the resolved bus, not just the raw string. An
    // unrecognized controller name made fromValue() return null, and the old
    // (diskController != null) check then returned that null to the caller.
    if (busType != null) {
        logger.debug("Using controller '{}' from command specified as {} while attaching disk (deviceId={})",
                diskController, controllerKey, deviceId);
        return busType;
    }
    if (diskController != null) {
        logger.warn("Unrecognized disk controller '{}' specified as {}; falling back to bus of existing disks (deviceId={})",
                diskController, controllerKey, deviceId);
    }
    for (final DiskDef disk : disks) {
        if (disk.getDeviceType() != DeviceType.DISK) {
            continue;
        }
        if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
            return DiskDef.DiskBus.SCSI;
        } else if (disk.getBusType() == DiskDef.DiskBus.VIRTIOBLK) {
            return DiskDef.DiskBus.VIRTIOBLK;
        }
    }
    return DiskDef.DiskBus.VIRTIO;
}
@Override
public Answer attachVolume(final AttachCommand cmd) {
final DiskTO disk = cmd.getDisk();
@ -1565,7 +1579,8 @@ public class KVMStorageProcessor implements StorageProcessor {
vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(),
vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(),
vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(),
vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, encryptDetails, disk.getDetails());
vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode,
encryptDetails, disk.getDetails(), cmd.getControllerInfo());
return new AttachAnswer(disk);
} catch (final LibvirtException e) {
@ -1602,7 +1617,7 @@ public class KVMStorageProcessor implements StorageProcessor {
vol.getBytesReadRate(), vol.getBytesReadRateMax(), vol.getBytesReadRateMaxLength(),
vol.getBytesWriteRate(), vol.getBytesWriteRateMax(), vol.getBytesWriteRateMaxLength(),
vol.getIopsReadRate(), vol.getIopsReadRateMax(), vol.getIopsReadRateMaxLength(),
vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, null, waitDetachDevice, null);
vol.getIopsWriteRate(), vol.getIopsWriteRateMax(), vol.getIopsWriteRateMaxLength(), volCacheMode, null, waitDetachDevice, null, null);
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());

View File

@ -589,7 +589,7 @@ public class LibvirtMigrateCommandWrapperTest {
@Test
public void testReplaceIpForVNCInDescFile() {
final String targetIp = "192.168.22.21";
final String result = libvirtMigrateCmdWrapper.replaceIpForVNCInDescFileAndNormalizePassword(fullfile, targetIp, null, "");
final String result = libvirtMigrateCmdWrapper.replaceIpForVNCInDescFileAndNormalizePassword(fullfile, targetIp, "vncSecretPwd", "");
assertEquals("transformation does not live up to expectation:\n" + result, targetfile, result);
}
@ -1019,4 +1019,28 @@ public class LibvirtMigrateCommandWrapperTest {
Assert.assertTrue(finalXml.contains(newIsoVolumePath));
}
@Test
public void testMaskVncPwdDomain() {
    // Single-quoted VNC password attribute is masked.
    assertEquals("<graphics type='vnc' port='5900' passwd='*****'/>",
            LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML("<graphics type='vnc' port='5900' passwd='secret123'/>"));
    // Double-quoted VNC password attribute is masked.
    assertEquals("<graphics type=\"vnc\" port=\"5901\" passwd=\"*****\"/>",
            LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML("<graphics type=\"vnc\" port=\"5901\" passwd=\"mypassword\"/>"));
    // Non-VNC graphics elements are left untouched.
    String spiceXml = "<graphics type='spice' port='5902' passwd='notvnc'/>";
    assertEquals(spiceXml, LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML(spiceXml));
    // Every VNC entry in a multi-element string is masked.
    assertEquals("<graphics type='vnc' port='5900' passwd='*****'/>\n" +
                    "<graphics type='vnc' port='5901' passwd='*****'/>",
            LibvirtMigrateCommandWrapper.maskSensitiveInfoInXML("<graphics type='vnc' port='5900' passwd='a'/>\n" +
                    "<graphics type='vnc' port='5901' passwd='b'/>"));
}
}

View File

@ -362,7 +362,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
sshConnection = SSHCmdHelper.acquireAuthorizedConnection(_ip, _username, _password);
if (sshConnection == null) {
throw new CloudRuntimeException(String.format("Cannot connect to ovm host(IP=%1$s, username=%2$s, password=%3$s", _ip, _username, _password));
throw new CloudRuntimeException(String.format("Cannot connect to ovm host(IP=%1$s, username=%2$s)", _ip, _username));
}
if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "sh /usr/bin/configureOvm.sh postSetup")) {

View File

@ -45,9 +45,9 @@ public final class CitrixUpdateHostPasswordCommandWrapper extends CommandWrapper
Pair<Boolean, String> result;
try {
logger.debug("Executing command in Host: " + cmdLine);
logger.debug("Executing password update command on host: {} for user: {}", hostIp, username);
final String hostPassword = citrixResourceBase.getPwdFromQueue();
result = xenServerUtilitiesHelper.executeSshWrapper(hostIp, 22, username, null, hostPassword, cmdLine.toString());
result = xenServerUtilitiesHelper.executeSshWrapper(hostIp, 22, username, null, hostPassword, cmdLine);
} catch (final Exception e) {
return new Answer(command, false, e.getMessage());
}

View File

@ -973,9 +973,12 @@ public class KubernetesClusterManagerImpl extends ManagerBase implements Kuberne
CallContext networkContext = CallContext.register(CallContext.current(), ApiCommandResourceType.Network);
try {
Long zoneId = zone.getId();
Integer publicMTU = NetworkService.VRPublicInterfaceMtu.valueIn(zoneId);
Integer privateMTU = NetworkService.VRPrivateInterfaceMtu.valueIn(zoneId);
network = networkService.createGuestNetwork(networkOffering.getId(), clusterName + "-network",
owner.getAccountName() + "-network", owner, physicalNetwork, zone.getId(),
ControlledEntity.ACLType.Account);
owner.getAccountName() + "-network", owner, physicalNetwork, zoneId,
ControlledEntity.ACLType.Account, new Pair<>(publicMTU, privateMTU));
} catch (ConcurrentOperationException | InsufficientCapacityException | ResourceAllocationException e) {
logAndThrow(Level.ERROR, String.format("Unable to create network for the Kubernetes cluster: %s", clusterName));
} finally {

View File

@ -33,6 +33,9 @@ import org.apache.cloudstack.api.command.user.kubernetes.version.ListKubernetesS
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import com.cloud.api.query.dao.TemplateJoinDao;
@ -53,6 +56,7 @@ import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentContext;
@ -80,12 +84,14 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
@Inject
private DataCenterDao dataCenterDao;
@Inject
private ImageStoreDao imageStoreDao;
@Inject
private TemplateApiService templateService;
public static final String MINIMUN_AUTOSCALER_SUPPORTED_VERSION = "1.15.0";
protected void updateTemplateDetailsInKubernetesSupportedVersionResponse(
final KubernetesSupportedVersion kubernetesSupportedVersion, KubernetesSupportedVersionResponse response) {
final KubernetesSupportedVersion kubernetesSupportedVersion, KubernetesSupportedVersionResponse response, boolean isRootAdmin) {
TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId());
if (template == null) {
return;
@ -95,11 +101,14 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
if (template.getState() != null) {
response.setIsoState(template.getState().toString());
}
if (isRootAdmin) {
response.setIsoUrl(template.getUrl());
}
response.setIsoArch(template.getArch().getType());
response.setDirectDownload(template.isDirectDownload());
}
private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) {
private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion, boolean isRootAdmin) {
KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse();
response.setObjectName("kubernetessupportedversion");
response.setId(kubernetesSupportedVersion.getUuid());
@ -118,7 +127,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(),
KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0);
response.setSupportsAutoscaling(versionSupportsAutoscaling(kubernetesSupportedVersion));
updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, response);
updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion, response, isRootAdmin);
response.setCreated(kubernetesSupportedVersion.getCreated());
return response;
}
@ -126,8 +135,11 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
private ListResponse<KubernetesSupportedVersionResponse> createKubernetesSupportedVersionListResponse(
List<KubernetesSupportedVersionVO> versions, Integer count) {
List<KubernetesSupportedVersionResponse> responseList = new ArrayList<>();
Account caller = CallContext.current().getCallingAccount();
boolean isRootAdmin = accountManager.isRootAdmin(caller.getId());
for (KubernetesSupportedVersionVO version : versions) {
responseList.add(createKubernetesSupportedVersionResponse(version));
responseList.add(createKubernetesSupportedVersionResponse(version, isRootAdmin));
}
ListResponse<KubernetesSupportedVersionResponse> response = new ListResponse<>();
response.setResponses(responseList, count);
@ -316,6 +328,32 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
return createKubernetesSupportedVersionListResponse(versions, versionsAndCount.second());
}
/**
 * Validates that an image store exists before registering a Kubernetes
 * version ISO. Direct-download ISOs skip the check. With a zone id, only
 * that zone is checked; with {@code null}, every zone must have a store.
 *
 * @param zoneId target zone id, or {@code null} for all zones
 * @param directDownload whether the ISO is registered as direct-download
 * @throws InvalidParameterValueException when a required image store is missing
 */
private void validateImageStoreForZone(Long zoneId, boolean directDownload) {
    if (directDownload) {
        // Direct-download templates never land on secondary storage.
        return;
    }
    if (zoneId == null) {
        // Registering for all zones: collect every zone lacking a store so the
        // error names all of them at once.
        List<String> zonesWithoutStorage = new ArrayList<>();
        for (DataCenterVO zone : dataCenterDao.listAllZones()) {
            if (CollectionUtils.isEmpty(imageStoreDao.listStoresByZoneId(zone.getId()))) {
                zonesWithoutStorage.add(zone.getName());
            }
        }
        if (!zonesWithoutStorage.isEmpty()) {
            throw new InvalidParameterValueException(String.format("Unable to register Kubernetes version ISO for all zones. The following zones have no image store: %s", String.join(", ", zonesWithoutStorage)));
        }
        return;
    }
    if (CollectionUtils.isEmpty(imageStoreDao.listStoresByZoneId(zoneId))) {
        DataCenterVO zone = dataCenterDao.findById(zoneId);
        String zoneName = zone != null ? zone.getName() : String.valueOf(zoneId);
        throw new InvalidParameterValueException(String.format("Unable to register Kubernetes version ISO. No image store available in zone: %s", zoneName));
    }
}
@Override
@ActionEvent(eventType = KubernetesVersionEventTypes.EVENT_KUBERNETES_VERSION_ADD,
eventDescription = "Adding Kubernetes supported version")
@ -361,6 +399,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
}
}
validateImageStoreForZone(zoneId, isDirectDownload);
VMTemplateVO template = null;
try {
VirtualMachineTemplate vmTemplate = registerKubernetesVersionIso(zoneId, name, isoUrl, isoChecksum, isDirectDownload, arch);
@ -374,7 +414,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
supportedVersionVO = kubernetesSupportedVersionDao.persist(supportedVersionVO);
CallContext.current().putContextParameter(KubernetesSupportedVersion.class, supportedVersionVO.getUuid());
return createKubernetesSupportedVersionResponse(supportedVersionVO);
return createKubernetesSupportedVersionResponse(supportedVersionVO, true);
}
@Override
@ -435,7 +475,7 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne
}
version = kubernetesSupportedVersionDao.findById(versionId);
}
return createKubernetesSupportedVersionResponse(version);
return createKubernetesSupportedVersionResponse(version, true);
}
@Override

View File

@ -50,6 +50,10 @@ public class KubernetesSupportedVersionResponse extends BaseResponse {
@Param(description = "The name of the binaries ISO for Kubernetes supported version")
private String isoName;
@SerializedName(ApiConstants.ISO_URL)
@Param(description = "the URL of the binaries ISO for Kubernetes supported version")
private String isoUrl;
@SerializedName(ApiConstants.ISO_STATE)
@Param(description = "The state of the binaries ISO for Kubernetes supported version")
private String isoState;
@ -134,6 +138,14 @@ public class KubernetesSupportedVersionResponse extends BaseResponse {
this.isoName = isoName;
}
public String getIsoUrl() {
return isoUrl;
}
public void setIsoUrl(String isoUrl) {
this.isoUrl = isoUrl;
}
public String getIsoState() {
return isoState;
}

View File

@ -16,10 +16,15 @@
// under the License.
package com.cloud.kubernetes.version;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import org.apache.cloudstack.api.response.KubernetesSupportedVersionResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -32,6 +37,9 @@ import org.springframework.test.util.ReflectionTestUtils;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.cpu.CPU;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.InvalidParameterValueException;
@RunWith(MockitoJUnitRunner.class)
public class KubernetesVersionManagerImplTest {
@ -39,6 +47,12 @@ public class KubernetesVersionManagerImplTest {
@Mock
TemplateJoinDao templateJoinDao;
@Mock
ImageStoreDao imageStoreDao;
@Mock
DataCenterDao dataCenterDao;
@InjectMocks
KubernetesVersionManagerImpl kubernetesVersionManager = new KubernetesVersionManagerImpl();
@ -48,7 +62,7 @@ public class KubernetesVersionManagerImplTest {
Mockito.when(kubernetesSupportedVersion.getIsoId()).thenReturn(1L);
KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse();
kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion,
response);
response, true);
Assert.assertNull(ReflectionTestUtils.getField(response, "isoId"));
}
@ -63,13 +77,71 @@ public class KubernetesVersionManagerImplTest {
Mockito.when(templateJoinVO.getUuid()).thenReturn(uuid);
Mockito.when(templateJoinDao.findById(1L)).thenReturn(templateJoinVO);
kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion,
response);
response, true);
Assert.assertEquals(uuid, ReflectionTestUtils.getField(response, "isoId"));
Assert.assertNull(ReflectionTestUtils.getField(response, "isoState"));
ObjectInDataStoreStateMachine.State state = ObjectInDataStoreStateMachine.State.Ready;
Mockito.when(templateJoinVO.getState()).thenReturn(state);
kubernetesVersionManager.updateTemplateDetailsInKubernetesSupportedVersionResponse(kubernetesSupportedVersion,
response);
response, true);
Assert.assertEquals(state.toString(), ReflectionTestUtils.getField(response, "isoState"));
}
@Test
public void testValidateImageStoreForZoneWithDirectDownload() {
// Direct-download ISOs bypass the image-store check entirely, so no
// DAO stubbing is needed and no exception is expected.
ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", 1L, true);
}
@Test
public void testValidateImageStoreForZoneWithValidZone() {
    // A zone with at least one image store passes validation without throwing.
    final Long zoneId = 1L;
    Mockito.when(imageStoreDao.listStoresByZoneId(zoneId))
            .thenReturn(Collections.singletonList(Mockito.mock(ImageStoreVO.class)));
    ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", zoneId, false);
}
@Test(expected = InvalidParameterValueException.class)
public void testValidateImageStoreForZoneWithNoImageStore() {
    // A zone without any image store must be rejected.
    final Long zoneId = 1L;
    DataCenterVO storagelessZone = Mockito.mock(DataCenterVO.class);
    Mockito.when(storagelessZone.getName()).thenReturn("test-zone");
    Mockito.when(dataCenterDao.findById(zoneId)).thenReturn(storagelessZone);
    Mockito.when(imageStoreDao.listStoresByZoneId(zoneId)).thenReturn(Collections.emptyList());
    ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", zoneId, false);
}
@Test
public void testValidateImageStoreForAllZonesWithAllValid() {
    // When every zone has an image store, validating for all zones succeeds.
    DataCenterVO firstZone = Mockito.mock(DataCenterVO.class);
    Mockito.when(firstZone.getId()).thenReturn(1L);
    DataCenterVO secondZone = Mockito.mock(DataCenterVO.class);
    Mockito.when(secondZone.getId()).thenReturn(2L);
    Mockito.when(dataCenterDao.listAllZones()).thenReturn(Arrays.asList(firstZone, secondZone));
    List<ImageStoreVO> stores = Collections.singletonList(Mockito.mock(ImageStoreVO.class));
    Mockito.when(imageStoreDao.listStoresByZoneId(1L)).thenReturn(stores);
    Mockito.when(imageStoreDao.listStoresByZoneId(2L)).thenReturn(stores);
    ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", (Long) null, false);
}
@Test(expected = InvalidParameterValueException.class)
public void testValidateImageStoreForAllZonesWithSomeMissingStorage() {
    // With no zone id given, validation walks all zones and must fail on the
    // first zone that has no image store.
    final DataCenterVO zoneWithStore = Mockito.mock(DataCenterVO.class);
    Mockito.when(zoneWithStore.getId()).thenReturn(1L);
    final DataCenterVO zoneWithoutStore = Mockito.mock(DataCenterVO.class);
    Mockito.when(zoneWithoutStore.getId()).thenReturn(2L);
    Mockito.when(zoneWithoutStore.getName()).thenReturn("zone-without-storage");
    Mockito.when(dataCenterDao.listAllZones()).thenReturn(Arrays.asList(zoneWithStore, zoneWithoutStore));
    final List<ImageStoreVO> stores = Collections.singletonList(Mockito.mock(ImageStoreVO.class));
    Mockito.when(imageStoreDao.listStoresByZoneId(1L)).thenReturn(stores);
    Mockito.when(imageStoreDao.listStoresByZoneId(2L)).thenReturn(Collections.emptyList());
    ReflectionTestUtils.invokeMethod(kubernetesVersionManager, "validateImageStoreForZone", (Long) null, false);
}
}

View File

@ -17,6 +17,9 @@
package com.cloud.kubernetes.version;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.when;
import java.lang.reflect.Field;
@ -25,6 +28,11 @@ import java.util.List;
import java.util.UUID;
import com.cloud.cpu.CPU;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.User;
import com.cloud.user.UserVO;
import org.apache.cloudstack.api.command.admin.kubernetes.version.AddKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.DeleteKubernetesSupportedVersionCmd;
import org.apache.cloudstack.api.command.admin.kubernetes.version.UpdateKubernetesSupportedVersionCmd;
@ -63,11 +71,6 @@ import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.template.TemplateApiService;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
import com.cloud.user.AccountManager;
import com.cloud.user.AccountVO;
import com.cloud.user.User;
import com.cloud.user.UserVO;
import com.cloud.utils.Pair;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.db.Filter;
@ -75,6 +78,9 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
@RunWith(MockitoJUnitRunner.class)
public class KubernetesVersionServiceTest {
@ -94,7 +100,11 @@ public class KubernetesVersionServiceTest {
@Mock
private DataCenterDao dataCenterDao;
@Mock
private ImageStoreDao imageStoreDao;
@Mock
private TemplateApiService templateService;
@Mock
private Account accountMock;
AutoCloseable closeable;
@ -123,7 +133,12 @@ public class KubernetesVersionServiceTest {
DataCenterVO zone = Mockito.mock(DataCenterVO.class);
when(dataCenterDao.findById(Mockito.anyLong())).thenReturn(zone);
List<ImageStoreVO> imageStores = new ArrayList<>();
imageStores.add(Mockito.mock(ImageStoreVO.class));
when(imageStoreDao.listStoresByZoneId(Mockito.anyLong())).thenReturn(imageStores);
TemplateJoinVO templateJoinVO = Mockito.mock(TemplateJoinVO.class);
when(templateJoinVO.getUrl()).thenReturn("https://download.cloudstack.com");
when(templateJoinVO.getState()).thenReturn(ObjectInDataStoreStateMachine.State.Ready);
when(templateJoinVO.getArch()).thenReturn(CPU.CPUArch.getDefault());
when(templateJoinDao.findById(Mockito.anyLong())).thenReturn(templateJoinVO);
@ -140,19 +155,66 @@ public class KubernetesVersionServiceTest {
@Test
public void listKubernetesSupportedVersionsTest() {
ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class);
List<KubernetesSupportedVersionVO> versionVOs = new ArrayList<>();
KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
versionVOs.add(versionVO);
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO);
when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(SearchCriteria.class),
Mockito.any(Filter.class))).thenReturn(new Pair<>(versionVOs, versionVOs.size()));
ListResponse<KubernetesSupportedVersionResponse> versionsResponse =
kubernetesVersionService.listKubernetesSupportedVersions(cmd);
Assert.assertEquals(versionVOs.size(), versionsResponse.getCount().intValue());
Assert.assertTrue(CollectionUtils.isNotEmpty(versionsResponse.getResponses()));
Assert.assertEquals(versionVOs.size(), versionsResponse.getResponses().size());
CallContext callContextMock = Mockito.mock(CallContext.class);
try (MockedStatic<CallContext> callContextMockedStatic = Mockito.mockStatic(CallContext.class)) {
callContextMockedStatic.when(CallContext::current).thenReturn(callContextMock);
final SearchCriteria<KubernetesSupportedVersionVO> versionSearchCriteria = Mockito.mock(SearchCriteria.class);
when(callContextMock.getCallingAccount()).thenReturn(accountMock);
ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class);
List<KubernetesSupportedVersionVO> versionVOs = new ArrayList<>();
KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
versionVOs.add(versionVO);
when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO);
when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class)))
.thenReturn(new Pair<>(versionVOs, versionVOs.size()));
ListResponse<KubernetesSupportedVersionResponse> versionsResponse =
kubernetesVersionService.listKubernetesSupportedVersions(cmd);
Assert.assertEquals(versionVOs.size(), versionsResponse.getCount().intValue());
Assert.assertTrue(CollectionUtils.isNotEmpty(versionsResponse.getResponses()));
Assert.assertEquals(versionVOs.size(), versionsResponse.getResponses().size());
}
}
@Test
public void listKubernetesSupportedVersionsTestWhenAdmin() {
    // Root admins must see the ISO URL in the version response.
    final CallContext callContext = Mockito.mock(CallContext.class);
    try (MockedStatic<CallContext> staticCallContext = Mockito.mockStatic(CallContext.class)) {
        staticCallContext.when(CallContext::current).thenReturn(callContext);
        when(callContext.getCallingAccount()).thenReturn(accountMock);
        when(accountManager.isRootAdmin(anyLong())).thenReturn(true);

        final KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
        when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
        final List<KubernetesSupportedVersionVO> versionVOs = new ArrayList<>();
        versionVOs.add(versionVO);
        when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO);
        when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class)))
                .thenReturn(new Pair<>(versionVOs, versionVOs.size()));

        final ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class);
        final ListResponse<KubernetesSupportedVersionResponse> response =
                kubernetesVersionService.listKubernetesSupportedVersions(cmd);
        assertNotNull(response.getResponses().get(0).getIsoUrl());
    }
}
@Test
public void listKubernetesSupportedVersionsTestWhenOtherUser() {
    // Non-admin callers must not see the ISO URL in the version response.
    final CallContext callContext = Mockito.mock(CallContext.class);
    try (MockedStatic<CallContext> staticCallContext = Mockito.mockStatic(CallContext.class)) {
        staticCallContext.when(CallContext::current).thenReturn(callContext);
        when(callContext.getCallingAccount()).thenReturn(accountMock);
        when(accountManager.isRootAdmin(anyLong())).thenReturn(false);
        when(accountMock.getId()).thenReturn(2L);

        final KubernetesSupportedVersionVO versionVO = Mockito.mock(KubernetesSupportedVersionVO.class);
        when(versionVO.getSemanticVersion()).thenReturn(KubernetesVersionService.MIN_KUBERNETES_VERSION);
        final List<KubernetesSupportedVersionVO> versionVOs = new ArrayList<>();
        versionVOs.add(versionVO);
        when(kubernetesSupportedVersionDao.findById(Mockito.anyLong())).thenReturn(versionVO);
        when(kubernetesSupportedVersionDao.searchAndCount(Mockito.any(), Mockito.any(Filter.class)))
                .thenReturn(new Pair<>(versionVOs, versionVOs.size()));

        final ListKubernetesSupportedVersionsCmd cmd = Mockito.mock(ListKubernetesSupportedVersionsCmd.class);
        final ListResponse<KubernetesSupportedVersionResponse> response =
                kubernetesVersionService.listKubernetesSupportedVersions(cmd);
        assertNull(response.getResponses().get(0).getIsoUrl());
    }
}
@Test(expected = InvalidParameterValueException.class)
@ -224,7 +286,6 @@ public class KubernetesVersionServiceTest {
mockedComponentContext.when(() -> ComponentContext.inject(Mockito.any(RegisterIsoCmd.class))).thenReturn(
new RegisterIsoCmd());
mockedCallContext.when(CallContext::current).thenReturn(callContext);
when(templateService.registerIso(Mockito.any(RegisterIsoCmd.class))).thenReturn(
Mockito.mock(VirtualMachineTemplate.class));
VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class);

View File

@ -563,7 +563,7 @@ public class DateraPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private long getUsedBytes(StoragePool storagePool, long volumeIdToIgnore) {
long usedSpaceBytes = 0;
List<VolumeVO> lstVolumes = _volumeDao.findByPoolId(storagePool.getId(), null);
List<VolumeVO> lstVolumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId(), null);
if (lstVolumes != null) {
for (VolumeVO volume : lstVolumes) {

View File

@ -247,7 +247,7 @@ public class DateraHostListener implements HypervisorHostListener {
List<String> storagePaths = new ArrayList<>();
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
List<VolumeVO> volumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {
@ -317,7 +317,7 @@ public class DateraHostListener implements HypervisorHostListener {
StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
List<VolumeVO> volumes = _volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {

View File

@ -5,6 +5,12 @@ All notable changes to Linstor CloudStack plugin will be documented in this file
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [2026-01-17]
### Added
- Support live migration from other primary storage
## [2025-12-18]
### Changed

View File

@ -21,33 +21,25 @@ import com.linbit.linstor.api.CloneWaiter;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRc;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.AutoSelectFilter;
import com.linbit.linstor.api.model.LayerType;
import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest;
import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
import com.linbit.linstor.api.model.ResourceDefinitionCreate;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceMakeAvailable;
import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.Snapshot;
import com.linbit.linstor.api.model.SnapshotRestore;
import com.linbit.linstor.api.model.VolumeDefinition;
import com.linbit.linstor.api.model.VolumeDefinitionModify;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.inject.Inject;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@ -117,10 +109,9 @@ import org.apache.cloudstack.storage.snapshot.SnapshotObject;
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.storage.volume.VolumeObject;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.nio.charset.StandardCharsets;
@ -335,275 +326,11 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
}
private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
    // Map the Linstor answer severity onto the matching log level; answers
    // carrying none of the three severity flags are not logged at all.
    final String message = answer.getMessage();
    if (answer.isError()) {
        logger.error(message);
        return;
    }
    if (answer.isWarning()) {
        logger.warn(message);
        return;
    }
    if (answer.isInfo()) {
        logger.info(message);
    }
}
private void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
    // Log each Linstor answer individually at its own severity.
    for (final ApiCallRc answer : answers) {
        logLinstorAnswer(answer);
    }
}
private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
    // Always log every answer first, then fail fast on the first error.
    logLinstorAnswers(answers);
    if (!answers.hasError()) {
        return;
    }
    String errMsg = "Unknown linstor error";
    for (final ApiCallRc answer : answers) {
        if (answer.isError()) {
            errMsg = answer.getMessage();
            break;
        }
    }
    throw new CloudRuntimeException(errMsg);
}
private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) {
logLinstorAnswers(answers);
LinstorUtil.logLinstorAnswers(answers);
return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null);
}
/**
 * Aligns the Linstor volume-definition QoS (blkio throttle IOPS properties) of
 * the given resource with the requested limit, and adjusts the storage pool's
 * remaining IOPS capacity bookkeeping accordingly.
 *
 * @param storagePool pool whose capacity IOPS counter is updated in the DB
 * @param api         Linstor developers API client
 * @param rscName     Linstor resource definition name to modify
 * @param maxIops     requested IOPS limit, or null to remove any existing limit
 * @throws ApiException if a Linstor API call fails
 */
private void applyQoSSettings(StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops)
    throws ApiException
{
    Long currentQosIops = null;
    // Read the currently applied limit from the first volume definition; a
    // value of 0 (or an absent property) is normalized to null = "no limit".
    List<VolumeDefinition> vlmDfns = api.volumeDefinitionList(rscName, null, null);
    if (!vlmDfns.isEmpty())
    {
        Properties props = vlmDfns.get(0).getProps();
        long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0"));
        currentQosIops = iops > 0 ? iops : null;
    }
    // Only touch Linstor and the pool capacity when the limit actually changes.
    if (!Objects.equals(maxIops, currentQosIops))
    {
        VolumeDefinitionModify vdm = new VolumeDefinitionModify();
        if (maxIops != null)
        {
            // Apply the same limit for reads and writes.
            Properties props = new Properties();
            props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops);
            props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops);
            vdm.overrideProps(props);
            logger.info("Apply qos setting: " + maxIops + " to " + rscName);
        }
        else
        {
            logger.info("Remove QoS setting for " + rscName);
            vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops"));
        }
        // Volume number 0 is modified; throws on Linstor-side errors.
        ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm);
        checkLinstorAnswersThrow(answers);

        // Re-balance the pool's IOPS capacity: credit back the old limit (if
        // any) and debit the new one, clamping at zero.
        Long capacityIops = storagePool.getCapacityIops();
        if (capacityIops != null)
        {
            long vcIops = currentQosIops != null ? currentQosIops * -1 : 0;
            long vMaxIops = maxIops != null ? maxIops : 0;
            long newIops = vcIops + vMaxIops;
            capacityIops -= newIops;
            logger.info(String.format("Current storagepool %s iops capacity: %d", storagePool, capacityIops));
            storagePool.setCapacityIops(Math.max(0, capacityIops));
            _storagePoolDao.update(storagePool.getId(), storagePool);
        }
    }
}
private String getRscGrp(StoragePool storagePool) {
    // The pool's user info carries the Linstor resource-group name; fall back
    // to Linstor's default group when it is unset or empty.
    final String userInfo = storagePool.getUserInfo();
    if (userInfo == null || userInfo.isEmpty()) {
        return "DfltRscGrp";
    }
    return userInfo;
}
/**
 * Returns the layer list of the resource group with encryption (LUKS) added above STORAGE.
 * If the resource group's layer list already contains LUKS, that list is returned unchanged.
 *
 * @param api Linstor developers API
 * @param resourceGroup Resource group to get the encryption layer list for
 * @return layer list with LUKS added
 * @throws CloudRuntimeException if the resource group does not exist or the API call fails
 */
public List<LayerType> getEncryptedLayerList(DevelopersApi api, String resourceGroup) {
    try {
        List<ResourceGroup> rscGrps = api.resourceGroupList(
            Collections.singletonList(resourceGroup), Collections.emptyList(), null, null);
        if (CollectionUtils.isEmpty(rscGrps)) {
            throw new CloudRuntimeException(
                String.format("Resource Group %s not found on Linstor cluster.", resourceGroup));
        }
        final ResourceGroup rscGrp = rscGrps.get(0);
        // Default stack used when the resource group defines no layer stack of its own.
        List<LayerType> layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE);
        List<String> curLayerStack = rscGrp.getSelectFilter() != null ?
            rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList();
        if (CollectionUtils.isNotEmpty(curLayerStack)) {
            // Collect into an explicitly mutable list: Collectors.toList() makes
            // no mutability guarantee, and LUKS may need to be inserted below.
            layers = curLayerStack.stream()
                .map(LayerType::valueOf)
                .collect(Collectors.toCollection(java.util.ArrayList::new));
            if (!layers.contains(LayerType.LUKS)) {
                layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE
            }
        }
        return layers;
    } catch (ApiException e) {
        // Preserve the underlying API failure instead of masking every error
        // as "not found" and dropping the cause.
        throw new CloudRuntimeException(
            String.format("Failed to query resource group %s on Linstor cluster: %s",
                resourceGroup, e.getBestMessage()), e);
    }
}
/**
 * Spawns a new Linstor resource from a resource group with the given arguments.
 *
 * @param api          Linstor developers API client
 * @param newRscName   name of the resource definition to spawn
 * @param sizeInBytes  volume size in bytes (converted to KiB for Linstor)
 * @param isTemplate   marks the resource as a template via an aux property
 * @param rscGrpName   resource group to spawn from
 * @param volName      CloudStack volume name, applied as aux property
 * @param vmName       attached VM name, applied as aux property (may be null)
 * @param passPhraseId non-null when the volume is encrypted; selects a LUKS layer stack
 * @param passPhrase   optional raw passphrase bytes, passed to Linstor as UTF-8
 * @throws ApiException if a Linstor API call fails
 */
private void spawnResource(
    DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName,
    String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase) throws ApiException
{
    ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
    rscGrpSpawn.setResourceDefinitionName(newRscName);
    // Linstor expects the volume size in KiB.
    rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024);

    if (passPhraseId != null) {
        // Encrypted volume: force a layer stack that includes LUKS.
        AutoSelectFilter asf = new AutoSelectFilter();
        List<LayerType> luksLayers = getEncryptedLayerList(api, rscGrpName);
        asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList()));
        rscGrpSpawn.setSelectFilter(asf);
        if (passPhrase != null) {
            String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8);
            rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase));
        }
    }

    if (isTemplate) {
        // Tag template resources so later lookups can find and reuse them.
        Properties props = new Properties();
        props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true");
        rscGrpSpawn.setResourceDefinitionProps(props);
    }

    logger.info("Linstor: Spawn resource " + newRscName);
    ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn);
    checkLinstorAnswersThrow(answers);

    // Record volume/VM metadata on the new resource; also throws on errors.
    answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName);
    checkLinstorAnswersThrow(answers);
}
/**
 * Condition if a template resource can be shared with the given resource group:
 * both groups must select the same layer stack and the same storage pool list.
 *
 * @param tgtRscGrp     target resource group
 * @param tgtLayerStack target group's layer stack (null when it has no select filter)
 * @param rg            candidate resource group owning an existing template copy
 * @return True if the template resource can be shared, else false.
 */
private boolean canShareTemplateForResourceGroup(
    ResourceGroup tgtRscGrp, List<String> tgtLayerStack, ResourceGroup rg) {
    // Guard every select-filter access: a resource group without a select
    // filter has no layer-stack or storage-pool restriction. The original
    // code only guarded the layer stack and NPE'd on getStoragePoolList().
    List<String> rgLayerStack = rg.getSelectFilter() != null ?
        rg.getSelectFilter().getLayerStack() : null;
    List<String> tgtPoolList = tgtRscGrp.getSelectFilter() != null ?
        tgtRscGrp.getSelectFilter().getStoragePoolList() : null;
    List<String> rgPoolList = rg.getSelectFilter() != null ?
        rg.getSelectFilter().getStoragePoolList() : null;
    return Objects.equals(tgtLayerStack, rgLayerStack) &&
        Objects.equals(tgtPoolList, rgPoolList);
}
/**
 * Searches the already-existing resource definitions for a template copy that
 * can be shared with this resource group (i.e. whose owning group's select
 * filter is compatible) and, if found, tags it with the aux template property
 * for rscGrpName so it can be reused instead of spawning a new copy.
 *
 * @param api        Linstor developers API client
 * @param rscName    base resource name of the template
 * @param rscGrpName resource group the template should be usable from
 * @param existingRDs existing resource definitions (with their groups) whose
 *                    names start with rscName
 * @return true if a shareable template was found and tagged, else false
 * @throws ApiException if a Linstor API call fails
 */
private boolean foundShareableTemplate(
    DevelopersApi api, String rscName, String rscGrpName,
    List<Pair<ResourceDefinition, ResourceGroup>> existingRDs) throws ApiException {
    if (!existingRDs.isEmpty()) {
        // Resolve the target group to compare its select filter against the
        // groups owning the existing copies.
        ResourceGroup tgtRscGrp = api.resourceGroupList(
            Collections.singletonList(rscGrpName), null, null, null).get(0);
        List<String> tgtLayerStack = tgtRscGrp.getSelectFilter() != null ?
            tgtRscGrp.getSelectFilter().getLayerStack() : null;
        // check if there is already a template copy, that we could reuse
        // this means if select filters are similar enough to allow cloning from
        for (Pair<ResourceDefinition, ResourceGroup> rdPair : existingRDs) {
            ResourceGroup rg = rdPair.second();
            if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) {
                LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName);
                return true;
            }
        }
    }
    return false;
}
/**
 * Creates a new Linstor resource, unless an equivalent resource already exists
 * or an existing template copy can be reused for this resource group.
 *
 * @param rscName      base resource name
 * @param sizeInBytes  volume size in bytes
 * @param volName      CloudStack volume name (aux property)
 * @param vmName       attached VM name (aux property, may be null)
 * @param passPhraseId non-null for encrypted volumes
 * @param passPhrase   optional raw passphrase bytes
 * @param api          Linstor developers API client
 * @param rscGrp       resource group to spawn from
 * @param poolId       storage pool id, used to disambiguate per-pool copies
 * @param isTemplate   indicates if the resource is a template
 * @return true if a new resource was created, false if it already existed or was reused.
 */
private boolean createResourceBase(
    String rscName, long sizeInBytes, String volName, String vmName,
    @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api,
    String rscGrp, long poolId, boolean isTemplate)
{
    try
    {
        logger.debug("createRscBase: {} :: {} :: {}", rscName, rscGrp, isTemplate);
        List<Pair<ResourceDefinition, ResourceGroup>> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName);
        // Per-pool copies of a template are suffixed with the pool id.
        String fullRscName = String.format("%s-%d", rscName, poolId);
        // Already created when a pool-specific copy exists, or when some copy
        // is tagged as "template for" this resource group.
        boolean alreadyCreated = existingRDs.stream()
            .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) ||
            existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp)));

        if (!alreadyCreated) {
            // Try to reuse a compatible existing template copy before spawning.
            boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs);
            if (createNewRsc) {
                // First copy keeps the plain name; later copies get the pool suffix.
                String newRscName = existingRDs.isEmpty() ? rscName : fullRscName;
                spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp,
                    volName, vmName, passPhraseId, passPhrase);
            }
            return createNewRsc;
        }
        return false;
    } catch (ApiException apiEx)
    {
        logger.error("Linstor: ApiEx - " + apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
/**
 * Creates a Linstor resource for the given volume, applies its QoS limit and
 * returns the resulting device path.
 */
private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO) {
    final DevelopersApi api = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
    final String resourceGroup = getRscGrp(storagePoolVO);
    final String resourceName = LinstorUtil.RSC_PREFIX + vol.getUuid();

    createResourceBase(
        resourceName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(),
        api, resourceGroup, storagePoolVO.getId(), false);

    try {
        applyQoSSettings(storagePoolVO, api, resourceName, vol.getMaxIops());
        return LinstorUtil.getDevicePath(api, resourceName);
    } catch (ApiException apiEx) {
        logger.error("Linstor: ApiEx - " + apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
private void resizeResource(DevelopersApi api, String resourceName, long sizeByte) throws ApiException {
VolumeDefinitionModify dfm = new VolumeDefinitionModify();
dfm.setSizeKib(sizeByte / 1024);
@ -688,13 +415,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
try {
ResourceDefinition templateRD = LinstorUtil.findResourceDefinition(
linstorApi, templateRscName, getRscGrp(storagePoolVO));
linstorApi, templateRscName, LinstorUtil.getRscGrp(storagePoolVO));
final String cloneRes = templateRD != null ? templateRD.getName() : templateRscName;
logger.info("Clone resource definition {} to {}", cloneRes, rscName);
ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest();
cloneRequest.setName(rscName);
if (volumeInfo.getPassphraseId() != null) {
List<LayerType> encryptionLayer = getEncryptedLayerList(linstorApi, getRscGrp(storagePoolVO));
List<LayerType> encryptionLayer = LinstorUtil.getEncryptedLayerList(
linstorApi, LinstorUtil.getRscGrp(storagePoolVO));
cloneRequest.setLayerList(encryptionLayer);
if (volumeInfo.getPassphrase() != null) {
String utf8Passphrase = new String(volumeInfo.getPassphrase(), StandardCharsets.UTF_8);
@ -704,7 +432,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone(
cloneRes, cloneRequest);
checkLinstorAnswersThrow(cloneStarted.getMessages());
LinstorUtil.checkLinstorAnswersThrow(cloneStarted.getMessages());
if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) {
throw new CloudRuntimeException("Clone for resource " + rscName + " failed.");
@ -716,11 +444,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
resizeResource(linstorApi, rscName, volumeInfo.getSize());
}
updateRscGrpIfNecessary(linstorApi, rscName, getRscGrp(storagePoolVO));
updateRscGrpIfNecessary(linstorApi, rscName, LinstorUtil.getRscGrp(storagePoolVO));
deleteTemplateForProps(linstorApi, rscName);
LinstorUtil.applyAuxProps(linstorApi, rscName, volumeInfo.getName(), volumeInfo.getAttachedVmName());
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
LinstorUtil.applyQoSSettings(
_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeInfo.getMaxIops());
return LinstorUtil.getDevicePath(linstorApi, rscName);
} catch (ApiException apiEx) {
@ -744,7 +473,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
}
private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) {
final String rscGrp = getRscGrp(storagePoolVO);
final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO);
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId);
@ -757,22 +486,22 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
logger.debug("Create new resource definition: " + rscName);
ResourceDefinitionCreate rdCreate = createResourceDefinitionCreate(rscName, rscGrp);
ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
SnapshotRestore snapshotRestore = new SnapshotRestore();
snapshotRestore.toResource(rscName);
logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName);
answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
// restore snapshot to new resource
logger.info("Restore resource from snapshot: " + cloneRes + ":" + snapName);
answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore);
checkLinstorAnswersThrow(answers);
LinstorUtil.checkLinstorAnswersThrow(answers);
LinstorUtil.applyAuxProps(linstorApi, rscName, volumeVO.getName(), null);
applyQoSSettings(storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops());
LinstorUtil.applyQoSSettings(_storagePoolDao, storagePoolVO, linstorApi, rscName, volumeVO.getMaxIops());
return LinstorUtil.getDevicePath(linstorApi, rscName);
} catch (ApiException apiEx) {
@ -790,7 +519,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
} else if (csTemplateId > 0) {
return cloneResource(csTemplateId, volumeInfo, storagePoolVO);
} else {
return createResource(volumeInfo, storagePoolVO);
return LinstorUtil.createResource(volumeInfo, storagePoolVO, _storagePoolDao);
}
}
@ -1140,7 +869,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
String rscName,
String snapshotName,
String restoredName) throws ApiException {
final String rscGrp = getRscGrp(storagePoolVO);
final String rscGrp = LinstorUtil.getRscGrp(storagePoolVO);
// try to delete -rst resource, could happen if the copy failed and noone deleted it.
deleteResourceDefinition(storagePoolVO, restoredName);
ResourceDefinitionCreate rdc = createResourceDefinitionCreate(restoredName, rscGrp);
@ -1185,7 +914,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
final StoragePoolVO pool = _storagePoolDao.findById(dstData.getDataStore().getId());
final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
final String rscName = LinstorUtil.RSC_PREFIX + dstData.getUuid();
boolean newCreated = createResourceBase(
boolean newCreated = LinstorUtil.createResourceBase(
LinstorUtil.RSC_PREFIX + dstData.getUuid(),
tInfo.getSize(),
tInfo.getName(),
@ -1193,9 +922,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
null,
null,
api,
getRscGrp(pool),
LinstorUtil.getRscGrp(pool),
pool.getId(),
true);
true,
false);
Answer answer;
if (newCreated) {
@ -1429,7 +1159,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
{
resizeResource(api, rscName, resizeParameter.newSize);
applyQoSSettings(pool, api, rscName, resizeParameter.newMaxIops);
LinstorUtil.applyQoSSettings(_storagePoolDao, pool, api, rscName, resizeParameter.newMaxIops);
{
final VolumeVO volume = _volumeDao.findById(vol.getId());
volume.setMinIops(resizeParameter.newMinIops);
@ -1534,7 +1264,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
@Override
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
logger.debug(String.format("Requesting storage stats: %s", storagePool));
return LinstorUtil.getStorageStats(storagePool.getHostAddress(), getRscGrp(storagePool));
return LinstorUtil.getStorageStats(storagePool.getHostAddress(), LinstorUtil.getRscGrp(storagePool));
}
@Override

View File

@ -22,6 +22,8 @@ import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRc;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.AutoSelectFilter;
import com.linbit.linstor.api.model.LayerType;
import com.linbit.linstor.api.model.Node;
import com.linbit.linstor.api.model.Properties;
import com.linbit.linstor.api.model.ProviderKind;
@ -29,24 +31,36 @@ import com.linbit.linstor.api.model.Resource;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroup;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceWithVolumes;
import com.linbit.linstor.api.model.StoragePool;
import com.linbit.linstor.api.model.Volume;
import com.linbit.linstor.api.model.VolumeDefinition;
import com.linbit.linstor.api.model.VolumeDefinitionModify;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.logging.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.commons.collections.CollectionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.nio.charset.StandardCharsets;
public class LinstorUtil {
protected static Logger LOGGER = LogManager.getLogger(LinstorUtil.class);
@ -56,6 +70,8 @@ public class LinstorUtil {
public static final String RSC_GROUP = "resourceGroup";
public static final String CS_TEMPLATE_FOR_PREFIX = "_cs-template-for-";
public static final String LIN_PROP_DRBDOPT_EXACT_SIZE = "DrbdOptions/ExactSize";
public static final String TEMP_VOLUME_ID = "tempVolumeId";
public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops";
@ -76,6 +92,32 @@ public class LinstorUtil {
.orElse((answers.get(0)).getMessage()) : null;
}
/**
 * Logs a single Linstor {@link ApiCallRc} at the log level matching its
 * severity (error, warning or info); answers of any other severity are
 * not logged at all.
 *
 * @param answer the Linstor API call result to log
 */
public static void logLinstorAnswer(@Nonnull ApiCallRc answer) {
    final String message = answer.getMessage();
    if (answer.isError()) {
        LOGGER.error(message);
        return;
    }
    if (answer.isWarning()) {
        LOGGER.warn(message);
        return;
    }
    if (answer.isInfo()) {
        LOGGER.info(message);
    }
}
/**
 * Logs every answer in a Linstor {@link ApiCallRcList}, each at the log
 * level matching its severity.
 *
 * @param answers list of Linstor API call results to log
 */
public static void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
    for (ApiCallRc answer : answers) {
        logLinstorAnswer(answer);
    }
}
/**
 * Logs all given Linstor answers and, if any of them reports an error,
 * throws a {@link CloudRuntimeException} carrying the first error message.
 *
 * @param answers list of Linstor API call results to check
 * @throws CloudRuntimeException if {@code answers} contains at least one error
 */
public static void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
    logLinstorAnswers(answers);
    if (!answers.hasError()) {
        return;
    }
    // Surface the first error message; fall back to a generic one if none is set.
    String errMsg = "Unknown linstor error";
    for (ApiCallRc answer : answers) {
        if (answer.isError()) {
            errMsg = answer.getMessage();
            break;
        }
    }
    throw new CloudRuntimeException(errMsg);
}
public static List<String> getLinstorNodeNames(@Nonnull DevelopersApi api) throws ApiException
{
List<Node> nodes = api.nodeList(
@ -488,4 +530,253 @@ public class LinstorUtil {
}
return false;
}
/**
 * Resolves the Linstor resource group name configured for a storage pool,
 * falling back to the Linstor default group when none is configured.
 *
 * @param storagePool storage pool whose user info may carry a resource group name
 * @return the configured resource group name, or {@code "DfltRscGrp"} if unset or empty
 */
public static String getRscGrp(com.cloud.storage.StoragePool storagePool) {
    final String userInfo = storagePool.getUserInfo();
    if (userInfo == null || userInfo.isEmpty()) {
        return "DfltRscGrp";
    }
    return userInfo;
}
/**
 * Condition if a template resource can be shared with the given resource group:
 * both groups must request the same layer stack and the same storage pool list
 * in their auto-select filters.
 *
 * @param tgtRscGrp target resource group the template would be used with
 * @param tgtLayerStack layer stack requested by the target resource group (may be null)
 * @param rg resource group the existing template belongs to
 * @return True if the template resource can be shared, else false.
 */
private static boolean canShareTemplateForResourceGroup(
        ResourceGroup tgtRscGrp, List<String> tgtLayerStack, ResourceGroup rg) {
    // A select filter may legitimately be absent on either group (callers already
    // treat a missing filter as "no layer stack"); treat it the same way for the
    // storage pool list instead of risking an NPE on getSelectFilter().
    AutoSelectFilter tgtFilter = tgtRscGrp.getSelectFilter();
    AutoSelectFilter rgFilter = rg.getSelectFilter();
    List<String> rgLayerStack = rgFilter != null ? rgFilter.getLayerStack() : null;
    List<String> tgtStoragePools = tgtFilter != null ? tgtFilter.getStoragePoolList() : null;
    List<String> rgStoragePools = rgFilter != null ? rgFilter.getStoragePoolList() : null;
    return Objects.equals(tgtLayerStack, rgLayerStack) &&
        Objects.equals(tgtStoragePools, rgStoragePools);
}
/**
 * Searches the given resource definitions for a template whose resource group's
 * select filter is compatible with the target resource group and, if one is
 * found, marks {@code rscName} with the aux template property for that group.
 *
 * @param api Linstor developers API
 * @param rscName resource name to tag with the aux template property on success
 * @param rscGrpName target resource group name
 * @param existingRDs resource definitions (paired with their resource groups) sharing the base name
 * @return true if a shareable template was found and tagged, else false
 * @throws ApiException on Linstor API errors
 */
private static boolean foundShareableTemplate(
    DevelopersApi api, String rscName, String rscGrpName,
    List<Pair<ResourceDefinition, ResourceGroup>> existingRDs) throws ApiException {
    if (!existingRDs.isEmpty()) {
        ResourceGroup tgtRscGrp = api.resourceGroupList(
            Collections.singletonList(rscGrpName), null, null, null).get(0);
        List<String> tgtLayerStack = tgtRscGrp.getSelectFilter() != null ?
            tgtRscGrp.getSelectFilter().getLayerStack() : null;

        // check if there is already a template copy, that we could reuse
        // this means if select filters are similar enough to allow cloning from
        for (Pair<ResourceDefinition, ResourceGroup> rdPair : existingRDs) {
            ResourceGroup rg = rdPair.second();
            if (canShareTemplateForResourceGroup(tgtRscGrp, tgtLayerStack, rg)) {
                LinstorUtil.setAuxTemplateForProperty(api, rscName, rscGrpName);
                return true;
            }
        }
    }
    return false;
}
/**
 * Returns the layer list of the resourceGroup with encryption (LUKS) added above STORAGE.
 * If the resource group's layer list already contains LUKS, that layer list is
 * returned unchanged. If the group defines no explicit layer stack, the default
 * {@code DRBD,LUKS,STORAGE} stack is returned.
 *
 * @param api Linstor developers API
 * @param resourceGroup Resource group to get the encryption layer list
 * @return layer list with LUKS added
 * @throws CloudRuntimeException if the resource group does not exist or cannot be fetched
 */
public static List<LayerType> getEncryptedLayerList(DevelopersApi api, String resourceGroup) {
    try {
        List<ResourceGroup> rscGrps = api.resourceGroupList(
            Collections.singletonList(resourceGroup), Collections.emptyList(), null, null);
        if (CollectionUtils.isEmpty(rscGrps)) {
            throw new CloudRuntimeException(
                String.format("Resource Group %s not found on Linstor cluster.", resourceGroup));
        }
        final ResourceGroup rscGrp = rscGrps.get(0);

        // Default stack used when the group has no explicit layer stack configured.
        List<LayerType> layers = Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE);
        List<String> curLayerStack = rscGrp.getSelectFilter() != null ?
            rscGrp.getSelectFilter().getLayerStack() : Collections.emptyList();
        if (CollectionUtils.isNotEmpty(curLayerStack)) {
            layers = curLayerStack.stream().map(LayerType::valueOf).collect(Collectors.toList());
            if (!layers.contains(LayerType.LUKS)) {
                layers.add(layers.size() - 1, LayerType.LUKS); // lowest layer is STORAGE
            }
        }
        return layers;
    } catch (ApiException e) {
        // Keep the API failure as the cause instead of swallowing it and claiming
        // the group was not found.
        throw new CloudRuntimeException(
            String.format("Unable to fetch resource group %s from Linstor cluster.", resourceGroup), e);
    }
}
/**
 * Spawns a new Linstor resource definition from the given resource group.
 *
 * @param api Linstor developers API
 * @param newRscName name of the resource definition to create
 * @param sizeInBytes volume size in bytes (converted to KiB for Linstor)
 * @param isTemplate true if the resource should be tagged as a template for the resource group
 * @param rscGrpName resource group to spawn from
 * @param volName CloudStack volume name, applied as an aux property
 * @param vmName name of the VM the volume is attached to, applied as an aux property
 * @param passPhraseId id of the encryption passphrase, or null for unencrypted volumes
 * @param passPhrase encryption passphrase bytes (UTF-8), or null
 * @param exactSize true to set the DRBD exact-size property on the resource definition
 * @throws ApiException on Linstor API errors
 */
private static void spawnResource(
    DevelopersApi api, String newRscName, long sizeInBytes, boolean isTemplate, String rscGrpName,
    String volName, String vmName, @Nullable Long passPhraseId, @Nullable byte[] passPhrase,
    boolean exactSize) throws ApiException
{
    ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
    rscGrpSpawn.setResourceDefinitionName(newRscName);
    rscGrpSpawn.addVolumeSizesItem(sizeInBytes / 1024); // Linstor expects KiB

    if (passPhraseId != null) {
        // Encrypted volume: spawn with a layer stack that includes LUKS.
        AutoSelectFilter asf = new AutoSelectFilter();
        List<LayerType> luksLayers = getEncryptedLayerList(api, rscGrpName);
        asf.setLayerStack(luksLayers.stream().map(LayerType::toString).collect(Collectors.toList()));
        rscGrpSpawn.setSelectFilter(asf);
        if (passPhrase != null) {
            String utf8Passphrase = new String(passPhrase, StandardCharsets.UTF_8);
            rscGrpSpawn.setVolumePassphrases(Collections.singletonList(utf8Passphrase));
        }
    }

    Properties props = new Properties();
    if (isTemplate) {
        props.put(LinstorUtil.getTemplateForAuxPropKey(rscGrpName), "true");
    }
    if (exactSize) {
        props.put(LIN_PROP_DRBDOPT_EXACT_SIZE, "true");
    }
    rscGrpSpawn.setResourceDefinitionProps(props);

    // Parameterized logging for consistency with the rest of this class.
    LOGGER.info("Linstor: Spawn resource {}", newRscName);
    ApiCallRcList answers = api.resourceGroupSpawn(rscGrpName, rscGrpSpawn);
    checkLinstorAnswersThrow(answers);

    answers = LinstorUtil.applyAuxProps(api, newRscName, volName, vmName);
    checkLinstorAnswersThrow(answers);
}
/**
 * Creates a new Linstor resource unless it already exists or an existing
 * template resource can be reused for the given resource group.
 *
 * @param rscName base resource name (without the pool-id suffix)
 * @param sizeInBytes volume size in bytes
 * @param volName CloudStack volume name, applied as an aux property
 * @param vmName name of the VM the volume is attached to, applied as an aux property
 * @param passPhraseId id of the encryption passphrase, or null for unencrypted volumes
 * @param passPhrase encryption passphrase bytes (UTF-8), or null
 * @param api Linstor developers API
 * @param rscGrp resource group to create the resource in
 * @param poolId CloudStack storage pool id, used to build a pool-unique resource name
 * @param isTemplate indicates if the resource is a template
 * @param exactSize true to set the DRBD exact-size property on the new resource
 * @return true if a new resource was created, false if it already existed or was reused.
 * @throws CloudRuntimeException on Linstor API errors
 */
public static boolean createResourceBase(
    String rscName, long sizeInBytes, String volName, String vmName,
    @Nullable Long passPhraseId, @Nullable byte[] passPhrase, DevelopersApi api,
    String rscGrp, long poolId, boolean isTemplate, boolean exactSize)
{
    try
    {
        LOGGER.debug("createRscBase: {} :: {} :: {} :: {}", rscName, rscGrp, isTemplate, exactSize);
        // All resource definitions sharing this base name (e.g. per-pool template copies).
        List<Pair<ResourceDefinition, ResourceGroup>> existingRDs = LinstorUtil.getRDAndRGListStartingWith(api, rscName);
        String fullRscName = String.format("%s-%d", rscName, poolId);
        // Already created if the pool-suffixed name exists, or some definition is
        // already tagged as template for this resource group.
        boolean alreadyCreated = existingRDs.stream()
            .anyMatch(p -> p.first().getName().equalsIgnoreCase(fullRscName)) ||
            existingRDs.stream().anyMatch(p -> p.first().getProps().containsKey(LinstorUtil.getTemplateForAuxPropKey(rscGrp)));

        if (!alreadyCreated) {
            // Only spawn a new resource if no compatible template could be tagged for reuse.
            boolean createNewRsc = !foundShareableTemplate(api, rscName, rscGrp, existingRDs);
            if (createNewRsc) {
                // The first copy keeps the base name; later copies use the pool-suffixed name.
                String newRscName = existingRDs.isEmpty() ? rscName : fullRscName;
                spawnResource(api, newRscName, sizeInBytes, isTemplate, rscGrp,
                    volName, vmName, passPhraseId, passPhrase, exactSize);
            }
            return createNewRsc;
        }
        return false;
    } catch (ApiException apiEx)
    {
        LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
/**
 * Applies (or removes) blkio IOPS throttle properties on the first volume
 * definition of the given resource and adjusts the storage pool's remaining
 * IOPS capacity by the delta between the old and new limit.
 *
 * @param primaryDataStoreDao DAO used to persist the updated pool capacity
 * @param storagePool storage pool the resource lives on
 * @param api Linstor developers API
 * @param rscName resource definition name to modify
 * @param maxIops new IOPS limit, or null to remove any existing limit
 * @throws ApiException on Linstor API errors
 */
public static void applyQoSSettings(PrimaryDataStoreDao primaryDataStoreDao,
    StoragePoolVO storagePool, DevelopersApi api, String rscName, Long maxIops)
    throws ApiException
{
    // Currently applied limit, read back from the write-IOPS property
    // (0 or missing is treated as "no limit").
    Long currentQosIops = null;
    List<VolumeDefinition> vlmDfns = api.volumeDefinitionList(rscName, null, null);
    if (!vlmDfns.isEmpty())
    {
        Properties props = vlmDfns.get(0).getProps();
        long iops = Long.parseLong(props.getOrDefault("sys/fs/blkio_throttle_write_iops", "0"));
        currentQosIops = iops > 0 ? iops : null;
    }

    // Only touch Linstor and the pool capacity when the limit actually changes.
    if (!Objects.equals(maxIops, currentQosIops))
    {
        VolumeDefinitionModify vdm = new VolumeDefinitionModify();
        if (maxIops != null)
        {
            Properties props = new Properties();
            props.put("sys/fs/blkio_throttle_read_iops", "" + maxIops);
            props.put("sys/fs/blkio_throttle_write_iops", "" + maxIops);
            vdm.overrideProps(props);
            LOGGER.info("Apply qos setting: {} to {}", maxIops, rscName);
        }
        else
        {
            LOGGER.info("Remove QoS setting for {}", rscName);
            vdm.deleteProps(Arrays.asList("sys/fs/blkio_throttle_read_iops", "sys/fs/blkio_throttle_write_iops"));
        }
        // Modifies volume definition 0 — the same one the current limit was read from.
        ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, vdm);
        LinstorUtil.checkLinstorAnswersThrow(answers);

        Long capacityIops = storagePool.getCapacityIops();
        if (capacityIops != null)
        {
            // newIops = (new limit) - (old limit); subtracting the delta releases
            // capacity for a lowered/removed limit and consumes it for a raised one.
            long vcIops = currentQosIops != null ? currentQosIops * -1 : 0;
            long vMaxIops = maxIops != null ? maxIops : 0;
            long newIops = vcIops + vMaxIops;
            capacityIops -= newIops;
            LOGGER.info("Current storagepool {} iops capacity: {}", storagePool, capacityIops);
            storagePool.setCapacityIops(Math.max(0, capacityIops));
            primaryDataStoreDao.update(storagePool.getId(), storagePool);
        }
    }
}
/**
 * Creates a Linstor resource for the given volume on the given storage pool
 * without forcing the DRBD exact-size property.
 *
 * @param vol volume to create the resource for
 * @param storagePoolVO Linstor-backed storage pool
 * @param primaryDataStoreDao DAO used to update the pool's IOPS capacity
 * @return the device path of the created resource
 */
public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO,
    PrimaryDataStoreDao primaryDataStoreDao) {
    return createResource(vol, storagePoolVO, primaryDataStoreDao, false);
}
/**
 * Creates a Linstor resource for the given volume, applies the volume's QoS
 * (max IOPS) settings and returns the device path of the resource.
 *
 * @param vol volume to create the resource for
 * @param storagePoolVO Linstor-backed storage pool
 * @param primaryDataStoreDao DAO used to update the pool's IOPS capacity
 * @param exactSize true to set the DRBD exact-size property on the new resource
 * @return the device path of the created resource
 * @throws CloudRuntimeException on Linstor API errors
 */
public static String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO,
    PrimaryDataStoreDao primaryDataStoreDao, boolean exactSize) {
    DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
    final String rscGrp = getRscGrp(storagePoolVO);
    final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
    createResourceBase(
        rscName, vol.getSize(), vol.getName(), vol.getAttachedVmName(), vol.getPassphraseId(), vol.getPassphrase(),
        linstorApi, rscGrp, storagePoolVO.getId(), false, exactSize);

    try
    {
        applyQoSSettings(primaryDataStoreDao, storagePoolVO, linstorApi, rscName, vol.getMaxIops());
        return LinstorUtil.getDevicePath(linstorApi, rscName);
    } catch (ApiException apiEx)
    {
        // Parameterized logging for consistency with the rest of this class.
        LOGGER.error("Linstor: ApiEx - {}", apiEx.getMessage());
        throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
    }
}
}

View File

@ -0,0 +1,437 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.storage.motion;
import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRcList;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.PrepareForMigrationCommand;
import com.cloud.agent.api.to.DataObjectType;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCallFuture;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.stereotype.Component;
/**
 * Data motion strategy that live-migrates a KVM VM together with its volumes
 * onto Linstor-backed primary storage: for each volume a new Linstor resource is
 * created on the destination pool and qemu copies the data during the live
 * migration.
 *
 * Current state:
 * just changing the resource-group on same storage pool resource-group is not really good enough.
 * Linstor lacks currently of a good way to move resources to another resource-group and respecting
 * every auto-filter setting.
 * Also linstor clone would simply set the new resource-group without any adjustments of storage pools or
 * auto-select resource placement.
 * So currently, we will create a new resource in the wanted primary storage and let qemu copy the data into the
 * devices.
 */
@Component
public class LinstorDataMotionStrategy implements DataMotionStrategy {
    protected Logger logger = LogManager.getLogger(getClass());

    @Inject
    private SnapshotDataStoreDao _snapshotStoreDao;
    @Inject
    private PrimaryDataStoreDao _storagePool;
    @Inject
    private VolumeDao _volumeDao;
    @Inject
    private VolumeDataFactory _volumeDataFactory;
    @Inject
    private VMInstanceDao _vmDao;
    @Inject
    private GuestOSDao _guestOsDao;
    @Inject
    private VolumeService _volumeService;
    @Inject
    private GuestOSCategoryDao _guestOsCategoryDao;
    @Inject
    private SnapshotDao _snapshotDao;
    @Inject
    private AgentManager _agentManager;
    @Inject
    private PrimaryDataStoreDao _storagePoolDao;

    /** Single data-object copies are never handled by this strategy. */
    @Override
    public StrategyPriority canHandle(DataObject srcData, DataObject dstData) {
        DataObjectType srcType = srcData.getType();
        DataObjectType dstType = dstData.getType();
        logger.debug("canHandle: {} -> {}", srcType, dstType);
        return StrategyPriority.CANT_HANDLE;
    }

    /** Not supported — this strategy only handles VM migration with volume maps. */
    @Override
    public void copyAsync(DataObject srcData, DataObject destData, Host destHost,
                          AsyncCompletionCallback<CopyCommandResult> callback) {
        throw new CloudRuntimeException("not implemented");
    }

    /**
     * Checks that every destination data store in the map is a Linstor-provided
     * primary storage pool.
     *
     * @param volumeMap source volume to destination data store mapping
     * @return true if the map is non-empty and all destinations are Linstor pools
     */
    private boolean isDestinationLinstorPrimaryStorage(Map<VolumeInfo, DataStore> volumeMap) {
        if (MapUtils.isNotEmpty(volumeMap)) {
            for (DataStore dataStore : volumeMap.values()) {
                StoragePoolVO storagePoolVO = _storagePool.findById(dataStore.getId());
                if (storagePoolVO == null
                        || !storagePoolVO.getStorageProviderName().equals(LinstorUtil.PROVIDER_NAME)) {
                    return false;
                }
            }
        } else {
            return false;
        }
        return true;
    }

    /**
     * Handles host-to-host migrations whose destination volumes all live on
     * Linstor primary storage.
     */
    @Override
    public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
        logger.debug("canHandle -- {}: {} -> {}", volumeMap, srcHost, destHost);
        if (srcHost.getId() != destHost.getId() && isDestinationLinstorPrimaryStorage(volumeMap)) {
            return StrategyPriority.HIGHEST;
        }
        return StrategyPriority.CANT_HANDLE;
    }

    /**
     * Persists a new volume record for the destination pool, copied from the
     * source volume but detached and pointing at the destination pool.
     *
     * @param volume source volume to copy
     * @param storagePoolVO destination storage pool
     * @return the persisted destination volume
     */
    private VolumeVO createNewVolumeVO(Volume volume, StoragePoolVO storagePoolVO) {
        VolumeVO newVol = new VolumeVO(volume);
        newVol.setInstanceId(null);
        newVol.setChainInfo(null);
        newVol.setPath(newVol.getUuid());
        newVol.setFolder(null);
        newVol.setPodId(storagePoolVO.getPodId());
        newVol.setPoolId(storagePoolVO.getId());
        newVol.setLastPoolId(volume.getPoolId());
        return _volumeDao.persist(newVol);
    }

    /**
     * Removes the DRBD exact-size property from the Linstor resource definition
     * backing the given (destination) volume.
     *
     * @param volumeInfo destination volume on a Linstor pool
     * @throws CloudRuntimeException on Linstor API errors
     */
    private void removeExactSizeProperty(VolumeInfo volumeInfo) {
        StoragePoolVO destStoragePool = _storagePool.findById(volumeInfo.getDataStore().getId());
        DevelopersApi api = LinstorUtil.getLinstorAPI(destStoragePool.getHostAddress());

        ResourceDefinitionModify rdm = new ResourceDefinitionModify();
        rdm.setDeleteProps(Collections.singletonList(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE));
        try {
            String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getPath();
            ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm);
            LinstorUtil.checkLinstorAnswersThrow(answers);
        } catch (ApiException apiEx) {
            logger.error("Linstor: ApiEx - {}", apiEx.getMessage());
            throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
        }
    }

    /**
     * Finalizes volume state after the migrate command returned. On success the
     * source volumes are expunged and snapshot references are moved to the
     * destination volumes; on failure the prepared migration is rolled back on
     * the destination host and the destination volumes are destroyed.
     *
     * @param success whether the migration succeeded
     * @param srcVolumeInfoToDestVolumeInfo source-to-destination volume mapping
     * @param vmTO the migrated VM
     * @param destHost the migration destination host
     */
    private void handlePostMigration(boolean success, Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo,
                                     VirtualMachineTO vmTO, Host destHost) {
        if (!success) {
            try {
                // Best effort: undo PrepareForMigration on the destination host.
                PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
                pfmc.setRollback(true);

                Answer pfma = _agentManager.send(destHost.getId(), pfmc);
                if (pfma == null || !pfma.getResult()) {
                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
                    String msg = "Unable to rollback prepare for migration due to the following: " + details;
                    throw new AgentUnavailableException(msg, destHost.getId());
                }
            } catch (Exception e) {
                logger.debug("Failed to disconnect one or more (original) dest volumes", e);
            }
        }

        for (Map.Entry<VolumeInfo, VolumeInfo> entry : srcVolumeInfoToDestVolumeInfo.entrySet()) {
            VolumeInfo srcVolumeInfo = entry.getKey();
            VolumeInfo destVolumeInfo = entry.getValue();

            if (success) {
                srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationSucceeded);

                _volumeDao.updateUuid(srcVolumeInfo.getId(), destVolumeInfo.getId());
                VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId());
                volumeVO.setFormat(Storage.ImageFormat.QCOW2);
                _volumeDao.update(volumeVO.getId(), volumeVO);

                // remove exact size property
                removeExactSizeProperty(destVolumeInfo);

                try {
                    _volumeService.destroyVolume(srcVolumeInfo.getId());

                    srcVolumeInfo = _volumeDataFactory.getVolume(srcVolumeInfo.getId());

                    AsyncCallFuture<VolumeService.VolumeApiResult> destroyFuture =
                        _volumeService.expungeVolumeAsync(srcVolumeInfo);

                    if (destroyFuture.get().isFailed()) {
                        logger.debug("Failed to clean up source volume on storage");
                    }
                } catch (Exception e) {
                    logger.debug("Failed to clean up source volume on storage", e);
                }

                // Update the volume ID for snapshots on secondary storage
                if (!_snapshotDao.listByVolumeId(srcVolumeInfo.getId()).isEmpty()) {
                    _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
                    _snapshotStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId());
                }
            } else {
                try {
                    _volumeService.revokeAccess(destVolumeInfo, destHost, destVolumeInfo.getDataStore());
                } catch (Exception e) {
                    logger.debug("Failed to revoke access from dest volume", e);
                }

                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
                srcVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);

                try {
                    _volumeService.destroyVolume(destVolumeInfo.getId());

                    destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId());

                    AsyncCallFuture<VolumeService.VolumeApiResult> destroyFuture =
                        _volumeService.expungeVolumeAsync(destVolumeInfo);

                    if (destroyFuture.get().isFailed()) {
                        logger.debug("Failed to clean up dest volume on storage");
                    }
                } catch (Exception e) {
                    logger.debug("Failed to clean up dest volume on storage", e);
                }
            }
        }
    }

    /**
     * Determines whether the destination volume should have the DRBD exact-size property set
     * during migration.
     *
     * <p>This method queries the Linstor API to check if the source volume's resource definition
     * has the exact-size DRBD option enabled. The exact-size property ensures that DRBD uses
     * the precise volume size rather than rounding, which is important for maintaining size
     * consistency during migrations.</p>
     *
     * @param srcVolumeInfo the source volume information to check
     * @return {@code true} if the exact-size property should be set on the destination volume,
     *         which occurs when the source volume has this property enabled, or when the
     *         property cannot be determined (defaults to {@code true} for safety);
     *         {@code false} only when the source is confirmed to not have the exact-size property
     */
    private boolean needsExactSizeProp(VolumeInfo srcVolumeInfo) {
        StoragePoolVO srcStoragePool = _storagePool.findById(srcVolumeInfo.getDataStore().getId());
        if (srcStoragePool.getPoolType() == Storage.StoragePoolType.Linstor) {
            DevelopersApi api = LinstorUtil.getLinstorAPI(srcStoragePool.getHostAddress());
            String rscName = LinstorUtil.RSC_PREFIX + srcVolumeInfo.getPath();
            try {
                List<ResourceDefinition> rscDfns = api.resourceDefinitionList(
                    Collections.singletonList(rscName),
                    false,
                    Collections.emptyList(),
                    null,
                    null);
                if (!CollectionUtils.isEmpty(rscDfns)) {
                    ResourceDefinition srcRsc = rscDfns.get(0);
                    String exactSizeProp = srcRsc.getProps().get(LinstorUtil.LIN_PROP_DRBDOPT_EXACT_SIZE);
                    return "true".equalsIgnoreCase(exactSizeProp);
                } else {
                    logger.warn("Unknown resource {} on {}", rscName, srcStoragePool.getHostAddress());
                }
            } catch (ApiException apiEx) {
                logger.error("Unable to fetch resource definition {}: {}", rscName, apiEx.getBestMessage());
            }
        }
        return true;
    }

    /**
     * Live-migrates a KVM VM and its volumes to the destination host/pools:
     * creates a destination Linstor resource per volume, prepares the migration
     * on the destination host, sends the migrate command to the source host and
     * finalizes (or rolls back) the volume state afterwards. The callback is
     * always completed, with {@code errMsg} set on failure.
     */
    @Override
    public void copyAsync(Map<VolumeInfo, DataStore> volumeDataStoreMap, VirtualMachineTO vmTO, Host srcHost,
                          Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
        if (srcHost.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
            throw new CloudRuntimeException(
                String.format("Invalid hypervisor type [%s]. Only KVM supported", srcHost.getHypervisorType()));
        }

        String errMsg = null;
        VMInstanceVO vmInstance = _vmDao.findById(vmTO.getId());
        vmTO.setState(vmInstance.getState());
        List<MigrateCommand.MigrateDiskInfo> migrateDiskInfoList = new ArrayList<>();

        Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage = new HashMap<>();
        Map<VolumeInfo, VolumeInfo> srcVolumeInfoToDestVolumeInfo = new HashMap<>();

        try {
            for (Map.Entry<VolumeInfo, DataStore> entry : volumeDataStoreMap.entrySet()) {
                VolumeInfo srcVolumeInfo = entry.getKey();
                DataStore destDataStore = entry.getValue();

                VolumeVO srcVolume = _volumeDao.findById(srcVolumeInfo.getId());
                StoragePoolVO destStoragePool = _storagePool.findById(destDataStore.getId());

                if (srcVolumeInfo.getPassphraseId() != null) {
                    throw new CloudRuntimeException(
                        String.format("Cannot live migrate encrypted volume: %s", srcVolumeInfo.getVolume()));
                }

                VolumeVO destVolume = createNewVolumeVO(srcVolume, destStoragePool);

                VolumeInfo destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopyRequested);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationCopySucceeded);
                destVolumeInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested);

                // Mirror the source's exact-size setting on the destination resource.
                boolean exactSize = needsExactSizeProp(srcVolumeInfo);
                String devPath = LinstorUtil.createResource(
                    destVolumeInfo, destStoragePool, _storagePoolDao, exactSize);

                _volumeDao.update(destVolume.getId(), destVolume);

                destVolume = _volumeDao.findById(destVolume.getId());

                destVolumeInfo = _volumeDataFactory.getVolume(destVolume.getId(), destDataStore);

                MigrateCommand.MigrateDiskInfo migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(
                    srcVolumeInfo.getPath(),
                    MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
                    MigrateCommand.MigrateDiskInfo.DriverType.RAW,
                    MigrateCommand.MigrateDiskInfo.Source.DEV,
                    devPath);
                migrateDiskInfoList.add(migrateDiskInfo);

                migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);

                srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo);
            }

            PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);

            try {
                Answer pfma = _agentManager.send(destHost.getId(), pfmc);

                if (pfma == null || !pfma.getResult()) {
                    String details = pfma != null ? pfma.getDetails() : "null answer returned";
                    errMsg = String.format("Unable to prepare for migration due to the following: %s", details);
                    throw new AgentUnavailableException(errMsg, destHost.getId());
                }
            } catch (final OperationTimedoutException e) {
                errMsg = String.format("Operation timed out due to %s", e.getMessage());
                throw new AgentUnavailableException(errMsg, destHost.getId());
            }

            VMInstanceVO vm = _vmDao.findById(vmTO.getId());
            boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId())
                .getName().equalsIgnoreCase("Windows");

            MigrateCommand migrateCommand = new MigrateCommand(vmTO.getName(),
                destHost.getPrivateIpAddress(), isWindows, vmTO, true);
            migrateCommand.setWait(StorageManager.KvmStorageOnlineMigrationWait.value());
            migrateCommand.setMigrateStorage(migrateStorage);
            migrateCommand.setMigrateStorageManaged(true);
            migrateCommand.setNewVmCpuShares(
                vmTO.getCpus() * ObjectUtils.defaultIfNull(vmTO.getMinSpeed(), vmTO.getSpeed()));
            migrateCommand.setMigrateDiskInfoList(migrateDiskInfoList);

            boolean kvmAutoConvergence = StorageManager.KvmAutoConvergence.value();
            migrateCommand.setAutoConvergence(kvmAutoConvergence);

            MigrateAnswer migrateAnswer = (MigrateAnswer) _agentManager.send(srcHost.getId(), migrateCommand);

            boolean success = migrateAnswer != null && migrateAnswer.getResult();

            handlePostMigration(success, srcVolumeInfoToDestVolumeInfo, vmTO, destHost);

            if (migrateAnswer == null) {
                throw new CloudRuntimeException("Unable to get an answer to the migrate command");
            }

            if (!migrateAnswer.getResult()) {
                errMsg = migrateAnswer.getDetails();
                throw new CloudRuntimeException(errMsg);
            }
        } catch (AgentUnavailableException | OperationTimedoutException | CloudRuntimeException ex) {
            errMsg = String.format(
                "Copy volume(s) of VM [%s] to storage(s) [%s] and VM to host [%s] failed in LinstorDataMotionStrategy.copyAsync. Error message: [%s].",
                vmTO, srcHost, destHost, ex.getMessage());
            logger.error(errMsg, ex);

            // Preserve the original failure as the cause instead of discarding it.
            throw new CloudRuntimeException(errMsg, ex);
        } finally {
            // Always complete the callback; errMsg is null on success.
            CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg);
            CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer);
            result.setResult(errMsg);
            callback.complete(result);
        }
    }
}

View File

@ -33,4 +33,6 @@
class="org.apache.cloudstack.storage.snapshot.LinstorVMSnapshotStrategy" />
<bean id="linstorConfigManager"
class="org.apache.cloudstack.storage.datastore.util.LinstorConfigurationManager" />
<bean id="linstorDataMotionStrategy"
class="org.apache.cloudstack.storage.motion.LinstorDataMotionStrategy" />
</beans>

View File

@ -26,6 +26,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -75,13 +76,13 @@ public class LinstorPrimaryDataStoreDriverImplTest {
when(api.resourceGroupList(Collections.singletonList("EncryptedGrp"), Collections.emptyList(), null, null))
.thenReturn(Collections.singletonList(encryptedGrp));
List<LayerType> layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "DfltRscGrp");
List<LayerType> layers = LinstorUtil.getEncryptedLayerList(api, "DfltRscGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers);
layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "BcacheGrp");
layers = LinstorUtil.getEncryptedLayerList(api, "BcacheGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.BCACHE, LayerType.LUKS, LayerType.STORAGE), layers);
layers = linstorPrimaryDataStoreDriver.getEncryptedLayerList(api, "EncryptedGrp");
layers = LinstorUtil.getEncryptedLayerList(api, "EncryptedGrp");
Assert.assertEquals(Arrays.asList(LayerType.DRBD, LayerType.LUKS, LayerType.STORAGE), layers);
}
}

View File

@ -433,7 +433,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
public long getUsedIops(StoragePool storagePool) {
long usedIops = 0;
List<VolumeVO> volumes = volumeDao.findByPoolId(storagePool.getId(), null);
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId(), null);
if (volumes != null) {
for (VolumeVO volume : volumes) {

View File

@ -199,7 +199,7 @@ public class SolidFireHostListener implements HypervisorHostListener {
List<String> storagePaths = new ArrayList<>();
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = volumeDao.findByPoolId(storagePoolId, null);
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {
@ -230,7 +230,7 @@ public class SolidFireHostListener implements HypervisorHostListener {
StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
// If you do not pass in null for the second parameter, you only get back applicable ROOT disks.
List<VolumeVO> volumes = volumeDao.findByPoolId(storagePoolId, null);
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePoolId, null);
if (volumes != null) {
for (VolumeVO volume : volumes) {

View File

@ -1276,7 +1276,7 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
return volumeStats;
}
} else {
List<VolumeVO> volumes = volumeDao.findByPoolId(storagePool.getId());
List<VolumeVO> volumes = volumeDao.findNonDestroyedVolumesByPoolId(storagePool.getId());
for (VolumeVO volume : volumes) {
if (volume.getPath() != null && volume.getPath().equals(volumeId)) {
long size = volume.getSize();

View File

@ -102,7 +102,7 @@ public class StorPoolHelper {
if (snapshotDetails != null) {
return StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDetails.getValue(), true);
} else {
List<SnapshotDataStoreVO> snapshots = snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> snapshots = snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
if (!CollectionUtils.isEmpty(snapshots)) {
for (SnapshotDataStoreVO snapshotDataStoreVO : snapshots) {
String name = StorPoolStorageAdaptor.getVolumeNameFromPath(snapshotDataStoreVO.getInstallPath(), true);

View File

@ -240,7 +240,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy {
}
protected boolean areLastSnapshotRef(long snapshotId) {
List<SnapshotDataStoreVO> snapshotStoreRefs = _snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> snapshotStoreRefs = _snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
if (CollectionUtils.isEmpty(snapshotStoreRefs) || snapshotStoreRefs.size() == 1) {
return true;
}
@ -308,7 +308,7 @@ public class StorPoolSnapshotStrategy implements SnapshotStrategy {
}
if (Snapshot.State.Error.equals(snapshotVO.getState())) {
List<SnapshotDataStoreVO> storeRefs = _snapshotStoreDao.findBySnapshotId(snapshotId);
List<SnapshotDataStoreVO> storeRefs = _snapshotStoreDao.findBySnapshotIdWithNonDestroyedState(snapshotId);
List<Long> deletedRefs = new ArrayList<>();
for (SnapshotDataStoreVO ref : storeRefs) {
boolean refZoneIdMatch = false;

View File

@ -82,6 +82,9 @@ public interface SAML2AuthManager extends PluggableAPIAuthenticator, PluggableSe
ConfigKey<Boolean> SAMLRequirePasswordLogin = new ConfigKey<Boolean>("Advanced", Boolean.class, "saml2.require.password", "true",
"When enabled SAML2 will validate that the SAML login was performed with a password. If disabled, other forms of authentication are allowed (two-factor, certificate, etc) on the SAML Authentication Provider", true);
ConfigKey<Boolean> EnableLoginAfterSAMLDisable = new ConfigKey<>("Advanced", Boolean.class, "enable.login.with.disabled.saml", "false", "When enabled, if SAML SSO is disabled, enables user to login with user and password, otherwise a user with SAML SSO disabled cannot login", true);
SAMLProviderMetadata getSPMetadata();
SAMLProviderMetadata getIdPMetadata(String entityId);

View File

@ -451,8 +451,13 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage
user.setExternalEntity(entityId);
user.setSource(User.Source.SAML2);
} else {
boolean enableLoginAfterSAMLDisable = SAML2AuthManager.EnableLoginAfterSAMLDisable.value();
if (user.getSource().equals(User.Source.SAML2)) {
user.setSource(User.Source.SAML2DISABLED);
if(enableLoginAfterSAMLDisable) {
user.setSource(User.Source.UNKNOWN);
} else {
user.setSource(User.Source.SAML2DISABLED);
}
} else {
return false;
}
@ -541,6 +546,6 @@ public class SAML2AuthManagerImpl extends AdapterBase implements SAML2AuthManage
SAMLCloudStackRedirectionUrl, SAMLUserAttributeName,
SAMLIdentityProviderMetadataURL, SAMLDefaultIdentityProviderId,
SAMLSignatureAlgorithm, SAMLAppendDomainSuffix, SAMLTimeout, SAMLCheckSignature,
SAMLForceAuthn, SAMLUserSessionKeyPathAttribute, SAMLRequirePasswordLogin};
SAMLForceAuthn, SAMLUserSessionKeyPathAttribute, SAMLRequirePasswordLogin, EnableLoginAfterSAMLDisable};
}
}

Some files were not shown because too many files have changed in this diff Show More