mirror of https://github.com/apache/cloudstack.git
Merge 81bb667267 into 9bbd32a8ef
This commit is contained in:
commit
dddc67ff59
|
|
@ -170,6 +170,7 @@ public class Storage {
|
|||
ISO(false, false, EncryptionSupport.Unsupported), // for iso image
|
||||
LVM(false, false, EncryptionSupport.Unsupported), // XenServer local LVM SR
|
||||
CLVM(true, false, EncryptionSupport.Unsupported),
|
||||
CLVM_NG(true, false, EncryptionSupport.Hypervisor),
|
||||
RBD(true, true, EncryptionSupport.Unsupported), // http://libvirt.org/storage.html#StorageBackendRBD
|
||||
SharedMountPoint(true, true, EncryptionSupport.Hypervisor),
|
||||
VMFS(true, true, EncryptionSupport.Unsupported), // VMware VMFS storage
|
||||
|
|
|
|||
|
|
@ -0,0 +1,42 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.agent.api;
|
||||
|
||||
/**
|
||||
* Answer for PostMigrationCommand.
|
||||
* Indicates success or failure of post-migration operations on the destination host.
|
||||
*/
|
||||
public class PostMigrationAnswer extends Answer {
|
||||
|
||||
protected PostMigrationAnswer() {
|
||||
}
|
||||
|
||||
public PostMigrationAnswer(PostMigrationCommand cmd, String detail) {
|
||||
super(cmd, false, detail);
|
||||
}
|
||||
|
||||
public PostMigrationAnswer(PostMigrationCommand cmd, Exception ex) {
|
||||
super(cmd, ex);
|
||||
}
|
||||
|
||||
public PostMigrationAnswer(PostMigrationCommand cmd) {
|
||||
super(cmd, true, null);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.agent.api;
|
||||
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
||||
/**
|
||||
* PostMigrationCommand is sent to the destination host after a successful VM migration.
|
||||
* It performs post-migration tasks such as:
|
||||
* - Claiming exclusive locks on CLVM volumes (converting from shared to exclusive mode)
|
||||
* - Other post-migration cleanup operations
|
||||
*/
|
||||
public class PostMigrationCommand extends Command {
|
||||
private VirtualMachineTO vm;
|
||||
private String vmName;
|
||||
|
||||
protected PostMigrationCommand() {
|
||||
}
|
||||
|
||||
public PostMigrationCommand(VirtualMachineTO vm, String vmName) {
|
||||
this.vm = vm;
|
||||
this.vmName = vmName;
|
||||
}
|
||||
|
||||
public VirtualMachineTO getVirtualMachine() {
|
||||
return vm;
|
||||
}
|
||||
|
||||
public String getVmName() {
|
||||
return vmName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.agent.api;
|
||||
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
||||
/**
|
||||
* PreMigrationCommand is sent to the source host before VM migration starts.
|
||||
* It performs pre-migration tasks such as:
|
||||
* - Converting CLVM volume exclusive locks to shared mode so destination host can access them
|
||||
* - Other pre-migration preparation operations on the source host
|
||||
*
|
||||
* This command runs on the SOURCE host before PrepareForMigrationCommand runs on the DESTINATION host.
|
||||
*/
|
||||
public class PreMigrationCommand extends Command {
|
||||
private VirtualMachineTO vm;
|
||||
private String vmName;
|
||||
|
||||
protected PreMigrationCommand() {
|
||||
}
|
||||
|
||||
public PreMigrationCommand(VirtualMachineTO vm, String vmName) {
|
||||
this.vm = vm;
|
||||
this.vmName = vmName;
|
||||
}
|
||||
|
||||
public VirtualMachineTO getVirtualMachine() {
|
||||
return vm;
|
||||
}
|
||||
|
||||
public String getVmName() {
|
||||
return vmName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.cloudstack.storage.command;
|
||||
|
||||
import com.cloud.agent.api.Command;
|
||||
|
||||
/**
|
||||
* Command to transfer CLVM (Clustered LVM) exclusive lock between hosts.
|
||||
* This enables lightweight volume migration for CLVM storage pools where volumes
|
||||
* reside in the same Volume Group (VG) but need to be accessed from different hosts.
|
||||
*
|
||||
* <p>Instead of copying volume data (traditional migration), this command simply
|
||||
* deactivates the LV on the source host and activates it exclusively on the destination host.
|
||||
*
|
||||
* <p>This is significantly faster (10-100x) than traditional migration and uses no network bandwidth.
|
||||
*/
|
||||
public class ClvmLockTransferCommand extends Command {
|
||||
|
||||
/**
|
||||
* Operation to perform on the CLVM volume.
|
||||
* Maps to lvchange flags for LVM operations.
|
||||
*/
|
||||
public enum Operation {
|
||||
/** Deactivate the volume on this host (-an) */
|
||||
DEACTIVATE("-an", "deactivate"),
|
||||
|
||||
/** Activate the volume exclusively on this host (-aey) */
|
||||
ACTIVATE_EXCLUSIVE("-aey", "activate exclusively"),
|
||||
|
||||
/** Activate the volume in shared mode on this host (-asy) */
|
||||
ACTIVATE_SHARED("-asy", "activate in shared mode");
|
||||
|
||||
private final String lvchangeFlag;
|
||||
private final String description;
|
||||
|
||||
Operation(String lvchangeFlag, String description) {
|
||||
this.lvchangeFlag = lvchangeFlag;
|
||||
this.description = description;
|
||||
}
|
||||
|
||||
public String getLvchangeFlag() {
|
||||
return lvchangeFlag;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return description;
|
||||
}
|
||||
}
|
||||
|
||||
private String lvPath;
|
||||
private Operation operation;
|
||||
private String volumeUuid;
|
||||
|
||||
public ClvmLockTransferCommand() {
|
||||
// For serialization
|
||||
}
|
||||
|
||||
public ClvmLockTransferCommand(Operation operation, String lvPath, String volumeUuid) {
|
||||
this.operation = operation;
|
||||
this.lvPath = lvPath;
|
||||
this.volumeUuid = volumeUuid;
|
||||
// Execute in sequence to ensure lock safety
|
||||
setWait(30);
|
||||
}
|
||||
|
||||
public String getLvPath() {
|
||||
return lvPath;
|
||||
}
|
||||
|
||||
public Operation getOperation() {
|
||||
return operation;
|
||||
}
|
||||
|
||||
public String getVolumeUuid() {
|
||||
return volumeUuid;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean executeInSequence() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -31,6 +31,12 @@ import java.util.Set;
|
|||
|
||||
public interface VolumeInfo extends DownloadableDataInfo, Volume {
|
||||
|
||||
/**
|
||||
* Constant for the volume detail key that stores the host ID currently holding the CLVM exclusive lock.
|
||||
* This is used during lightweight lock migration to determine the source host for lock transfer.
|
||||
*/
|
||||
String CLVM_LOCK_HOST_ID = "clvmLockHostId";
|
||||
|
||||
boolean isAttachedVM();
|
||||
|
||||
void addPayload(Object data);
|
||||
|
|
@ -103,4 +109,21 @@ public interface VolumeInfo extends DownloadableDataInfo, Volume {
|
|||
List<String> getCheckpointPaths();
|
||||
|
||||
Set<String> getCheckpointImageStoreUrls();
|
||||
|
||||
/**
|
||||
* Gets the destination host ID hint for CLVM volume creation.
|
||||
* This is used to route volume creation commands to the specific host where the VM will be deployed.
|
||||
* Only applicable for CLVM storage pools to avoid shared mode activation.
|
||||
*
|
||||
* @return The host ID where the volume should be created, or null if not set
|
||||
*/
|
||||
Long getDestinationHostId();
|
||||
|
||||
/**
|
||||
* Sets the destination host ID hint for CLVM volume creation.
|
||||
* This should be set before volume creation when the destination host is known.
|
||||
*
|
||||
* @param hostId The host ID where the volume should be created
|
||||
*/
|
||||
void setDestinationHostId(Long hostId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -50,6 +50,8 @@ import javax.naming.ConfigurationException;
|
|||
import javax.persistence.EntityExistsException;
|
||||
|
||||
|
||||
import com.cloud.agent.api.PostMigrationCommand;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
|
||||
import org.apache.cloudstack.annotation.AnnotationService;
|
||||
import org.apache.cloudstack.annotation.dao.AnnotationDao;
|
||||
|
|
@ -135,6 +137,7 @@ import com.cloud.agent.api.PrepareExternalProvisioningAnswer;
|
|||
import com.cloud.agent.api.PrepareExternalProvisioningCommand;
|
||||
import com.cloud.agent.api.PrepareForMigrationAnswer;
|
||||
import com.cloud.agent.api.PrepareForMigrationCommand;
|
||||
import com.cloud.agent.api.PreMigrationCommand;
|
||||
import com.cloud.agent.api.RebootAnswer;
|
||||
import com.cloud.agent.api.RebootCommand;
|
||||
import com.cloud.agent.api.RecreateCheckpointsCommand;
|
||||
|
|
@ -264,6 +267,7 @@ import com.cloud.storage.dao.StoragePoolHostDao;
|
|||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
import com.cloud.storage.dao.VMTemplateZoneDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.storage.snapshot.SnapshotManager;
|
||||
import com.cloud.template.VirtualMachineTemplate;
|
||||
import com.cloud.user.Account;
|
||||
|
|
@ -359,6 +363,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
@Inject
|
||||
private VolumeDao _volsDao;
|
||||
@Inject
|
||||
private VolumeDetailsDao _volsDetailsDao;
|
||||
@Inject
|
||||
private HighAvailabilityManager _haMgr;
|
||||
@Inject
|
||||
private HostPodDao _podDao;
|
||||
|
|
@ -461,6 +467,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
ExtensionsManager extensionsManager;
|
||||
@Inject
|
||||
ExtensionDetailsDao extensionDetailsDao;
|
||||
@Inject
|
||||
ClvmLockManager clvmLockManager;
|
||||
|
||||
|
||||
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
|
||||
|
|
@ -3107,6 +3115,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
updateOverCommitRatioForVmProfile(profile, dest.getHost().getClusterId());
|
||||
|
||||
final VirtualMachineTO to = toVmTO(profile);
|
||||
|
||||
logger.info("Sending PreMigrationCommand to source host {} for VM {}", srcHostId, vm.getInstanceName());
|
||||
final PreMigrationCommand preMigCmd = new PreMigrationCommand(to, vm.getInstanceName());
|
||||
Answer preMigAnswer = null;
|
||||
try {
|
||||
preMigAnswer = _agentMgr.send(srcHostId, preMigCmd);
|
||||
if (preMigAnswer == null || !preMigAnswer.getResult()) {
|
||||
final String details = preMigAnswer != null ? preMigAnswer.getDetails() : "null answer returned";
|
||||
final String msg = "Failed to prepare source host for migration: " + details;
|
||||
logger.error("Failed to prepare source host {} for migration of VM {}: {}", srcHostId, vm.getInstanceName(), details);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
logger.info("Successfully prepared source host {} for migration of VM {}", srcHostId, vm.getInstanceName());
|
||||
} catch (final AgentUnavailableException | OperationTimedoutException e) {
|
||||
logger.error("Failed to send PreMigrationCommand to source host {}: {}", srcHostId, e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Failed to prepare source host for migration: " + e.getMessage(), e);
|
||||
}
|
||||
|
||||
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
|
||||
setVmNetworkDetails(vm, to);
|
||||
|
||||
|
|
@ -3238,6 +3264,24 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e);
|
||||
}
|
||||
migrated = true;
|
||||
try {
|
||||
logger.info("Executing post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
|
||||
final PostMigrationCommand postMigrationCommand = new PostMigrationCommand(to, vm.getInstanceName());
|
||||
final Answer postMigrationAnswer = _agentMgr.send(dstHostId, postMigrationCommand);
|
||||
|
||||
if (postMigrationAnswer == null || !postMigrationAnswer.getResult()) {
|
||||
final String details = postMigrationAnswer != null ? postMigrationAnswer.getDetails() : "null answer returned";
|
||||
logger.warn("Post-migration tasks failed for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
|
||||
vm.getInstanceName(), dstHostId, details);
|
||||
} else {
|
||||
logger.info("Successfully completed post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("Exception during post-migration tasks for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
|
||||
vm.getInstanceName(), dstHostId, e.getMessage(), e);
|
||||
}
|
||||
|
||||
updateClvmLockHostForVmVolumes(vm.getId(), dstHostId);
|
||||
} finally {
|
||||
if (!migrated) {
|
||||
logger.info("Migration was unsuccessful. Cleaning up: {}", vm);
|
||||
|
|
@ -3323,6 +3367,27 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
_vmDao.persist(newVm);
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates CLVM_LOCK_HOST_ID for all CLVM volumes attached to a VM after VM migration.
|
||||
* This ensures that subsequent operations on CLVM volumes are routed to the correct host.
|
||||
*
|
||||
* @param vmId The ID of the VM that was migrated
|
||||
* @param destHostId The destination host ID where the VM now resides
|
||||
*/
|
||||
private void updateClvmLockHostForVmVolumes(long vmId, long destHostId) {
|
||||
List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
|
||||
if (volumes == null || volumes.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : volumes) {
|
||||
StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
|
||||
if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
|
||||
clvmLockManager.setClvmLockHostId(volume.getId(), destHostId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* We create the mapping of volumes and storage pool to migrate the VMs according to the information sent by the user.
|
||||
* If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, DataCenterDeployment, Map, List)}
|
||||
|
|
@ -4897,6 +4962,27 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
|
|||
volumeMgr.prepareForMigration(profile, dest);
|
||||
|
||||
final VirtualMachineTO to = toVmTO(profile);
|
||||
|
||||
// Step 1: Send PreMigrationCommand to source host to convert CLVM volumes to shared mode
|
||||
// This must happen BEFORE PrepareForMigrationCommand on destination to avoid lock conflicts
|
||||
logger.info("Sending PreMigrationCommand to source host {} for VM {}", srcHostId, vm.getInstanceName());
|
||||
final PreMigrationCommand preMigCmd = new PreMigrationCommand(to, vm.getInstanceName());
|
||||
Answer preMigAnswer = null;
|
||||
try {
|
||||
preMigAnswer = _agentMgr.send(srcHostId, preMigCmd);
|
||||
if (preMigAnswer == null || !preMigAnswer.getResult()) {
|
||||
final String details = preMigAnswer != null ? preMigAnswer.getDetails() : "null answer returned";
|
||||
final String msg = "Failed to prepare source host for migration: " + details;
|
||||
logger.error("Failed to prepare source host {} for migration of VM {}: {}", srcHostId, vm.getInstanceName(), details);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
logger.info("Successfully prepared source host {} for migration of VM {}", srcHostId, vm.getInstanceName());
|
||||
} catch (final AgentUnavailableException | OperationTimedoutException e) {
|
||||
logger.error("Failed to send PreMigrationCommand to source host {}: {}", srcHostId, e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Failed to prepare source host for migration: " + e.getMessage(), e);
|
||||
}
|
||||
|
||||
// Step 2: Send PrepareForMigrationCommand to destination host
|
||||
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
|
||||
|
||||
ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId());
|
||||
|
|
|
|||
|
|
@ -38,8 +38,10 @@ import java.util.stream.Collectors;
|
|||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.deploy.DeploymentClusterPlanner;
|
||||
import com.cloud.exception.ResourceAllocationException;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.dao.VMTemplateDao;
|
||||
|
|
@ -273,6 +275,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
ConfigurationDao configurationDao;
|
||||
@Inject
|
||||
VMInstanceDao vmInstanceDao;
|
||||
@Inject
|
||||
ClvmLockManager clvmLockManager;
|
||||
@Inject
|
||||
AgentManager _agentMgr;
|
||||
|
||||
@Inject
|
||||
protected SnapshotHelper snapshotHelper;
|
||||
|
|
@ -745,6 +751,17 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
logger.debug("Trying to create volume [{}] on storage pool [{}].",
|
||||
volumeToString, poolToString);
|
||||
DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
|
||||
|
||||
// For CLVM pools, set the lock host hint so volume is created on the correct host
|
||||
// This avoids the need for shared mode activation and improves performance
|
||||
if (ClvmLockManager.isClvmPoolType(pool.getPoolType()) && hostId != null) {
|
||||
logger.info("CLVM pool detected. Setting lock host {} for volume {} to route creation to correct host",
|
||||
hostId, volumeInfo.getUuid());
|
||||
volumeInfo.setDestinationHostId(hostId);
|
||||
|
||||
clvmLockManager.setClvmLockHostId(volumeInfo.getId(), hostId);
|
||||
}
|
||||
|
||||
for (int i = 0; i < 2; i++) {
|
||||
// retry one more time in case of template reload is required for Vmware case
|
||||
AsyncCallFuture<VolumeApiResult> future = null;
|
||||
|
|
@ -786,6 +803,109 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
return String.format("uuid: %s, name: %s", volume.getUuid(), volume.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the CLVM_LOCK_HOST_ID for a migrated volume if applicable.
|
||||
* For CLVM volumes that are attached to a VM, this updates the lock host tracking
|
||||
* to point to the VM's current host after volume migration.
|
||||
*
|
||||
* @param migratedVolume The volume that was migrated
|
||||
* @param destPool The destination storage pool
|
||||
* @param operationType Description of the operation (e.g., "migrated", "live-migrated") for logging
|
||||
*/
|
||||
private void updateClvmLockHostAfterMigration(Volume migratedVolume, StoragePool destPool, String operationType) {
|
||||
if (migratedVolume == null || destPool == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
StoragePoolVO pool = _storagePoolDao.findById(destPool.getId());
|
||||
if (pool == null || pool.getPoolType() != Storage.StoragePoolType.CLVM || pool.getPoolType() != Storage.StoragePoolType.CLVM_NG) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (migratedVolume.getInstanceId() == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
VMInstanceVO vm = vmInstanceDao.findById(migratedVolume.getInstanceId());
|
||||
if (vm == null || vm.getHostId() == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
clvmLockManager.setClvmLockHostId(migratedVolume.getId(), vm.getHostId());
|
||||
logger.debug("Updated CLVM_LOCK_HOST_ID for {} volume {} to host {} where VM {} is running",
|
||||
operationType, migratedVolume.getUuid(), vm.getHostId(), vm.getInstanceName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the CLVM lock host ID from any existing volume of the specified VM.
|
||||
* This is useful when attaching a new volume to a stopped VM - we want to maintain
|
||||
* consistency by using the same host that manages the VM's other CLVM volumes.
|
||||
*
|
||||
* @param vmId The ID of the VM
|
||||
* @return The host ID if found, null otherwise
|
||||
*/
|
||||
private Long getClvmLockHostFromVmVolumes(Long vmId) {
|
||||
if (vmId == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
List<VolumeVO> vmVolumes = _volsDao.findByInstance(vmId);
|
||||
if (vmVolumes == null || vmVolumes.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : vmVolumes) {
|
||||
if (volume.getPoolId() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
|
||||
if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
|
||||
Long lockHostId = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
|
||||
if (lockHostId != null) {
|
||||
logger.debug("Found CLVM lock host {} from existing volume {} of VM {}",
|
||||
lockHostId, volume.getUuid(), vmId);
|
||||
return lockHostId;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private void transferClvmLocksForVmStart(List<VolumeVO> volumes, Long destHostId, VMInstanceVO vm) {
|
||||
if (volumes == null || volumes.isEmpty() || destHostId == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : volumes) {
|
||||
if (volume.getPoolId() == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
|
||||
if (pool == null || pool.getPoolType() != Storage.StoragePoolType.CLVM || pool.getPoolType() != Storage.StoragePoolType.CLVM_NG) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Long currentLockHost = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
|
||||
|
||||
if (currentLockHost == null) {
|
||||
clvmLockManager.setClvmLockHostId(volume.getId(), destHostId);
|
||||
} else if (!currentLockHost.equals(destHostId)) {
|
||||
logger.info("CLVM volume {} is locked on host {} but VM {} starting on host {}. Transferring lock.",
|
||||
volume.getUuid(), currentLockHost, vm.getInstanceName(), destHostId);
|
||||
|
||||
if (!clvmLockManager.transferClvmVolumeLock(volume.getUuid(), volume.getId(),
|
||||
volume.getPath(), pool, currentLockHost, destHostId)) {
|
||||
throw new CloudRuntimeException(
|
||||
String.format("Failed to transfer CLVM lock for volume %s from host %s to host %s",
|
||||
volume.getUuid(), currentLockHost, destHostId));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public String getRandomVolumeName() {
|
||||
return UUID.randomUUID().toString();
|
||||
}
|
||||
|
|
@ -1204,10 +1324,22 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
Long clusterId = storagePool.getClusterId();
|
||||
logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId);
|
||||
Long hostId = vm.getHostId();
|
||||
if (hostId == null && storagePool.isLocal()) {
|
||||
List<StoragePoolHostVO> poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
|
||||
if (poolHosts.size() > 0) {
|
||||
hostId = poolHosts.get(0).getHostId();
|
||||
if (hostId == null && (storagePool.isLocal() || ClvmLockManager.isClvmPoolType(storagePool.getPoolType()))) {
|
||||
if (ClvmLockManager.isClvmPoolType(storagePool.getPoolType())) {
|
||||
hostId = getClvmLockHostFromVmVolumes(vm.getId());
|
||||
if (hostId != null) {
|
||||
logger.debug("Using CLVM lock host {} from VM {}'s existing volumes for new volume creation",
|
||||
hostId, vm.getUuid());
|
||||
}
|
||||
}
|
||||
|
||||
if (hostId == null) {
|
||||
List<StoragePoolHostVO> poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
|
||||
if (!poolHosts.isEmpty()) {
|
||||
hostId = poolHosts.get(0).getHostId();
|
||||
logger.debug("Selected host {} from storage pool {} for stopped VM {} volume creation",
|
||||
hostId, storagePool.getUuid(), vm.getUuid());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1452,6 +1584,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
_snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
|
||||
_snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
|
||||
}
|
||||
|
||||
// For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after migration
|
||||
updateClvmLockHostAfterMigration(result.getVolume(), destPool, "migrated");
|
||||
}
|
||||
return result.getVolume();
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
|
|
@ -1477,6 +1612,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult());
|
||||
return null;
|
||||
}
|
||||
|
||||
// For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after live migration
|
||||
updateClvmLockHostAfterMigration(result.getVolume(), destPool, "live-migrated");
|
||||
|
||||
return result.getVolume();
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage());
|
||||
|
|
@ -1519,6 +1658,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
logger.error(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
|
||||
Volume volume = entry.getKey();
|
||||
StoragePool destPool = entry.getValue();
|
||||
updateClvmLockHostAfterMigration(volume, destPool, "vm-migrated");
|
||||
}
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage());
|
||||
logger.debug("Exception: ", e);
|
||||
|
|
@ -1851,6 +1995,19 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
|
||||
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
|
||||
} else {
|
||||
// For CLVM pools, set the destination host hint so volume is created on the correct host
|
||||
// This avoids the need for shared mode activation and improves performance
|
||||
StoragePoolVO poolVO = _storagePoolDao.findById(destPool.getId());
|
||||
if (poolVO != null && ClvmLockManager.isClvmPoolType(poolVO.getPoolType())) {
|
||||
Long hostId = vm.getVirtualMachine().getHostId();
|
||||
if (hostId != null) {
|
||||
volume.setDestinationHostId(hostId);
|
||||
clvmLockManager.setClvmLockHostId(volume.getId(), hostId);
|
||||
logger.info("CLVM pool detected during volume creation from template. Setting lock host {} for volume {} (persisted to DB) to route creation to correct host",
|
||||
hostId, volume.getUuid());
|
||||
}
|
||||
}
|
||||
|
||||
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
|
||||
}
|
||||
}
|
||||
|
|
@ -1966,13 +2123,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
|
|||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
|
||||
// don't allow to start vm that doesn't have a root volume
|
||||
if (_volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT).isEmpty()) {
|
||||
throw new CloudRuntimeException(String.format("ROOT volume is missing, unable to prepare volumes for the VM [%s].", vm.getVirtualMachine()));
|
||||
}
|
||||
|
||||
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
|
||||
|
||||
VirtualMachine vmInstance = vm.getVirtualMachine();
|
||||
VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstance.getId());
|
||||
if (vmInstance.getState() == State.Starting && dest.getHost() != null) {
|
||||
transferClvmLocksForVmStart(vols, dest.getHost().getId(), vmInstanceVO);
|
||||
}
|
||||
|
||||
List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
|
||||
Volume vol = null;
|
||||
PrimaryDataStore store;
|
||||
|
|
|
|||
|
|
@ -44,6 +44,8 @@
|
|||
value="#{storagePoolAllocatorsRegistry.registered}" />
|
||||
</bean>
|
||||
|
||||
<bean id="clvmLockManager" class="com.cloud.storage.ClvmLockManager" />
|
||||
|
||||
<bean id="storageOrchestrator"
|
||||
class="org.apache.cloudstack.engine.orchestration.StorageOrchestrator"/>
|
||||
<bean id="dataMigrationHelper"
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ import static org.mockito.Mockito.verify;
|
|||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
|
@ -60,6 +61,7 @@ import com.cloud.ha.HighAvailabilityManager;
|
|||
import com.cloud.network.Network;
|
||||
import com.cloud.network.NetworkModel;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
|
||||
|
|
@ -1954,4 +1956,202 @@ public class VirtualMachineManagerImplTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithClvmVolumes() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
long poolId = 10L;
|
||||
|
||||
VolumeVO clvmVolume1 = mock(VolumeVO.class);
|
||||
VolumeVO clvmVolume2 = mock(VolumeVO.class);
|
||||
|
||||
when(clvmVolume1.getId()).thenReturn(1L);
|
||||
when(clvmVolume1.getPoolId()).thenReturn(poolId);
|
||||
when(clvmVolume2.getId()).thenReturn(2L);
|
||||
when(clvmVolume2.getPoolId()).thenReturn(poolId);
|
||||
|
||||
StoragePoolVO clvmPool = mock(StoragePoolVO.class);
|
||||
when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(clvmVolume1, clvmVolume2));
|
||||
when(storagePoolDaoMock.findById(poolId)).thenReturn(clvmPool);
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(1L, destHostId);
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(2L, destHostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithNonClvmVolumes() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
long poolId = 10L;
|
||||
|
||||
VolumeVO nfsVolume = mock(VolumeVO.class);
|
||||
when(nfsVolume.getPoolId()).thenReturn(poolId);
|
||||
|
||||
StoragePoolVO nfsPool = mock(StoragePoolVO.class);
|
||||
when(nfsPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(nfsVolume));
|
||||
when(storagePoolDaoMock.findById(poolId)).thenReturn(nfsPool);
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, never()).setClvmLockHostId(anyLong(), anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithMixedVolumes() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
long clvmPoolId = 10L;
|
||||
long nfsPoolId = 20L;
|
||||
|
||||
VolumeVO clvmVolume = mock(VolumeVO.class);
|
||||
VolumeVO nfsVolume = mock(VolumeVO.class);
|
||||
|
||||
when(clvmVolume.getId()).thenReturn(1L);
|
||||
when(clvmVolume.getPoolId()).thenReturn(clvmPoolId);
|
||||
when(nfsVolume.getPoolId()).thenReturn(nfsPoolId);
|
||||
|
||||
StoragePoolVO clvmPool = mock(StoragePoolVO.class);
|
||||
when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
StoragePoolVO nfsPool = mock(StoragePoolVO.class);
|
||||
when(nfsPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(clvmVolume, nfsVolume));
|
||||
when(storagePoolDaoMock.findById(clvmPoolId)).thenReturn(clvmPool);
|
||||
when(storagePoolDaoMock.findById(nfsPoolId)).thenReturn(nfsPool);
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(1L, destHostId);
|
||||
verify(clvmLockManagerMock, never()).setClvmLockHostId(2L, destHostId);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithNoVolumes() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Collections.emptyList());
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, never()).setClvmLockHostId(anyLong(), anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithNullPoolId() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
|
||||
VolumeVO volumeWithoutPool = mock(VolumeVO.class);
|
||||
when(volumeWithoutPool.getPoolId()).thenReturn(null);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(volumeWithoutPool));
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(storagePoolDaoMock, never()).findById(anyLong());
|
||||
verify(clvmLockManagerMock, never()).setClvmLockHostId(anyLong(), anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_WithNullPool() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
long poolId = 10L;
|
||||
|
||||
VolumeVO volume = mock(VolumeVO.class);
|
||||
when(volume.getPoolId()).thenReturn(poolId);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(volume));
|
||||
when(storagePoolDaoMock.findById(poolId)).thenReturn(null);
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, never()).setClvmLockHostId(anyLong(), anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateClvmLockHostForVmVolumes_MultipleClvmPools() throws Exception {
|
||||
long vmId = 100L;
|
||||
long destHostId = 2L;
|
||||
long pool1Id = 10L;
|
||||
long pool2Id = 20L;
|
||||
|
||||
VolumeVO volume1 = mock(VolumeVO.class);
|
||||
VolumeVO volume2 = mock(VolumeVO.class);
|
||||
VolumeVO volume3 = mock(VolumeVO.class);
|
||||
|
||||
when(volume1.getId()).thenReturn(1L);
|
||||
when(volume1.getPoolId()).thenReturn(pool1Id);
|
||||
when(volume2.getId()).thenReturn(2L);
|
||||
when(volume2.getPoolId()).thenReturn(pool2Id);
|
||||
when(volume3.getId()).thenReturn(3L);
|
||||
when(volume3.getPoolId()).thenReturn(pool1Id);
|
||||
|
||||
StoragePoolVO clvmPool1 = mock(StoragePoolVO.class);
|
||||
when(clvmPool1.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
StoragePoolVO clvmPool2 = mock(StoragePoolVO.class);
|
||||
when(clvmPool2.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
when(volumeDaoMock.findByInstance(vmId)).thenReturn(Arrays.asList(volume1, volume2, volume3));
|
||||
when(storagePoolDaoMock.findById(pool1Id)).thenReturn(clvmPool1);
|
||||
when(storagePoolDaoMock.findById(pool2Id)).thenReturn(clvmPool2);
|
||||
|
||||
ClvmLockManager clvmLockManagerMock = mock(ClvmLockManager.class);
|
||||
ReflectionTestUtils.setField(virtualMachineManagerImpl, "clvmLockManager", clvmLockManagerMock);
|
||||
|
||||
Method method = VirtualMachineManagerImpl.class.getDeclaredMethod(
|
||||
"updateClvmLockHostForVmVolumes", long.class, long.class);
|
||||
method.setAccessible(true);
|
||||
method.invoke(virtualMachineManagerImpl, vmId, destHostId);
|
||||
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(1L, destHostId);
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(2L, destHostId);
|
||||
verify(clvmLockManagerMock, times(1)).setClvmLockHostId(3L, destHostId);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,6 +16,8 @@
|
|||
// under the License.
|
||||
package org.apache.cloudstack.engine.orchestration;
|
||||
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
|
@ -30,6 +32,7 @@ import com.cloud.host.Host;
|
|||
import com.cloud.host.HostVO;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.offering.DiskOffering;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.Storage;
|
||||
|
|
@ -42,6 +45,7 @@ import com.cloud.user.ResourceLimitService;
|
|||
import com.cloud.uservm.UserVm;
|
||||
import com.cloud.utils.db.EntityManager;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VMInstanceVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.utils.Pair;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
|
|
@ -67,6 +71,7 @@ import org.junit.Assert;
|
|||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.ArgumentMatchers;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedConstruction;
|
||||
|
|
@ -640,4 +645,298 @@ public class VolumeOrchestratorTest {
|
|||
Assert.assertEquals(1, result.second().size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_WithClvmVolumes() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
Long currentHostId = 1L;
|
||||
Long poolId = 10L;
|
||||
|
||||
VolumeVO clvmVolume1 = Mockito.mock(VolumeVO.class);
|
||||
VolumeVO clvmVolume2 = Mockito.mock(VolumeVO.class);
|
||||
|
||||
Mockito.when(clvmVolume1.getId()).thenReturn(101L);
|
||||
Mockito.when(clvmVolume1.getPoolId()).thenReturn(poolId);
|
||||
Mockito.when(clvmVolume1.getUuid()).thenReturn("vol-uuid-1");
|
||||
Mockito.when(clvmVolume1.getPath()).thenReturn("vol-path-1");
|
||||
|
||||
Mockito.when(clvmVolume2.getId()).thenReturn(102L);
|
||||
Mockito.when(clvmVolume2.getPoolId()).thenReturn(poolId);
|
||||
Mockito.when(clvmVolume2.getUuid()).thenReturn("vol-uuid-2");
|
||||
Mockito.when(clvmVolume2.getPath()).thenReturn("vol-path-2");
|
||||
|
||||
StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(vmInstance.getInstanceName()).thenReturn(MOCK_VM_NAME);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), Mockito.anyString())).thenReturn(currentHostId);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(102L), Mockito.anyString())).thenReturn(currentHostId);
|
||||
Mockito.when(clvmLockManager.transferClvmVolumeLock(Mockito.anyString(), Mockito.anyLong(),
|
||||
Mockito.anyString(), Mockito.any(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(true);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(clvmVolume1, clvmVolume2), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.times(1)).transferClvmVolumeLock(
|
||||
Mockito.eq("vol-uuid-1"), Mockito.eq(101L), Mockito.eq("vol-path-1"),
|
||||
Mockito.eq(clvmPool), Mockito.eq(currentHostId), Mockito.eq(destHostId));
|
||||
Mockito.verify(clvmLockManager, Mockito.times(1)).transferClvmVolumeLock(
|
||||
Mockito.eq("vol-uuid-2"), Mockito.eq(102L), Mockito.eq("vol-path-2"),
|
||||
Mockito.eq(clvmPool), Mockito.eq(currentHostId), Mockito.eq(destHostId));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_WithNonClvmVolumes() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
Long poolId = 10L;
|
||||
|
||||
VolumeVO nfsVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(nfsVolume.getPoolId()).thenReturn(poolId);
|
||||
|
||||
StoragePoolVO nfsPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(nfsPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(nfsPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(nfsVolume), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.never()).transferClvmVolumeLock(
|
||||
Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(),
|
||||
Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_NoLockTransferNeeded() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
Long poolId = 10L;
|
||||
|
||||
VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(clvmVolume.getId()).thenReturn(101L);
|
||||
Mockito.when(clvmVolume.getPoolId()).thenReturn(poolId);
|
||||
|
||||
StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), ArgumentMatchers.nullable(String.class))).thenReturn(destHostId);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
java.lang.reflect.Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(clvmVolume), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.never()).transferClvmVolumeLock(
|
||||
Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(),
|
||||
Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_EmptyVolumeList() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, new ArrayList<VolumeVO>(), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.never()).getClvmLockHostId(Mockito.anyLong(), Mockito.anyString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_NullPoolId() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
|
||||
VolumeVO volumeWithoutPool = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeWithoutPool.getPoolId()).thenReturn(null);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(volumeWithoutPool), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(storagePoolDao, Mockito.never()).findById(Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_SetInitialLockHost() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
Long poolId = 10L;
|
||||
|
||||
VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(clvmVolume.getId()).thenReturn(101L);
|
||||
Mockito.when(clvmVolume.getPoolId()).thenReturn(poolId);
|
||||
|
||||
StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), ArgumentMatchers.nullable(String.class))).thenReturn(null);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(clvmVolume), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.times(1)).setClvmLockHostId(101L, destHostId);
|
||||
Mockito.verify(clvmLockManager, Mockito.never()).transferClvmVolumeLock(
|
||||
Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(),
|
||||
Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmLocksForVmStart_MixedVolumes() throws Exception {
|
||||
Long destHostId = 2L;
|
||||
Long currentHostId = 1L;
|
||||
Long clvmPoolId = 10L;
|
||||
Long nfsPoolId = 20L;
|
||||
|
||||
VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(clvmVolume.getId()).thenReturn(101L);
|
||||
Mockito.when(clvmVolume.getPoolId()).thenReturn(clvmPoolId);
|
||||
Mockito.when(clvmVolume.getUuid()).thenReturn("clvm-vol-uuid");
|
||||
Mockito.when(clvmVolume.getPath()).thenReturn("clvm-vol-path");
|
||||
|
||||
VolumeVO nfsVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(nfsVolume.getPoolId()).thenReturn(nfsPoolId);
|
||||
|
||||
StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
StoragePoolVO nfsPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(nfsPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(vmInstance.getInstanceName()).thenReturn(MOCK_VM_NAME);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), Mockito.anyString())).thenReturn(currentHostId);
|
||||
Mockito.when(clvmLockManager.transferClvmVolumeLock(Mockito.anyString(), Mockito.anyLong(),
|
||||
Mockito.anyString(), Mockito.any(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(true);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(clvmPoolId)).thenReturn(clvmPool);
|
||||
Mockito.when(storagePoolDao.findById(nfsPoolId)).thenReturn(nfsPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
method.invoke(volumeOrchestrator, List.of(clvmVolume, nfsVolume), destHostId, vmInstance);
|
||||
|
||||
Mockito.verify(clvmLockManager, Mockito.times(1)).transferClvmVolumeLock(
|
||||
Mockito.eq("clvm-vol-uuid"), Mockito.eq(101L), Mockito.eq("clvm-vol-path"),
|
||||
Mockito.eq(clvmPool), Mockito.eq(currentHostId), Mockito.eq(destHostId));
|
||||
}
|
||||
|
||||
@Test(expected = CloudRuntimeException.class)
|
||||
public void testTransferClvmLocksForVmStart_TransferFails() throws Throwable {
|
||||
Long destHostId = 2L;
|
||||
Long currentHostId = 1L;
|
||||
Long poolId = 10L;
|
||||
|
||||
VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(clvmVolume.getId()).thenReturn(101L);
|
||||
Mockito.when(clvmVolume.getPoolId()).thenReturn(poolId);
|
||||
Mockito.when(clvmVolume.getUuid()).thenReturn("vol-uuid");
|
||||
Mockito.when(clvmVolume.getPath()).thenReturn("vol-path");
|
||||
|
||||
StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
|
||||
Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
|
||||
|
||||
VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
|
||||
Mockito.when(vmInstance.getInstanceName()).thenReturn(MOCK_VM_NAME);
|
||||
|
||||
ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
|
||||
Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), Mockito.anyString())).thenReturn(currentHostId);
|
||||
Mockito.when(clvmLockManager.transferClvmVolumeLock(Mockito.anyString(), Mockito.anyLong(),
|
||||
Mockito.anyString(), Mockito.any(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(false);
|
||||
|
||||
Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
|
||||
|
||||
setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
|
||||
setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
|
||||
|
||||
Method method = VolumeOrchestrator.class.getDeclaredMethod(
|
||||
"transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
|
||||
method.setAccessible(true);
|
||||
|
||||
try {
|
||||
method.invoke(volumeOrchestrator, List.of(clvmVolume), destHostId, vmInstance);
|
||||
} catch (InvocationTargetException e) {
|
||||
throw e.getCause();
|
||||
}
|
||||
}
|
||||
|
||||
private void setField(Object target, String fieldName, Object value) throws Exception {
|
||||
Field field = findField(target.getClass(), fieldName);
|
||||
if (field == null) {
|
||||
throw new NoSuchFieldException("Field " + fieldName + " not found in " + target.getClass());
|
||||
}
|
||||
field.setAccessible(true);
|
||||
field.set(target, value);
|
||||
}
|
||||
|
||||
private Field findField(Class<?> clazz, String fieldName) {
|
||||
Class<?> current = clazz;
|
||||
while (current != null && current != Object.class) {
|
||||
try {
|
||||
return current.getDeclaredField(fieldName);
|
||||
} catch (NoSuchFieldException e) {
|
||||
current = current.getSuperclass();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -148,7 +148,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase<SnapshotDataStoreVO
|
|||
|
||||
idStateNeqSearch = createSearchBuilder();
|
||||
idStateNeqSearch.and(SNAPSHOT_ID, idStateNeqSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
|
||||
idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NEQ);
|
||||
idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NIN);
|
||||
idStateNeqSearch.done();
|
||||
|
||||
snapshotVOSearch = snapshotDao.createSearchBuilder();
|
||||
|
|
|
|||
|
|
@ -643,6 +643,10 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
return StrategyPriority.DEFAULT;
|
||||
}
|
||||
|
||||
if (isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO)) {
|
||||
return StrategyPriority.DEFAULT;
|
||||
}
|
||||
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
if (zoneId != null && SnapshotOperation.DELETE.equals(op)) {
|
||||
|
|
@ -691,4 +695,32 @@ public class DefaultSnapshotStrategy extends SnapshotStrategyBase {
|
|||
dataStoreMgr.getStoreZoneId(s.getDataStoreId(), s.getRole()), volumeVO.getDataCenterId()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a CLVM volume snapshot is stored on secondary storage in the same zone.
|
||||
* CLVM snapshots are backed up to secondary storage and removed from primary storage.
|
||||
*/
|
||||
protected boolean isSnapshotStoredOnSecondaryForCLVMVolume(Snapshot snapshot, VolumeVO volumeVO) {
|
||||
if (volumeVO == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long poolId = volumeVO.getPoolId();
|
||||
if (poolId == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
StoragePool pool = (StoragePool) dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
|
||||
if (pool == null || pool.getPoolType() != StoragePoolType.CLVM || pool.getPoolType() != StoragePoolType.CLVM_NG) {
|
||||
return false;
|
||||
}
|
||||
|
||||
List<SnapshotDataStoreVO> snapshotStores = snapshotStoreDao.listReadyBySnapshot(snapshot.getId(), DataStoreRole.Image);
|
||||
if (CollectionUtils.isEmpty(snapshotStores)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return snapshotStores.stream().anyMatch(s -> Objects.equals(
|
||||
dataStoreMgr.getStoreZoneId(s.getDataStoreId(), s.getRole()), volumeVO.getDataCenterId()));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ import javax.naming.ConfigurationException;
|
|||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.Snapshot;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.dao.SnapshotDao;
|
||||
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
|
||||
import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
|
||||
|
|
@ -468,6 +469,13 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
|
|||
|
||||
@Override
|
||||
public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
|
||||
UserVmVO vm = userVmDao.findById(vmSnapshot.getVmId());
|
||||
String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
|
||||
|
||||
if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
return StrategyPriority.DEFAULT;
|
||||
}
|
||||
|
||||
|
|
@ -493,10 +501,31 @@ public class DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshot
|
|||
return vmSnapshotDao.remove(vmSnapshot.getId());
|
||||
}
|
||||
|
||||
protected boolean isRunningVMVolumeOnCLVMStorage(UserVmVO vm, String cantHandleLog) {
|
||||
Long vmId = vm.getId();
|
||||
if (State.Running.equals(vm.getState())) {
|
||||
List<VolumeVO> volumes = volumeDao.findByInstance(vmId);
|
||||
for (VolumeVO volume : volumes) {
|
||||
StoragePool pool = primaryDataStoreDao.findById(volume.getPoolId());
|
||||
if (pool != null && pool.getPoolType() == Storage.StoragePoolType.CLVM) {
|
||||
logger.warn("Rejecting VM snapshot request: {} - VM is running on CLVM storage (pool: {}, poolType: CLVM)",
|
||||
cantHandleLog, pool.getName());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
|
||||
UserVmVO vm = userVmDao.findById(vmId);
|
||||
String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
|
||||
|
||||
if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
if (State.Running.equals(vm.getState()) && !snapshotMemory) {
|
||||
logger.debug("{} as it is running and its memory will not be affected.", cantHandleLog, vm);
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
|
|
|
|||
|
|
@ -345,6 +345,13 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy {
|
|||
}
|
||||
}
|
||||
|
||||
Long vmId = vmSnapshot.getVmId();
|
||||
UserVmVO vm = userVmDao.findById(vmId);
|
||||
String cantHandleLog = String.format("Storage VM snapshot strategy cannot handle VM snapshot for [%s]", vm);
|
||||
if (vm != null && isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
if ( SnapshotManager.VmStorageSnapshotKvm.value() && userVm.getHypervisorType() == Hypervisor.HypervisorType.KVM
|
||||
&& vmSnapshot.getType() == VMSnapshot.Type.Disk) {
|
||||
return StrategyPriority.HYPERVISOR;
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
|
||||
|
|
@ -327,4 +328,236 @@ public class DefaultSnapshotStrategyTest {
|
|||
prepareMocksForIsSnapshotStoredOnSameZoneStoreForQCOW2VolumeTest(100L);
|
||||
Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSameZoneStoreForQCOW2Volume(snapshot, volumeVO));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullVolume() {
|
||||
Snapshot snapshot = Mockito.mock(Snapshot.class);
|
||||
Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, null));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullPoolId() {
|
||||
Snapshot snapshot = Mockito.mock(Snapshot.class);
|
||||
VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
|
||||
Mockito.when(volumeVO.getPoolId()).thenReturn(null);
|
||||
|
||||
Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
|
||||
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullPool() {
    // When the pool id cannot be resolved to a data store, the check must be false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn(null);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NonCLVMPool() {
    // A non-CLVM pool type (NFS here) must short-circuit to false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_RBDPool() {
    // RBD is also not CLVM, so the check must be false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.RBD);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolNoSnapshotStores() {
    // CLVM pool, but no READY snapshot entries on any image store -> false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image)).thenReturn(new ArrayList<>());

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolSnapshotInDifferentZone() {
    // Snapshot copies exist, but only in zones 111/112 while the volume is in zone 100 -> false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);

    SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore2.getDataStoreId()).thenReturn(202L);
    Mockito.when(snapshotStore2.getRole()).thenReturn(DataStoreRole.Image);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore1, snapshotStore2));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(111L);
    Mockito.when(dataStoreManager.getStoreZoneId(202L, DataStoreRole.Image)).thenReturn(112L);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolSnapshotInSameZone() {
    // Happy path: one READY image-store copy whose zone (100) matches the volume's zone -> true.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);

    Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolMultipleSnapshotsOneMatches() {
    // Several copies exist; only the second one (store 202) is in the volume's zone 100 -> true.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);

    SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore2.getDataStoreId()).thenReturn(202L);
    Mockito.when(snapshotStore2.getRole()).thenReturn(DataStoreRole.Image);

    // NOTE(review): snapshotStore3 is intentionally left unstubbed; its data-store id defaults
    // to 0 and its zone lookup is unstubbed (null) — relies on the 202 match being found first
    // or on null zones being skipped. Confirm the production code tolerates null zone ids.
    SnapshotDataStoreVO snapshotStore3 = Mockito.mock(SnapshotDataStoreVO.class);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore1, snapshotStore2, snapshotStore3));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(111L);
    Mockito.when(dataStoreManager.getStoreZoneId(202L, DataStoreRole.Image)).thenReturn(100L);

    Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolNullZoneIds() {
    // The image store's zone id resolves to null -> no zone match is possible -> false.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(null);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolVolumeNullDataCenter() {
    // NOTE(review): the method name says "NullDataCenter", but the mock returns data center id 1L
    // and the store's zone is 100L — what this actually exercises is a zone MISMATCH (1 != 100).
    // Consider renaming the test or stubbing getDataCenterId() to return null.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(1L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);

    Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
|
||||
@Test
public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolMultipleSnapshotsAllInSameZone() {
    // First copy (store 201) is in the volume's zone 100 -> true.
    Snapshot snapshot = Mockito.mock(Snapshot.class);
    Mockito.when(snapshot.getId()).thenReturn(1L);

    VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
    Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
    Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);

    StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
    Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
    Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);

    SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
    Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
    Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);

    // NOTE(review): despite the test name, snapshotStore2 is never stubbed (zone unknown);
    // the assertion holds only because snapshotStore1 already matches. Stub store2's zone to
    // 100L if "all in same zone" is what this test is meant to cover.
    SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);

    Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
            .thenReturn(List.of(snapshotStore1, snapshotStore2));

    Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);

    Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,8 +32,13 @@ import javax.inject.Inject;
|
|||
|
||||
import com.cloud.dc.DedicatedResourceVO;
|
||||
import com.cloud.dc.dao.DedicatedResourceDao;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.user.Account;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.db.QueryBuilder;
|
||||
import org.apache.cloudstack.context.CallContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
|
|
@ -46,6 +51,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.storage.LocalHostEndpoint;
|
||||
import org.apache.cloudstack.storage.RemoteHostEndPoint;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
|
@ -59,8 +65,8 @@ import com.cloud.hypervisor.Hypervisor;
|
|||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage.TemplateType;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import com.cloud.utils.db.DB;
|
||||
import com.cloud.utils.db.QueryBuilder;
|
||||
import com.cloud.utils.db.SearchCriteria.Op;
|
||||
import com.cloud.utils.db.TransactionLegacy;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
|
@ -75,6 +81,10 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
private HostDao hostDao;
|
||||
@Inject
|
||||
private DedicatedResourceDao dedicatedResourceDao;
|
||||
@Inject
|
||||
private PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject
|
||||
private VolumeDetailsDao _volDetailsDao;
|
||||
|
||||
private static final String VOL_ENCRYPT_COLUMN_NAME = "volume_encryption_support";
|
||||
private final String findOneHostOnPrimaryStorage = "select t.id from "
|
||||
|
|
@ -264,6 +274,14 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
|
||||
@Override
|
||||
public EndPoint select(DataObject srcData, DataObject destData, boolean volumeEncryptionSupportRequired) {
|
||||
if (destData instanceof VolumeInfo) {
|
||||
EndPoint clvmEndpoint = selectClvmEndpointIfApplicable((VolumeInfo) destData, "template-to-volume copy");
|
||||
if (clvmEndpoint != null) {
|
||||
return clvmEndpoint;
|
||||
}
|
||||
}
|
||||
|
||||
// Default behavior for non-CLVM or when no destination host is set
|
||||
DataStore srcStore = srcData.getDataStore();
|
||||
DataStore destStore = destData.getDataStore();
|
||||
if (moveBetweenPrimaryImage(srcStore, destStore)) {
|
||||
|
|
@ -388,18 +406,91 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
return sc.list();
|
||||
}
|
||||
|
||||
/**
|
||||
* Selects endpoint for CLVM volumes with destination host hint.
|
||||
* This ensures volumes are created on the correct host with exclusive locks.
|
||||
*
|
||||
* @param volume The volume to check for CLVM routing
|
||||
* @param operation Description of the operation (for logging)
|
||||
* @return EndPoint for the destination host if CLVM routing applies, null otherwise
|
||||
*/
|
||||
private EndPoint selectClvmEndpointIfApplicable(VolumeInfo volume, String operation) {
|
||||
DataStore store = volume.getDataStore();
|
||||
|
||||
if (store.getRole() != DataStoreRole.Primary) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Check if this is a CLVM pool
|
||||
StoragePoolVO pool = _storagePoolDao.findById(store.getId());
|
||||
if (pool == null ||
|
||||
(pool.getPoolType() != Storage.StoragePoolType.CLVM ||
|
||||
pool.getPoolType() != Storage.StoragePoolType.CLVM_NG)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Check if destination host hint is set
|
||||
Long destHostId = volume.getDestinationHostId();
|
||||
if (destHostId == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
logger.info("CLVM {}: routing volume {} to destination host {} for optimal exclusive lock placement",
|
||||
operation, volume.getUuid(), destHostId);
|
||||
|
||||
EndPoint ep = getEndPointFromHostId(destHostId);
|
||||
if (ep != null) {
|
||||
return ep;
|
||||
}
|
||||
|
||||
logger.warn("Could not get endpoint for destination host {}, falling back to default selection", destHostId);
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public EndPoint select(DataObject object, boolean encryptionSupportRequired) {
|
||||
DataStore store = object.getDataStore();
|
||||
|
||||
// This ensures volumes are created on the correct host with exclusive locks
|
||||
if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
|
||||
VolumeInfo volInfo = (VolumeInfo) object;
|
||||
EndPoint clvmEndpoint = selectClvmEndpointIfApplicable(volInfo, "volume creation");
|
||||
if (clvmEndpoint != null) {
|
||||
return clvmEndpoint;
|
||||
}
|
||||
}
|
||||
|
||||
// Default behavior for non-CLVM or when no destination host is set
|
||||
if (store.getRole() == DataStoreRole.Primary) {
|
||||
return findEndPointInScope(store.getScope(), findOneHostOnPrimaryStorage, store.getId(), encryptionSupportRequired);
|
||||
}
|
||||
throw new CloudRuntimeException(String.format("Storage role %s doesn't support encryption", store.getRole()));
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public EndPoint select(DataObject object) {
|
||||
DataStore store = object.getDataStore();
|
||||
|
||||
// For CLVM volumes, check if there's a lock host ID to route to
|
||||
if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
|
||||
VolumeInfo volume = (VolumeInfo) object;
|
||||
StoragePoolVO pool = _storagePoolDao.findById(store.getId());
|
||||
if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
|
||||
Long lockHostId = getClvmLockHostId(volume);
|
||||
if (lockHostId != null) {
|
||||
logger.debug("Routing CLVM volume {} operation to lock holder host {}",
|
||||
volume.getUuid(), lockHostId);
|
||||
EndPoint ep = getEndPointFromHostId(lockHostId);
|
||||
if (ep != null) {
|
||||
return ep;
|
||||
}
|
||||
logger.warn("Could not get endpoint for CLVM lock host {}, falling back to default selection",
|
||||
lockHostId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
EndPoint ep = select(store);
|
||||
if (ep != null) {
|
||||
return ep;
|
||||
|
|
@ -493,6 +584,31 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
}
|
||||
case DELETEVOLUME: {
|
||||
VolumeInfo volume = (VolumeInfo) object;
|
||||
|
||||
// For CLVM volumes, route to the host holding the exclusive lock
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
|
||||
DataStore store = volume.getDataStore();
|
||||
if (store.getRole() == DataStoreRole.Primary) {
|
||||
StoragePoolVO pool = _storagePoolDao.findById(store.getId());
|
||||
if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
|
||||
Long lockHostId = getClvmLockHostId(volume);
|
||||
if (lockHostId != null) {
|
||||
logger.info("Routing CLVM volume {} deletion to lock holder host {}",
|
||||
volume.getUuid(), lockHostId);
|
||||
EndPoint ep = getEndPointFromHostId(lockHostId);
|
||||
if (ep != null) {
|
||||
return ep;
|
||||
}
|
||||
logger.warn("Could not get endpoint for CLVM lock host {}, falling back to default selection",
|
||||
lockHostId);
|
||||
} else {
|
||||
logger.debug("No CLVM lock host tracked for volume {}, using default endpoint selection",
|
||||
volume.getUuid());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
|
||||
VirtualMachine vm = volume.getAttachedVM();
|
||||
if (vm != null) {
|
||||
|
|
@ -589,4 +705,24 @@ public class DefaultEndPointSelector implements EndPointSelector {
|
|||
}
|
||||
return endPoints;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the host ID that currently holds the exclusive lock on a CLVM volume.
|
||||
* This is tracked in volume_details table for proper routing of delete operations.
|
||||
*
|
||||
* @param volume The CLVM volume
|
||||
* @return Host ID holding the lock, or null if not tracked
|
||||
*/
|
||||
private Long getClvmLockHostId(VolumeInfo volume) {
|
||||
VolumeDetailVO detail = _volDetailsDao.findDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID);
|
||||
if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
|
||||
try {
|
||||
return Long.parseLong(detail.getValue());
|
||||
} catch (NumberFormatException e) {
|
||||
logger.warn("Invalid CLVM lock host ID in volume_details for volume {}: {}",
|
||||
volume.getUuid(), detail.getValue());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,12 +37,14 @@ import com.cloud.network.dao.NetworkDao;
|
|||
import com.cloud.network.dao.NetworkVO;
|
||||
import com.cloud.offerings.NetworkOfferingVO;
|
||||
import com.cloud.offerings.dao.NetworkOfferingDao;
|
||||
import com.cloud.storage.ClvmLockManager;
|
||||
import com.cloud.storage.DataStoreRole;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.StorageService;
|
||||
import com.cloud.storage.VolumeApiServiceImpl;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
|
|
@ -139,6 +141,18 @@ public class DefaultHostListener implements HypervisorHostListener {
|
|||
Map<String, String> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null).first();
|
||||
|
||||
Optional.ofNullable(nfsMountOpts).ifPresent(detailsMap::putAll);
|
||||
|
||||
// Propagate CLVM secure zero-fill setting to the host
|
||||
// Note: This is done during host connection (agent start, MS restart, host reconnection)
|
||||
// so the setting is non-dynamic. Changes require host reconnection to take effect.
|
||||
if (ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
|
||||
Boolean clvmSecureZeroFill = VolumeApiServiceImpl.CLVMSecureZeroFill.valueIn(poolId);
|
||||
if (clvmSecureZeroFill != null) {
|
||||
detailsMap.put("clvmsecurezerofill", String.valueOf(clvmSecureZeroFill));
|
||||
logger.debug("Added CLVM secure zero-fill setting: {} for storage pool: {}", clvmSecureZeroFill, pool);
|
||||
}
|
||||
}
|
||||
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, detailsMap);
|
||||
cmd.setWait(modifyStoragePoolCommandWait);
|
||||
HostVO host = hostDao.findById(hostId);
|
||||
|
|
|
|||
|
|
@ -126,6 +126,7 @@ public class VolumeObject implements VolumeInfo {
|
|||
private boolean directDownload;
|
||||
private String vSphereStoragePolicyId;
|
||||
private boolean followRedirects;
|
||||
private Long destinationHostId; // For CLVM: hints where volume should be created
|
||||
|
||||
private List<String> checkpointPaths;
|
||||
private Set<String> checkpointImageStoreUrls;
|
||||
|
|
@ -361,6 +362,30 @@ public class VolumeObject implements VolumeInfo {
|
|||
this.directDownload = directDownload;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long getDestinationHostId() {
|
||||
// If not in memory, try to load from database (volume_details table)
|
||||
// For CLVM volumes, this uses the CLVM_LOCK_HOST_ID which serves dual purpose:
|
||||
// 1. During creation: hints where to create the volume
|
||||
// 2. After creation: tracks which host holds the exclusive lock
|
||||
if (destinationHostId == null && volumeVO != null) {
|
||||
VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeVO.getId(), CLVM_LOCK_HOST_ID);
|
||||
if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
|
||||
try {
|
||||
destinationHostId = Long.parseLong(detail.getValue());
|
||||
} catch (NumberFormatException e) {
|
||||
logger.warn("Invalid CLVM lock host ID value in volume_details for volume {}: {}", volumeVO.getUuid(), detail.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
return destinationHostId;
|
||||
}
|
||||
|
||||
/**
 * Sets the in-memory destination-host hint for this volume. The value is only
 * cached here; persisting it to volume_details is handled elsewhere.
 */
@Override
public void setDestinationHostId(Long hostId) {
    this.destinationHostId = hostId;
}
|
||||
|
||||
public void update() {
|
||||
volumeDao.update(volumeVO.getId(), volumeVO);
|
||||
volumeVO = volumeDao.findById(volumeVO.getId());
|
||||
|
|
|
|||
|
|
@ -2484,7 +2484,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
} else if ((poolType == StoragePoolType.NetworkFilesystem
|
||||
|| poolType == StoragePoolType.SharedMountPoint
|
||||
|| poolType == StoragePoolType.Filesystem
|
||||
|| poolType == StoragePoolType.Gluster)
|
||||
|| poolType == StoragePoolType.Gluster
|
||||
|| poolType == StoragePoolType.CLVM_NG)
|
||||
&& volFormat == PhysicalDiskFormat.QCOW2 ) {
|
||||
return "QCOW2";
|
||||
} else if (poolType == StoragePoolType.Linstor) {
|
||||
|
|
@ -3680,13 +3681,19 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
final String glusterVolume = pool.getSourceDir().replace("/", "");
|
||||
disk.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), pool.getSourceHost(), pool.getSourcePort(), null,
|
||||
null, devId, diskBusType, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
|
||||
} else if (pool.getType() == StoragePoolType.CLVM || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) {
|
||||
} else if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) {
|
||||
// CLVM and CLVM_NG use block devices (/dev/vgname/volume)
|
||||
if (volume.getType() == Volume.Type.DATADISK && !(isWindowsTemplate && isUefiEnabled)) {
|
||||
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType);
|
||||
}
|
||||
|
||||
// CLVM_NG uses QCOW2 format on block devices, override the default RAW format
|
||||
if (pool.getType() == StoragePoolType.CLVM_NG) {
|
||||
disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
|
||||
}
|
||||
|
||||
if (pool.getType() == StoragePoolType.Linstor && isQemuDiscardBugFree(diskBusType)) {
|
||||
disk.setDiscard(DiscardType.UNMAP);
|
||||
}
|
||||
|
|
@ -6523,4 +6530,232 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
|
|||
/** @return the configured guest CPU architecture for this resource. */
public String getGuestCpuArch() {
    return guestCpuArch;
}
|
||||
|
||||
/**
 * Activation states applied to CLVM logical volumes around a live migration on
 * the source host; each state maps to an lvchange activation flag.
 */
public enum ClvmVolumeState {
    /** Shared activation (-asy): both source and destination hosts may access the volume during migration. */
    SHARED("-asy", "shared", "Before migration: activating in shared mode"),
    /** Deactivation (-an): release the volume on the source after a successful migration. */
    DEACTIVATE("-an", "deactivated", "After successful migration: deactivating volume"),
    /** Exclusive activation (-aey): restore sole ownership on the source after a failed migration. */
    EXCLUSIVE("-aey", "exclusive", "After failed migration: reverting to exclusive mode");

    private final String flag;
    private final String stateName;
    private final String message;

    ClvmVolumeState(String flag, String stateName, String message) {
        this.flag = flag;
        this.stateName = stateName;
        this.message = message;
    }

    /** @return the activation flag passed to lvchange for this state */
    public String getLvchangeFlag() {
        return flag;
    }

    /** @return a short human-readable state name used in log and error text */
    public String getDescription() {
        return stateName;
    }

    /** @return the informational message logged when transitioning to this state */
    public String getLogMessage() {
        return message;
    }
}
|
||||
|
||||
public static void modifyClvmVolumesStateForMigration(List<DiskDef> disks, LibvirtComputingResource resource,
|
||||
VirtualMachineTO vmSpec, ClvmVolumeState state) {
|
||||
for (DiskDef disk : disks) {
|
||||
if (isClvmVolume(disk, resource, vmSpec)) {
|
||||
String volumePath = disk.getDiskPath();
|
||||
try {
|
||||
modifyClvmVolumeState(volumePath, state.getLvchangeFlag(), state.getDescription(), state.getLogMessage());
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("[CLVM Migration] Exception while setting volume [{}] to {} state: {}",
|
||||
volumePath, state.getDescription(), e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void modifyClvmVolumeState(String volumePath, String lvchangeFlag,
|
||||
String stateDescription, String logMessage) {
|
||||
try {
|
||||
LOGGER.info("[CLVM Migration] {} for volume [{}]", logMessage, volumePath);
|
||||
|
||||
Script cmd = new Script("lvchange", Duration.standardSeconds(300), LOGGER);
|
||||
cmd.add(lvchangeFlag);
|
||||
cmd.add(volumePath);
|
||||
|
||||
String result = cmd.execute();
|
||||
if (result != null) {
|
||||
String errorMsg = String.format(
|
||||
"[CLVM Migration] Failed to set volume [%s] to %s state. Command result: %s",
|
||||
volumePath, stateDescription, result);
|
||||
LOGGER.error(errorMsg);
|
||||
throw new CloudRuntimeException(errorMsg);
|
||||
} else {
|
||||
LOGGER.info("[CLVM Migration] Successfully set volume [{}] to {} state.",
|
||||
volumePath, stateDescription);
|
||||
}
|
||||
} catch (CloudRuntimeException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
String errorMsg = String.format(
|
||||
"[CLVM Migration] Exception while setting volume [%s] to %s state: %s",
|
||||
volumePath, stateDescription, e.getMessage());
|
||||
LOGGER.error(errorMsg, e);
|
||||
throw new CloudRuntimeException(errorMsg, e);
|
||||
}
|
||||
}
|
||||
|
||||
public static void activateClvmVolumeExclusive(String volumePath) {
|
||||
modifyClvmVolumeState(volumePath, ClvmVolumeState.EXCLUSIVE.getLvchangeFlag(),
|
||||
ClvmVolumeState.EXCLUSIVE.getDescription(),
|
||||
"Activating CLVM volume in exclusive mode for copy");
|
||||
}
|
||||
|
||||
public static void deactivateClvmVolume(String volumePath) {
|
||||
try {
|
||||
modifyClvmVolumeState(volumePath, ClvmVolumeState.DEACTIVATE.getLvchangeFlag(),
|
||||
ClvmVolumeState.DEACTIVATE.getDescription(),
|
||||
"Deactivating CLVM volume after copy");
|
||||
} catch (Exception e) {
|
||||
LOGGER.warn("Failed to deactivate CLVM volume {}: {}", volumePath, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Determines if a disk is on a CLVM storage pool by checking the actual pool type from VirtualMachineTO.
 * This is the most reliable method as it uses CloudStack's own storage pool information.
 *
 * Falls back to inspecting the LVM volume group's clustered/shared attribute via the
 * vgs command when the disk cannot be matched against the VM spec.
 *
 * @param disk The disk definition to check
 * @param resource The LibvirtComputingResource instance (unused but kept for compatibility)
 * @param vmSpec The VirtualMachineTO specification containing disk and pool information
 * @return true if the disk is on a CLVM storage pool, false otherwise
 */
private static boolean isClvmVolume(DiskDef disk, LibvirtComputingResource resource, VirtualMachineTO vmSpec) {
    String diskPath = disk.getDiskPath();
    if (diskPath == null || vmSpec == null) {
        return false;
    }

    try {
        // Primary check: match the disk path against the VM spec's volumes and read the
        // pool type straight from the primary data store. A match (even a non-CLVM one)
        // is authoritative and short-circuits the fallback below.
        if (vmSpec.getDisks() != null) {
            for (DiskTO diskTO : vmSpec.getDisks()) {
                if (diskTO.getData() instanceof VolumeObjectTO) {
                    VolumeObjectTO volumeTO = (VolumeObjectTO) diskTO.getData();
                    // The path may be recorded on either the volume TO or the disk TO.
                    if (diskPath.equals(volumeTO.getPath()) || diskPath.equals(diskTO.getPath())) {
                        DataStoreTO dataStore = volumeTO.getDataStore();
                        if (dataStore instanceof PrimaryDataStoreTO) {
                            PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) dataStore;
                            boolean isClvm = StoragePoolType.CLVM == primaryStore.getPoolType() ||
                                    StoragePoolType.CLVM_NG == primaryStore.getPoolType();
                            LOGGER.debug("Disk {} identified as CLVM/CLVM_NG={} via VirtualMachineTO pool type: {}",
                                    diskPath, isClvm, primaryStore.getPoolType());
                            return isClvm;
                        }
                    }
                }
            }
        }

        // Fallback: Check VG attributes using vgs command (reliable)
        // CLVM VGs have the 'c' (clustered) or 's' (shared) flag in their attributes
        // Example: 'wz--ns' = shared, 'wz--n-' = not clustered
        // /dev/mapper/ paths are excluded because the VG name cannot be extracted from them.
        if (diskPath.startsWith("/dev/") && !diskPath.contains("/dev/mapper/")) {
            String vgName = extractVolumeGroupFromPath(diskPath);
            if (vgName != null) {
                boolean isClustered = checkIfVolumeGroupIsClustered(vgName);
                LOGGER.debug("Disk {} VG {} identified as clustered={} via vgs attribute check",
                        diskPath, vgName, isClustered);
                return isClustered;
            }
        }

    } catch (Exception e) {
        // Detection is best-effort: on any error, treat the disk as non-CLVM rather than fail.
        LOGGER.error("Error determining if volume {} is CLVM: {}", diskPath, e.getMessage(), e);
    }

    return false;
}
|
||||
|
||||
/**
|
||||
* Extracts the volume group name from a device path.
|
||||
*
|
||||
* @param devicePath The device path (e.g., /dev/vgname/lvname)
|
||||
* @return The volume group name, or null if cannot be determined
|
||||
*/
|
||||
static String extractVolumeGroupFromPath(String devicePath) {
|
||||
if (devicePath == null || !devicePath.startsWith("/dev/")) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Format: /dev/<vgname>/<lvname>
|
||||
String[] parts = devicePath.split("/");
|
||||
if (parts.length >= 3) {
|
||||
return parts[2]; // ["", "dev", "vgname", ...]
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a volume group is clustered (CLVM) by examining its attributes.
|
||||
* Uses 'vgs' command to check for the clustered/shared flag in VG attributes.
|
||||
*
|
||||
* VG Attr format (6 characters): wz--nc or wz--ns
|
||||
* Position 6: Clustered flag - 'c' = CLVM (clustered), 's' = shared (lvmlockd), '-' = not clustered
|
||||
*
|
||||
* @param vgName The volume group name
|
||||
* @return true if the VG is clustered or shared, false otherwise
|
||||
*/
|
||||
static boolean checkIfVolumeGroupIsClustered(String vgName) {
|
||||
if (vgName == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
// Use vgs with --noheadings and -o attr to get VG attributes
|
||||
OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
|
||||
Script vgsCmd = new Script("vgs", 5000, LOGGER);
|
||||
vgsCmd.add("--noheadings");
|
||||
vgsCmd.add("--unbuffered");
|
||||
vgsCmd.add("-o");
|
||||
vgsCmd.add("vg_attr");
|
||||
vgsCmd.add(vgName);
|
||||
|
||||
String result = vgsCmd.execute(parser);
|
||||
|
||||
if (result == null && parser.getLines() != null) {
|
||||
String output = parser.getLines();
|
||||
if (output != null && !output.isEmpty()) {
|
||||
// Parse VG attributes (format: wz--nc or wz--ns or wz--n-)
|
||||
// Position 6 (0-indexed 5) indicates clustering/sharing:
|
||||
// 'c' = clustered (CLVM) or 's' = shared (lvmlockd) or '-' = not clustered/shared
|
||||
String vgAttr = output.trim();
|
||||
if (vgAttr.length() >= 6) {
|
||||
char clusterFlag = vgAttr.charAt(5); // Position 6 (0-indexed 5)
|
||||
boolean isClustered = (clusterFlag == 'c' || clusterFlag == 's');
|
||||
LOGGER.debug("VG {} has attributes '{}', cluster/shared flag '{}' = {}",
|
||||
vgName, vgAttr, clusterFlag, isClustered);
|
||||
return isClustered;
|
||||
} else {
|
||||
LOGGER.warn("VG {} attributes '{}' have unexpected format (expected 6+ chars)", vgName, vgAttr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
LOGGER.warn("Failed to get VG attributes for {}: {}", vgName, result);
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
LOGGER.debug("Error checking if VG {} is clustered: {}", vgName, e.getMessage());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,90 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.script.Script;
|
||||
|
||||
@ResourceWrapper(handles = ClvmLockTransferCommand.class)
|
||||
public class LibvirtClvmLockTransferCommandWrapper
|
||||
extends CommandWrapper<ClvmLockTransferCommand, Answer, LibvirtComputingResource> {
|
||||
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
|
||||
@Override
|
||||
public Answer execute(ClvmLockTransferCommand cmd, LibvirtComputingResource serverResource) {
|
||||
String lvPath = cmd.getLvPath();
|
||||
ClvmLockTransferCommand.Operation operation = cmd.getOperation();
|
||||
String volumeUuid = cmd.getVolumeUuid();
|
||||
|
||||
logger.info(String.format("Executing CLVM lock transfer: operation=%s, lv=%s, volume=%s",
|
||||
operation, lvPath, volumeUuid));
|
||||
|
||||
try {
|
||||
String lvchangeOpt;
|
||||
String operationDesc;
|
||||
switch (operation) {
|
||||
case DEACTIVATE:
|
||||
lvchangeOpt = "-an";
|
||||
operationDesc = "deactivated";
|
||||
break;
|
||||
case ACTIVATE_EXCLUSIVE:
|
||||
lvchangeOpt = "-aey";
|
||||
operationDesc = "activated exclusively";
|
||||
break;
|
||||
case ACTIVATE_SHARED:
|
||||
lvchangeOpt = "-asy";
|
||||
operationDesc = "activated in shared mode";
|
||||
break;
|
||||
default:
|
||||
return new Answer(cmd, false, "Unknown operation: " + operation);
|
||||
}
|
||||
|
||||
Script script = new Script("/usr/sbin/lvchange", 30000, logger);
|
||||
script.add(lvchangeOpt);
|
||||
script.add(lvPath);
|
||||
|
||||
String result = script.execute();
|
||||
|
||||
if (result != null) {
|
||||
logger.error("CLVM lock transfer failed for volume {}: {}}",
|
||||
volumeUuid, result);
|
||||
return new Answer(cmd, false,
|
||||
String.format("lvchange %s %s failed: %s", lvchangeOpt, lvPath, result));
|
||||
}
|
||||
|
||||
logger.info("Successfully executed CLVM lock transfer: {} {}} for volume {}}",
|
||||
lvchangeOpt, lvPath, volumeUuid);
|
||||
|
||||
return new Answer(cmd, true,
|
||||
String.format("Successfully %s CLVM volume %s", operationDesc, volumeUuid));
|
||||
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception during CLVM lock transfer for volume {}: {}}",
|
||||
volumeUuid, e.getMessage(), e);
|
||||
return new Answer(cmd, false, "Exception: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +1,3 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
|
|
@ -42,9 +40,15 @@ import javax.xml.parsers.ParserConfigurationException;
|
|||
import javax.xml.transform.TransformerException;
|
||||
|
||||
import com.cloud.agent.api.VgpuTypesInfo;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.GPUDeviceTO;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtGpuDef;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import org.apache.cloudstack.utils.security.ParserUtils;
|
||||
import org.apache.commons.collections.MapUtils;
|
||||
import org.apache.commons.collections4.CollectionUtils;
|
||||
|
|
@ -69,7 +73,6 @@ import com.cloud.agent.api.Command;
|
|||
import com.cloud.agent.api.MigrateAnswer;
|
||||
import com.cloud.agent.api.MigrateCommand;
|
||||
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.agent.api.to.DpdkTO;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
|
|
@ -82,11 +85,6 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
|
|||
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
|
||||
import com.cloud.hypervisor.kvm.resource.MigrateKVMAsync;
|
||||
import com.cloud.hypervisor.kvm.resource.VifDriver;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import com.cloud.utils.Ternary;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
|
||||
@ResourceWrapper(handles = MigrateCommand.class)
|
||||
public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCommand, Answer, LibvirtComputingResource> {
|
||||
|
|
@ -117,7 +115,8 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
Command.State commandState = null;
|
||||
|
||||
List<InterfaceDef> ifaces = null;
|
||||
List<DiskDef> disks;
|
||||
List<DiskDef> disks = new ArrayList<>();
|
||||
VirtualMachineTO to = null;
|
||||
|
||||
Domain dm = null;
|
||||
Connect dconn = null;
|
||||
|
|
@ -136,7 +135,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
if (logger.isDebugEnabled()) {
|
||||
logger.debug(String.format("Found domain with name [%s]. Starting VM migration to host [%s].", vmName, destinationUri));
|
||||
}
|
||||
VirtualMachineTO to = command.getVirtualMachine();
|
||||
to = command.getVirtualMachine();
|
||||
|
||||
dm = conn.domainLookupByName(vmName);
|
||||
/*
|
||||
|
|
@ -336,6 +335,12 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName));
|
||||
}
|
||||
resumeDomainIfPaused(destDomain, vmName);
|
||||
|
||||
// Deactivate CLVM volumes on source host after successful migration
|
||||
if (to != null) {
|
||||
LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.DEACTIVATE);
|
||||
}
|
||||
|
||||
deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks);
|
||||
libvirtComputingResource.cleanOldSecretsByDiskDef(conn, disks);
|
||||
}
|
||||
|
|
@ -382,6 +387,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
if (destDomain != null) {
|
||||
destDomain.free();
|
||||
}
|
||||
// Revert CLVM volumes to exclusive mode on failure
|
||||
if (to != null) {
|
||||
LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE);
|
||||
}
|
||||
} catch (final LibvirtException e) {
|
||||
logger.trace("Ignoring libvirt error.", e);
|
||||
}
|
||||
|
|
@ -681,7 +690,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
|
|||
protected void deleteOrDisconnectDisksOnSourcePool(final LibvirtComputingResource libvirtComputingResource, final List<MigrateDiskInfo> migrateDiskInfoList,
|
||||
List<DiskDef> disks) {
|
||||
for (DiskDef disk : disks) {
|
||||
MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
|
||||
MigrateCommand.MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
|
||||
if (migrateDiskInfo != null && migrateDiskInfo.isSourceDiskOnStorageFileSystem()) {
|
||||
deleteLocalVolume(disk.getDiskPath());
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -52,9 +52,19 @@ public final class LibvirtModifyStoragePoolCommandWrapper extends CommandWrapper
|
|||
|
||||
final KVMStoragePool storagepool;
|
||||
try {
|
||||
Map<String, String> poolDetails = command.getDetails();
|
||||
if (poolDetails == null) {
|
||||
poolDetails = new HashMap<>();
|
||||
}
|
||||
|
||||
// Ensure CLVM secure zero-fill setting has a default value if not provided by MS
|
||||
if (!poolDetails.containsKey(KVMStoragePool.CLVM_SECURE_ZERO_FILL)) {
|
||||
poolDetails.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
|
||||
}
|
||||
|
||||
storagepool =
|
||||
storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool()
|
||||
.getUserInfo(), command.getPool().getType(), command.getDetails());
|
||||
.getUserInfo(), command.getPool().getType(), poolDetails);
|
||||
if (storagepool == null) {
|
||||
return new Answer(command, false, " Failed to create storage pool");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,83 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.PostMigrationAnswer;
|
||||
import com.cloud.agent.api.PostMigrationCommand;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
|
||||
/**
|
||||
* Wrapper for PostMigrationCommand on KVM hypervisor.
|
||||
* Handles post-migration tasks on the destination host after a VM has been successfully migrated.
|
||||
* Primary responsibility: Convert CLVM volumes from shared mode to exclusive mode on destination.
|
||||
*/
|
||||
@ResourceWrapper(handles = PostMigrationCommand.class)
|
||||
public final class LibvirtPostMigrationCommandWrapper extends CommandWrapper<PostMigrationCommand, Answer, LibvirtComputingResource> {
|
||||
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
|
||||
@Override
|
||||
public Answer execute(final PostMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) {
|
||||
final VirtualMachineTO vm = command.getVirtualMachine();
|
||||
final String vmName = command.getVmName();
|
||||
|
||||
if (vm == null || vmName == null) {
|
||||
return new PostMigrationAnswer(command, "VM or VM name is null");
|
||||
}
|
||||
|
||||
logger.debug("Executing post-migration tasks for VM {} on destination host", vmName);
|
||||
|
||||
try {
|
||||
final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
|
||||
|
||||
List<DiskDef> disks = libvirtComputingResource.getDisks(conn, vmName);
|
||||
logger.debug("[CLVM Post-Migration] Processing volumes for VM {} to claim exclusive locks on any CLVM volumes", vmName);
|
||||
LibvirtComputingResource.modifyClvmVolumesStateForMigration(
|
||||
disks,
|
||||
libvirtComputingResource,
|
||||
vm,
|
||||
LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE
|
||||
);
|
||||
|
||||
logger.debug("Successfully completed post-migration tasks for VM {}", vmName);
|
||||
return new PostMigrationAnswer(command);
|
||||
|
||||
} catch (final LibvirtException e) {
|
||||
logger.error("Libvirt error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
|
||||
return new PostMigrationAnswer(command, e);
|
||||
} catch (final Exception e) {
|
||||
logger.error("Error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
|
||||
return new PostMigrationAnswer(command, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
//
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
//
|
||||
|
||||
package com.cloud.hypervisor.kvm.resource.wrapper;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.PreMigrationCommand;
|
||||
import com.cloud.agent.api.to.VirtualMachineTO;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
|
||||
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
|
||||
import com.cloud.resource.CommandWrapper;
|
||||
import com.cloud.resource.ResourceWrapper;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.Domain;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Handles PreMigrationCommand on the source host before live migration.
|
||||
* Converts CLVM volume locks from exclusive to shared mode so the destination host can access them.
|
||||
*/
|
||||
@ResourceWrapper(handles = PreMigrationCommand.class)
|
||||
public class LibvirtPreMigrationCommandWrapper extends CommandWrapper<PreMigrationCommand, Answer, LibvirtComputingResource> {
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
|
||||
@Override
|
||||
public Answer execute(PreMigrationCommand command, LibvirtComputingResource libvirtComputingResource) {
|
||||
String vmName = command.getVmName();
|
||||
VirtualMachineTO vmSpec = command.getVirtualMachine();
|
||||
|
||||
logger.info("Preparing source host for migration of VM: {}", vmName);
|
||||
|
||||
Connect conn = null;
|
||||
Domain dm = null;
|
||||
|
||||
try {
|
||||
LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
|
||||
conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
|
||||
dm = conn.domainLookupByName(vmName);
|
||||
|
||||
List<DiskDef> disks = libvirtComputingResource.getDisks(conn, vmName);
|
||||
logger.info("Converting CLVM volumes to shared mode for VM: {}", vmName);
|
||||
LibvirtComputingResource.modifyClvmVolumesStateForMigration(
|
||||
disks,
|
||||
libvirtComputingResource,
|
||||
vmSpec,
|
||||
LibvirtComputingResource.ClvmVolumeState.SHARED
|
||||
);
|
||||
|
||||
logger.info("Successfully prepared source host for migration of VM: {}", vmName);
|
||||
return new Answer(command, true, "Source host prepared for migration");
|
||||
|
||||
} catch (LibvirtException e) {
|
||||
logger.error("Failed to prepare source host for migration of VM: {}", vmName, e);
|
||||
return new Answer(command, false, "Failed to prepare source host: " + e.getMessage());
|
||||
} finally {
|
||||
if (dm != null) {
|
||||
try {
|
||||
dm.free();
|
||||
} catch (LibvirtException e) {
|
||||
logger.warn("Failed to free domain {}: {}", vmName, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -21,6 +21,7 @@ package com.cloud.hypervisor.kvm.resource.wrapper;
|
|||
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.storage.configdrive.ConfigDrive;
|
||||
|
|
@ -124,6 +125,20 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
|
|||
return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host");
|
||||
}
|
||||
|
||||
// Activate CLVM volumes in shared mode on destination host for live migration
|
||||
try {
|
||||
List<LibvirtVMDef.DiskDef> disks = libvirtComputingResource.getDisks(conn, vm.getName());
|
||||
LibvirtComputingResource.modifyClvmVolumesStateForMigration(
|
||||
disks,
|
||||
libvirtComputingResource,
|
||||
vm,
|
||||
LibvirtComputingResource.ClvmVolumeState.SHARED
|
||||
);
|
||||
} catch (Exception e) {
|
||||
logger.warn("Failed to activate CLVM volumes in shared mode on destination for VM {}: {}",
|
||||
vm.getName(), e.getMessage(), e);
|
||||
}
|
||||
|
||||
logger.info("Successfully prepared destination host for migration of VM {}", vm.getName());
|
||||
return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm);
|
||||
} catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) {
|
||||
|
|
|
|||
|
|
@ -113,7 +113,8 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
|
|||
logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk);
|
||||
|
||||
/* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via qemu-img */
|
||||
if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex
|
||||
if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.CLVM_NG
|
||||
&& pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex
|
||||
&& vol.getFormat() != PhysicalDiskFormat.QCOW2) {
|
||||
logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume.");
|
||||
try {
|
||||
|
|
|
|||
|
|
@ -117,7 +117,7 @@ public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper<RevertSn
|
|||
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(snapshotImageStore.getUrl());
|
||||
}
|
||||
|
||||
if (primaryPool.getType() == StoragePoolType.CLVM) {
|
||||
if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
|
||||
Script cmd = new Script(libvirtComputingResource.manageSnapshotPath(), libvirtComputingResource.getCmdsTimeout(), logger);
|
||||
cmd.add("-v", getFullPathAccordingToStorage(secondaryStoragePool, snapshotRelPath));
|
||||
cmd.add("-n", snapshotDisk.getName());
|
||||
|
|
|
|||
|
|
@ -38,6 +38,8 @@ public interface KVMStoragePool {
|
|||
public static final long HeartBeatUpdateMaxTries = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_MAX_TRIES);
|
||||
public static final long HeartBeatUpdateRetrySleep = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_RETRY_SLEEP);
|
||||
public static final long HeartBeatCheckerTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_CHECKER_TIMEOUT);
|
||||
public static final String CLVM_SECURE_ZERO_FILL = "clvmsecurezerofill";
|
||||
|
||||
|
||||
public default KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) {
|
||||
return createPhysicalDisk(volumeUuid, format, provisioningType, size, passphrase);
|
||||
|
|
|
|||
|
|
@ -288,12 +288,34 @@ public class KVMStoragePoolManager {
|
|||
}
|
||||
|
||||
if (pool instanceof LibvirtStoragePool) {
|
||||
addPoolDetails(uuid, (LibvirtStoragePool) pool);
|
||||
LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
|
||||
addPoolDetails(uuid, libvirtPool);
|
||||
|
||||
updatePoolTypeIfApplicable(libvirtPool, pool, type, uuid);
|
||||
}
|
||||
|
||||
return pool;
|
||||
}
|
||||
|
||||
private void updatePoolTypeIfApplicable(LibvirtStoragePool libvirtPool, KVMStoragePool pool,
|
||||
StoragePoolType type, String uuid) {
|
||||
StoragePoolType correctType = type;
|
||||
if (correctType == null || correctType == StoragePoolType.CLVM) {
|
||||
StoragePoolInformation info = _storagePools.get(uuid);
|
||||
if (info != null && info.getPoolType() != null) {
|
||||
correctType = info.getPoolType();
|
||||
}
|
||||
}
|
||||
|
||||
if (correctType != null && correctType != pool.getType() &&
|
||||
(correctType == StoragePoolType.CLVM || correctType == StoragePoolType.CLVM_NG) &&
|
||||
(pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG)) {
|
||||
logger.debug("Correcting pool type from {} to {} for pool {} based on caller/cached information",
|
||||
pool.getType(), correctType, uuid);
|
||||
libvirtPool.setType(correctType);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* As the class {@link LibvirtStoragePool} is constrained to the {@link org.libvirt.StoragePool} class, there is no way of saving a generic parameter such as the details, hence,
|
||||
* this method was created to always make available the details of libvirt primary storages for when they are needed.
|
||||
|
|
@ -450,6 +472,10 @@ public class KVMStoragePoolManager {
|
|||
return adaptor.createDiskFromTemplate(template, name,
|
||||
PhysicalDiskFormat.RAW, provisioningType,
|
||||
size, destPool, timeout, passphrase);
|
||||
} else if (destPool.getType() == StoragePoolType.CLVM_NG) {
|
||||
return adaptor.createDiskFromTemplate(template, name,
|
||||
PhysicalDiskFormat.QCOW2, provisioningType,
|
||||
size, destPool, timeout, passphrase);
|
||||
} else if (template.getFormat() == PhysicalDiskFormat.DIR) {
|
||||
return adaptor.createDiskFromTemplate(template, name,
|
||||
PhysicalDiskFormat.DIR, provisioningType,
|
||||
|
|
@ -491,6 +517,11 @@ public class KVMStoragePoolManager {
|
|||
return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout);
|
||||
}
|
||||
|
||||
public void createTemplateOnClvmNg(String templatePath, String templateUuid, int timeout, KVMStoragePool pool) {
|
||||
LibvirtStorageAdaptor adaptor = (LibvirtStorageAdaptor) getStorageAdaptor(pool.getType());
|
||||
adaptor.createTemplateOnClvmNg(templatePath, templateUuid, timeout, pool);
|
||||
}
|
||||
|
||||
public Ternary<Boolean, Map<String, String>, String> prepareStorageClient(StoragePoolType type, String uuid, Map<String, String> details) {
|
||||
StorageAdaptor adaptor = getStorageAdaptor(type);
|
||||
return adaptor.prepareStorageClient(uuid, details);
|
||||
|
|
|
|||
|
|
@ -344,15 +344,28 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
path = destTempl.getUuid();
|
||||
}
|
||||
|
||||
if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
|
||||
logger.warn("Failed to connect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
|
||||
return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
|
||||
}
|
||||
if (primaryPool.getType() == StoragePoolType.CLVM_NG) {
|
||||
logger.info("Copying template {} to CLVM_NG pool {}",
|
||||
destTempl.getUuid(), primaryPool.getUuid());
|
||||
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
try {
|
||||
storagePoolMgr.createTemplateOnClvmNg(tmplVol.getPath(), path, cmd.getWaitInMillSeconds(), primaryPool);
|
||||
primaryVol = primaryPool.getPhysicalDisk("template-" + path);
|
||||
} catch (Exception e) {
|
||||
logger.error("Failed to create CLVM_NG template: {}", e.getMessage(), e);
|
||||
return new PrimaryStorageDownloadAnswer("Failed to create CLVM_NG template: " + e.getMessage());
|
||||
}
|
||||
} else {
|
||||
if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
|
||||
logger.warn("Failed to connect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
|
||||
return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
|
||||
}
|
||||
|
||||
if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) {
|
||||
logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
||||
if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) {
|
||||
logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds());
|
||||
|
|
@ -1115,7 +1128,14 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
} else {
|
||||
final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), logger);
|
||||
command.add("-b", isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath());
|
||||
String backupPath;
|
||||
if (primaryPool.getType() == StoragePoolType.CLVM) {
|
||||
backupPath = snapshotDisk.getPath();
|
||||
logger.debug("Using snapshotDisk path for CLVM backup: " + backupPath);
|
||||
} else {
|
||||
backupPath = isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath();
|
||||
}
|
||||
command.add("-b", backupPath);
|
||||
command.add(NAME_OPTION, snapshotName);
|
||||
command.add("-p", snapshotDestPath);
|
||||
|
||||
|
|
@ -1160,6 +1180,76 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete a CLVM snapshot using comprehensive cleanup.
|
||||
* For CLVM, the snapshot path stored in DB is: /dev/vgname/volumeuuid/snapshotuuid
|
||||
* the actual snapshot LV created by CLVM is: /dev/vgname/md5(snapshotuuid)
|
||||
*
|
||||
* @param snapshotPath The snapshot path from database
|
||||
* @param checkExistence If true, checks if snapshot exists before cleanup (for explicit deletion)
|
||||
* If false, always performs cleanup (for post-backup cleanup)
|
||||
* @return true if cleanup was performed, false if snapshot didn't exist (when checkExistence=true)
|
||||
*/
|
||||
private boolean deleteClvmSnapshot(String snapshotPath, boolean checkExistence) {
|
||||
logger.info("Starting CLVM snapshot deletion for path: {}, checkExistence: {}", snapshotPath, checkExistence);
|
||||
|
||||
try {
|
||||
// Parse the snapshot path: /dev/acsvg/volume-uuid/snapshot-uuid
|
||||
String[] pathParts = snapshotPath.split("/");
|
||||
if (pathParts.length < 5) {
|
||||
logger.warn("Invalid CLVM snapshot path format: {}, expected format: /dev/vgname/volume-uuid/snapshot-uuid", snapshotPath);
|
||||
return false;
|
||||
}
|
||||
|
||||
String vgName = pathParts[2];
|
||||
String volumeUuid = pathParts[3];
|
||||
String snapshotUuid = pathParts[4];
|
||||
|
||||
logger.info("Parsed snapshot path - VG: {}, Volume: {}, Snapshot: {}", vgName, volumeUuid, snapshotUuid);
|
||||
|
||||
// Compute MD5 hash of snapshot UUID (same as managesnapshot.sh does)
|
||||
String md5Hash = computeMd5Hash(snapshotUuid);
|
||||
logger.debug("Computed MD5 hash for snapshot UUID {}: {}", snapshotUuid, md5Hash);
|
||||
String snapshotLvPath = vgName + "/" + md5Hash;
|
||||
String actualSnapshotPath = "/dev/" + snapshotLvPath;
|
||||
|
||||
// Check if snapshot exists (if requested)
|
||||
if (checkExistence) {
|
||||
Script checkSnapshot = new Script("/usr/sbin/lvs", 5000, logger);
|
||||
checkSnapshot.add("--noheadings");
|
||||
checkSnapshot.add(snapshotLvPath);
|
||||
String checkResult = checkSnapshot.execute();
|
||||
|
||||
if (checkResult != null) {
|
||||
// Snapshot doesn't exist - was already cleaned up
|
||||
logger.info("CLVM snapshot {} was already deleted, no cleanup needed", md5Hash);
|
||||
return false;
|
||||
}
|
||||
logger.info("CLVM snapshot still exists for {}, performing cleanup", md5Hash);
|
||||
}
|
||||
|
||||
// Use native LVM command to remove snapshot (handles all cleanup automatically)
|
||||
Script removeSnapshot = new Script("lvremove", 10000, logger);
|
||||
removeSnapshot.add("-f");
|
||||
removeSnapshot.add(snapshotLvPath);
|
||||
|
||||
logger.info("Executing: lvremove -f {}", snapshotLvPath);
|
||||
String removeResult = removeSnapshot.execute();
|
||||
|
||||
if (removeResult == null) {
|
||||
logger.info("Successfully deleted CLVM snapshot: {} (actual path: {})", snapshotPath, actualSnapshotPath);
|
||||
return true;
|
||||
} else {
|
||||
logger.warn("Failed to delete CLVM snapshot {}: {}", snapshotPath, removeResult);
|
||||
return false;
|
||||
}
|
||||
|
||||
} catch (Exception ex) {
|
||||
logger.error("Exception while deleting CLVM snapshot {}", snapshotPath, ex);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObjectTO snapshot,
|
||||
KVMStoragePool primaryPool) {
|
||||
String snapshotPath = snapshot.getPath();
|
||||
|
|
@ -1172,7 +1262,14 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
|
||||
if ((backupSnapshotAfterTakingSnapshot == null || BooleanUtils.toBoolean(backupSnapshotAfterTakingSnapshot)) && deleteSnapshotOnPrimary) {
|
||||
try {
|
||||
Files.deleteIfExists(Paths.get(snapshotPath));
|
||||
if (primaryPool.getType() == StoragePoolType.CLVM) {
|
||||
boolean cleanedUp = deleteClvmSnapshot(snapshotPath, false);
|
||||
if (!cleanedUp) {
|
||||
logger.info("No need to delete CLVM snapshot on primary as it doesn't exist: {}", snapshotPath);
|
||||
}
|
||||
} else {
|
||||
Files.deleteIfExists(Paths.get(snapshotPath));
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
logger.error("Failed to delete snapshot [{}] on primary storage [{}].", snapshot.getId(), snapshot.getName(), ex);
|
||||
}
|
||||
|
|
@ -1181,6 +1278,26 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Compute MD5 hash of a string, matching what managesnapshot.sh does:
|
||||
* echo "${snapshot}" | md5sum -t | awk '{ print $1 }'
|
||||
*/
|
||||
private String computeMd5Hash(String input) {
|
||||
try {
|
||||
java.security.MessageDigest md = java.security.MessageDigest.getInstance("MD5");
|
||||
byte[] array = md.digest((input + "\n").getBytes("UTF-8"));
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (byte b : array) {
|
||||
sb.append(String.format("%02x", b));
|
||||
}
|
||||
return sb.toString();
|
||||
} catch (Exception e) {
|
||||
logger.error("Failed to compute MD5 hash for: {}", input, e);
|
||||
return input;
|
||||
}
|
||||
}
|
||||
|
||||
protected synchronized void attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, Map<String, String> params, DataStoreTO store) throws
|
||||
LibvirtException, InternalErrorException {
|
||||
DiskDef iso = new DiskDef();
|
||||
|
|
@ -1842,8 +1959,10 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
}
|
||||
}
|
||||
|
||||
if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && volume.requiresEncryption()) {
|
||||
throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
|
||||
if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state)) {
|
||||
if (volume.requiresEncryption()) {
|
||||
throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
|
||||
}
|
||||
}
|
||||
|
||||
KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
|
||||
|
|
@ -2880,6 +2999,25 @@ public class KVMStorageProcessor implements StorageProcessor {
|
|||
if (snapshotTO.isKvmIncrementalSnapshot()) {
|
||||
deleteCheckpoint(snapshotTO);
|
||||
}
|
||||
} else if (primaryPool.getType() == StoragePoolType.CLVM) {
|
||||
// For CLVM, snapshots are typically already deleted from primary storage during backup
|
||||
// via deleteSnapshotOnPrimary in the backupSnapshot finally block.
|
||||
// This is called when the user explicitly deletes the snapshot via UI/API.
|
||||
// We check if the snapshot still exists and clean it up if needed.
|
||||
logger.info("Processing CLVM snapshot deletion (id={}, name={}, path={}) on primary storage",
|
||||
snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath());
|
||||
|
||||
String snapshotPath = snapshotTO.getPath();
|
||||
if (snapshotPath != null && !snapshotPath.isEmpty()) {
|
||||
boolean wasDeleted = deleteClvmSnapshot(snapshotPath, true);
|
||||
if (wasDeleted) {
|
||||
logger.info("Successfully cleaned up CLVM snapshot {} from primary storage", snapshotName);
|
||||
} else {
|
||||
logger.info("CLVM snapshot {} was already deleted from primary storage during backup, no cleanup needed", snapshotName);
|
||||
}
|
||||
} else {
|
||||
logger.debug("CLVM snapshot path is null or empty, assuming already cleaned up");
|
||||
}
|
||||
} else {
|
||||
logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
|
||||
throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
|
||||
|
|
|
|||
|
|
@ -34,6 +34,9 @@ import java.util.stream.Collectors;
|
|||
|
||||
import com.cloud.agent.properties.AgentProperties;
|
||||
import com.cloud.agent.properties.AgentPropertiesFileHandler;
|
||||
import com.cloud.utils.script.OutputInterpreter;
|
||||
import com.google.gson.JsonObject;
|
||||
import com.google.gson.JsonParser;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
|
||||
|
|
@ -47,6 +50,7 @@ import org.apache.commons.collections.CollectionUtils;
|
|||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.joda.time.Duration;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.LibvirtException;
|
||||
import org.libvirt.Secret;
|
||||
|
|
@ -254,9 +258,12 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
|
||||
try {
|
||||
vol = pool.storageVolLookupByName(volName);
|
||||
logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
|
||||
if (vol != null) {
|
||||
logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
|
||||
}
|
||||
} catch (LibvirtException e) {
|
||||
throw new CloudRuntimeException("Could not find volume " + volName + ": " + e.getMessage());
|
||||
logger.debug("Volume " + volName + " still not found after pool refresh: " + e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -663,6 +670,17 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
|
||||
try {
|
||||
StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
|
||||
|
||||
// Check if volume was found - if null, treat as not found and trigger fallback for CLVM/CLVM_NG
|
||||
if (vol == null) {
|
||||
logger.debug("Volume " + volumeUuid + " not found in libvirt, will check for CLVM/CLVM_NG fallback");
|
||||
if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) {
|
||||
return getPhysicalDisk(volumeUuid, pool, libvirtPool);
|
||||
}
|
||||
|
||||
throw new CloudRuntimeException("Volume " + volumeUuid + " not found in libvirt pool");
|
||||
}
|
||||
|
||||
KVMPhysicalDisk disk;
|
||||
LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
|
||||
disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
|
||||
|
|
@ -693,11 +711,167 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
return disk;
|
||||
} catch (LibvirtException e) {
|
||||
logger.debug("Failed to get physical disk:", e);
|
||||
logger.debug("Failed to get volume from libvirt: " + e.getMessage());
|
||||
// For CLVM/CLVM_NG, try direct block device access as fallback
|
||||
if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) {
|
||||
return getPhysicalDisk(volumeUuid, pool, libvirtPool);
|
||||
}
|
||||
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
private KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool, LibvirtStoragePool libvirtPool) {
|
||||
logger.info("CLVM volume not visible to libvirt, attempting direct block device access for volume: {}", volumeUuid);
|
||||
|
||||
try {
|
||||
logger.debug("Refreshing libvirt storage pool: {}", pool.getUuid());
|
||||
libvirtPool.getPool().refresh(0);
|
||||
|
||||
StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
|
||||
if (vol != null) {
|
||||
logger.info("Volume found after pool refresh: {}", volumeUuid);
|
||||
KVMPhysicalDisk disk;
|
||||
LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
|
||||
disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
|
||||
disk.setSize(vol.getInfo().allocation);
|
||||
disk.setVirtualSize(vol.getInfo().capacity);
|
||||
disk.setFormat(voldef.getFormat() == LibvirtStorageVolumeDef.VolumeFormat.QCOW2 ?
|
||||
PhysicalDiskFormat.QCOW2 : PhysicalDiskFormat.RAW);
|
||||
return disk;
|
||||
}
|
||||
} catch (LibvirtException refreshEx) {
|
||||
logger.debug("Pool refresh failed or volume still not found: {}", refreshEx.getMessage());
|
||||
}
|
||||
|
||||
// Still not found after refresh, try direct block device access
|
||||
return getPhysicalDiskViaDirectBlockDevice(volumeUuid, pool);
|
||||
}
|
||||
|
||||
private String getVgName(String sourceDir) {
|
||||
String vgName = sourceDir;
|
||||
if (vgName.startsWith("/")) {
|
||||
String[] parts = vgName.split("/");
|
||||
List<String> tokens = Arrays.stream(parts)
|
||||
.filter(s -> !s.isEmpty()).collect(Collectors.toList());
|
||||
|
||||
vgName = tokens.size() > 1 ? tokens.get(1)
|
||||
: tokens.size() == 1 ? tokens.get(0)
|
||||
: "";
|
||||
}
|
||||
return vgName;
|
||||
}
|
||||
|
||||
/**
 * For CLVM volumes that exist in LVM but are not visible to libvirt,
 * access them directly via block device path.
 *
 * Flow: verify the LV exists via `lvs`, resolve its device node (standard
 * /dev/&lt;vg&gt;/&lt;lv&gt; path first, then the /dev/mapper name with LVM's
 * hyphen-escaping), read its size via `lvs -o lv_size`, and build a
 * KVMPhysicalDisk from the raw device path.
 *
 * @param volumeUuid the LV name (volume UUID) to look up
 * @param pool       the CLVM/CLVM_NG pool whose local path holds the VG name
 * @return a KVMPhysicalDisk pointing at the LV's device node
 * @throws CloudRuntimeException when the LV does not exist, its device node is
 *         missing, or any other failure occurs
 */
private KVMPhysicalDisk getPhysicalDiskViaDirectBlockDevice(String volumeUuid, KVMStoragePool pool) {
    try {
        // For CLVM, pool sourceDir contains the VG path (e.g., "/dev/acsvg")
        // Extract the VG name
        String sourceDir = pool.getLocalPath();
        if (sourceDir == null || sourceDir.isEmpty()) {
            throw new CloudRuntimeException("CLVM pool sourceDir is not set, cannot determine VG name");
        }
        String vgName = getVgName(sourceDir);
        logger.debug("Using VG name: {} (from sourceDir: {}) ", vgName, sourceDir);

        // Check if the LV exists in LVM using lvs command
        logger.debug("Checking if volume {} exists in VG {}", volumeUuid, vgName);
        Script checkLvCmd = new Script("/usr/sbin/lvs", 5000, logger);
        checkLvCmd.add("--noheadings");
        checkLvCmd.add("--unbuffered");
        checkLvCmd.add(vgName + "/" + volumeUuid);

        // Script.execute() returns null on success, an error message on failure.
        String checkResult = checkLvCmd.execute();
        if (checkResult != null) {
            logger.debug("Volume {} does not exist in VG {}: {}", volumeUuid, vgName, checkResult);
            throw new CloudRuntimeException(String.format("Storage volume not found: no storage vol with matching name '%s'", volumeUuid));
        }

        logger.info("Volume {} exists in LVM but not visible to libvirt, accessing directly", volumeUuid);

        // Try standard device path first
        String lvPath = "/dev/" + vgName + "/" + volumeUuid;
        File lvDevice = new File(lvPath);

        if (!lvDevice.exists()) {
            // Try device-mapper path with escaped hyphens
            // (device-mapper doubles each '-' inside VG and LV names).
            String vgNameEscaped = vgName.replace("-", "--");
            String volumeUuidEscaped = volumeUuid.replace("-", "--");
            lvPath = "/dev/mapper/" + vgNameEscaped + "-" + volumeUuidEscaped;
            lvDevice = new File(lvPath);

            if (!lvDevice.exists()) {
                logger.warn("Volume exists in LVM but device node not found: {}", volumeUuid);
                throw new CloudRuntimeException(String.format("Could not find volume %s " +
                        "in VG %s - volume exists in LVM but device node not accessible", volumeUuid, vgName));
            }
        }

        // Determine the LV size in bytes via `lvs --units b -o lv_size`.
        long size = 0;
        try {
            Script lvsCmd = new Script("/usr/sbin/lvs", 5000, logger);
            lvsCmd.add("--noheadings");
            lvsCmd.add("--units");
            lvsCmd.add("b");
            lvsCmd.add("-o");
            lvsCmd.add("lv_size");
            lvsCmd.add(lvPath);

            OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
            String result = lvsCmd.execute(parser);

            // On success the size is in the parsed lines; otherwise fall back to
            // whatever text execute() returned.
            String output = null;
            if (result == null) {
                output = parser.getLines();
            } else {
                output = result;
            }

            if (output != null && !output.isEmpty()) {
                // Strip everything but digits (e.g. "  1073741824B" -> "1073741824").
                String sizeStr = output.trim().replaceAll("[^0-9]", "");
                if (!sizeStr.isEmpty()) {
                    size = Long.parseLong(sizeStr);
                }
            }
        } catch (Exception sizeEx) {
            logger.warn("Failed to get size for CLVM volume via lvs: {}", sizeEx.getMessage());
            // NOTE(review): a block device node is normally not a regular file, so this
            // fallback likely leaves size at 0 — confirm intended.
            if (lvDevice.isFile()) {
                size = lvDevice.length();
            }
        }

        KVMPhysicalDisk disk = new KVMPhysicalDisk(lvPath, volumeUuid, pool);

        // Detect correct format based on pool type
        PhysicalDiskFormat diskFormat = PhysicalDiskFormat.RAW; // Default for legacy CLVM
        if (pool.getType() == StoragePoolType.CLVM_NG) {
            // CLVM_NG uses QCOW2 format on LVM block devices
            diskFormat = PhysicalDiskFormat.QCOW2;
            logger.debug("CLVM_NG pool detected, setting disk format to QCOW2 for volume {}", volumeUuid);
        } else {
            logger.debug("CLVM pool detected, setting disk format to RAW for volume {}", volumeUuid);
        }

        disk.setFormat(diskFormat);
        disk.setSize(size);
        disk.setVirtualSize(size);

        logger.info("Successfully accessed CLVM/CLVM_NG volume via direct block device: {} " +
                "with format: {} and size: {} bytes", lvPath, diskFormat, size);

        return disk;

    } catch (CloudRuntimeException ex) {
        // Already carries a user-facing message; rethrow unchanged.
        throw ex;
    } catch (Exception ex) {
        logger.error("Failed to access CLVM volume via direct block device: {}",volumeUuid, ex);
        throw new CloudRuntimeException(String.format("Could not find volume %s: %s ",volumeUuid, ex.getMessage()));
    }
}
|
||||
|
||||
/**
|
||||
* adjust refcount
|
||||
*/
|
||||
|
|
@ -822,7 +996,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
try {
|
||||
sp = createNetfsStoragePool(PoolType.GLUSTERFS, conn, name, host, path, null);
|
||||
} catch (LibvirtException e) {
|
||||
logger.error("Failed to create glusterfs mount: " + host + ":" + path , e);
|
||||
logger.error("Failed to create glusterlvm_fs mount: " + host + ":" + path , e);
|
||||
logger.error(e.getStackTrace());
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
|
|
@ -830,7 +1004,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
sp = createSharedStoragePool(conn, name, host, path);
|
||||
} else if (type == StoragePoolType.RBD) {
|
||||
sp = createRBDStoragePool(conn, name, host, port, userInfo, path);
|
||||
} else if (type == StoragePoolType.CLVM) {
|
||||
} else if (type == StoragePoolType.CLVM || type == StoragePoolType.CLVM_NG) {
|
||||
sp = createCLVMStoragePool(conn, name, host, path);
|
||||
}
|
||||
}
|
||||
|
|
@ -1116,6 +1290,17 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
@Override
|
||||
public boolean connectPhysicalDisk(String name, KVMStoragePool pool, Map<String, String> details, boolean isVMMigrate) {
|
||||
// this is for managed storage that needs to prep disks prior to use
|
||||
if ((pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) && isVMMigrate) {
|
||||
logger.info("Activating CLVM/CLVM_NG volume {} at location: {} in shared mode for VM migration", name, pool.getLocalPath() + File.separator + name);
|
||||
Script activateVolInSharedMode = new Script("lvchange", 5000, logger);
|
||||
activateVolInSharedMode.add("-asy");
|
||||
activateVolInSharedMode.add(pool.getLocalPath() + File.separator + name);
|
||||
String result = activateVolInSharedMode.execute();
|
||||
if (result != null) {
|
||||
logger.error("Failed to activate CLVM/CLVM_NG volume {} in shared mode for VM migration. Command output: {}", name, result);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -1224,10 +1409,21 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
// For CLVM/CLVM_NG pools, always use direct LVM cleanup to ensure secure zero-fill
|
||||
if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) {
|
||||
logger.info("CLVM/CLVM_NG pool detected - using direct LVM cleanup with secure zero-fill for volume {}", uuid);
|
||||
return cleanupCLVMVolume(uuid, pool);
|
||||
}
|
||||
|
||||
// For non-CLVM pools, use libvirt deletion
|
||||
LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool;
|
||||
try {
|
||||
StorageVol vol = getVolume(libvirtPool.getPool(), uuid);
|
||||
logger.debug("Instructing libvirt to remove volume " + uuid + " from pool " + pool.getUuid());
|
||||
if (vol == null) {
|
||||
logger.warn("Volume {} not found in libvirt pool {}, it may have been already deleted", uuid, pool.getUuid());
|
||||
return true;
|
||||
}
|
||||
logger.debug("Instructing libvirt to remove volume {} from pool {}", uuid, pool.getUuid());
|
||||
if(Storage.ImageFormat.DIR.equals(format)){
|
||||
deleteDirVol(libvirtPool, vol);
|
||||
} else {
|
||||
|
|
@ -1240,6 +1436,86 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean shouldSecureZeroFill(KVMStoragePool pool) {
|
||||
Map<String, String> details = pool.getDetails();
|
||||
String secureZeroFillStr = (details != null) ? details.get(KVMStoragePool.CLVM_SECURE_ZERO_FILL) : null;
|
||||
return Boolean.parseBoolean(secureZeroFillStr);
|
||||
}
|
||||
|
||||
/**
 * Clean up a CLVM volume directly using LVM commands, bypassing libvirt.
 * This is used as a fallback when libvirt cannot find or delete the volume.
 *
 * Optionally zero-fills the LV first (when the pool enables secure zero-fill),
 * then removes it with `lvremove -f`.
 * NOTE(review): despite the original summary, no snapshot LVs are removed here;
 * snapshot cleanup happens elsewhere — confirm.
 *
 * @param uuid the LV name (volume UUID) to remove
 * @param pool the CLVM/CLVM_NG pool whose local path holds the VG name
 * @return true when removed or already absent (and on unexpected exceptions,
 *         which are deliberately treated as success — see NOTE below);
 *         false only when lvremove reports a non-"not found" error
 */
private boolean cleanupCLVMVolume(String uuid, KVMStoragePool pool) {
    logger.info("Starting direct LVM cleanup for CLVM volume: {} in pool: {}", uuid, pool.getUuid());

    try {
        String sourceDir = pool.getLocalPath();
        if (sourceDir == null || sourceDir.isEmpty()) {
            // Without a source dir we cannot derive the VG; report success so the
            // caller's delete flow is not blocked.
            logger.debug("Source directory is null or empty, cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
            return true;
        }
        String vgName = getVgName(sourceDir);
        logger.info("Determined VG name: {} for pool: {}", vgName, pool.getUuid());

        if (vgName == null || vgName.isEmpty()) {
            logger.warn("Cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
            return true;
        }

        String lvPath = "/dev/" + vgName + "/" + uuid;
        logger.debug("Volume path: {}", lvPath);

        // Check if the LV exists
        Script checkLvs = new Script("lvs", 5000, logger);
        checkLvs.add("--noheadings");
        checkLvs.add("--unbuffered");
        checkLvs.add(lvPath);

        logger.info("Checking if volume exists: lvs --noheadings --unbuffered {}", lvPath);
        // Script.execute() returns null on success, an error message on failure.
        String checkResult = checkLvs.execute();

        if (checkResult != null) {
            logger.info("CLVM volume {} does not exist in LVM (check returned: {}), considering it as already deleted", uuid, checkResult);
            return true;
        }

        logger.info("Volume {} exists, proceeding with cleanup", uuid);

        boolean secureZeroFillEnabled = shouldSecureZeroFill(pool);

        if (secureZeroFillEnabled) {
            // Best-effort wipe before removal; failures inside are logged, not thrown.
            logger.info("Step 1: Zero-filling volume {} for security", uuid);
            secureZeroFillVolume(lvPath, uuid);
        } else {
            logger.info("Secure zero-fill is disabled, skipping zero-filling for volume {}", uuid);
        }

        logger.info("Step 2: Removing volume {}", uuid);
        Script removeLv = new Script("lvremove", 10000, logger);
        removeLv.add("-f");
        removeLv.add(lvPath);

        logger.info("Executing command: lvremove -f {}", lvPath);
        String removeResult = removeLv.execute();

        if (removeResult == null) {
            logger.info("Successfully removed CLVM volume {} using direct LVM cleanup", uuid);
            return true;
        } else {
            logger.warn("Command 'lvremove -f {}' returned error: {}", lvPath, removeResult);
            // A "not found" style failure means another path already removed it.
            if (removeResult.contains("not found") || removeResult.contains("Failed to find")) {
                logger.info("CLVM volume {} not found during cleanup, considering it as already deleted", uuid);
                return true;
            }
            return false;
        }
    } catch (Exception ex) {
        logger.error("Exception during CLVM volume cleanup for {}: {}", uuid, ex.getMessage(), ex);
        // NOTE(review): exceptions are reported as success (true), presumably so an
        // expunge is never blocked by a transient LVM error — confirm this is intended,
        // since it can leave an LV behind silently.
        return true;
    }
}
|
||||
|
||||
/**
|
||||
* This function copies a physical disk from Secondary Storage to Primary Storage
|
||||
* or from Primary to Primary Storage
|
||||
|
|
@ -1276,6 +1552,14 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
Script.runSimpleBashScript("chmod 755 " + disk.getPath());
|
||||
Script.runSimpleBashScript("tar -x -f " + template.getPath() + "/*.tar -C " + disk.getPath(), timeout);
|
||||
} else if (format == PhysicalDiskFormat.QCOW2) {
|
||||
if (destPool.getType() == StoragePoolType.CLVM_NG) {
|
||||
logger.info("Creating CLVM_NG volume {} with backing file from template {}", newUuid, template.getName());
|
||||
String backingFile = getClvmBackingFile(template, destPool);
|
||||
|
||||
disk = createClvmNgDiskWithBacking(newUuid, timeout, size, backingFile, destPool);
|
||||
return disk;
|
||||
}
|
||||
|
||||
QemuImg qemu = new QemuImg(timeout);
|
||||
QemuImgFile destFile = new QemuImgFile(disk.getPath(), format);
|
||||
if (size > template.getVirtualSize()) {
|
||||
|
|
@ -1338,6 +1622,92 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
return disk;
|
||||
}
|
||||
|
||||
private String getClvmBackingFile(KVMPhysicalDisk template, KVMStoragePool destPool) {
|
||||
String templateLvName = "template-" + template.getName();
|
||||
KVMPhysicalDisk templateOnPrimary = null;
|
||||
|
||||
try {
|
||||
templateOnPrimary = destPool.getPhysicalDisk(templateLvName);
|
||||
} catch (CloudRuntimeException e) {
|
||||
logger.warn("Template {} not found on CLVM_NG pool {}.", templateLvName, destPool.getUuid());
|
||||
}
|
||||
|
||||
String backingFile;
|
||||
if (templateOnPrimary != null) {
|
||||
backingFile = templateOnPrimary.getPath();
|
||||
logger.info("Using template on primary storage as backing file: {}", backingFile);
|
||||
|
||||
ensureTemplateLvInSharedMode(backingFile);
|
||||
} else {
|
||||
logger.error("Template {} should be on primary storage before creating volumes from it", templateLvName);
|
||||
throw new CloudRuntimeException(String.format("Template not found on CLVM_NG primary storage: {}." +
|
||||
"Template must be copied to primary storage first.", templateLvName));
|
||||
}
|
||||
return backingFile;
|
||||
}
|
||||
|
||||
/**
 * Ensures a template LV is activated in shared mode so multiple VMs can use it as a backing file.
 *
 * Reads the LV's attribute string via `lvs -o lv_attr` and, when the LV is not
 * active+shared, runs `lvchange -asy` to activate it in shared mode. When the
 * attribute string cannot be read at all (lvs fails or returns nothing), the
 * method silently does nothing.
 *
 * @param templatePath The full path to the template LV (e.g., /dev/vgname/template-uuid)
 * @param throwOnFailure If true, throws CloudRuntimeException on failure; if false, logs warning and continues
 * @throws CloudRuntimeException when activation fails and throwOnFailure is true
 */
private void ensureTemplateLvInSharedMode(String templatePath, boolean throwOnFailure) {
    try {
        Script checkLvs = new Script("lvs", Duration.millis(5000), logger);
        checkLvs.add("--noheadings");
        checkLvs.add("-o", "lv_attr");
        checkLvs.add(templatePath);

        OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
        // execute() returns null on success, an error message on failure.
        String result = checkLvs.execute(parser);

        if (result == null && parser.getLines() != null && !parser.getLines().isEmpty()) {
            String lvAttr = parser.getLines().trim();
            if (lvAttr.length() >= 6) {
                // NOTE(review): these positions assume the standard lv_attr layout —
                // confirm against the lvs(8) man page for the targeted LVM version.
                char activeChar = lvAttr.charAt(4); // 'a' = active, '-' = inactive
                char sharedChar = lvAttr.charAt(5); // 's' = shared, 'e' = exclusive, '-' = not set

                if (activeChar != 'a' || sharedChar != 's') {
                    logger.info("Template LV {} is not in shared mode (attr: {}). Activating in shared mode.",
                            templatePath, lvAttr);

                    Script lvchange = new Script("lvchange", Duration.millis(5000), logger);
                    lvchange.add("-asy");
                    lvchange.add(templatePath);
                    result = lvchange.execute();

                    if (result != null) {
                        String errorMsg = "Failed to activate template LV " + templatePath + " in shared mode: " + result;
                        if (throwOnFailure) {
                            throw new CloudRuntimeException(errorMsg);
                        } else {
                            logger.warn(errorMsg);
                        }
                    } else {
                        logger.info("Successfully activated template LV {} in shared mode", templatePath);
                    }
                } else {
                    logger.debug("Template LV {} is already in shared mode", templatePath);
                }
            }
        }
    } catch (CloudRuntimeException e) {
        // Re-throw our own failure signal untouched (only raised when throwOnFailure).
        throw e;
    } catch (Exception e) {
        String errorMsg = "Failed to check/ensure template LV shared mode for " + templatePath + ": " + e.getMessage();
        if (throwOnFailure) {
            throw new CloudRuntimeException(errorMsg, e);
        } else {
            logger.warn(errorMsg, e);
        }
    }
}
|
||||
|
||||
private void ensureTemplateLvInSharedMode(String templatePath) {
|
||||
ensureTemplateLvInSharedMode(templatePath, false);
|
||||
}
|
||||
|
||||
private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template,
|
||||
String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout){
|
||||
|
||||
|
|
@ -1737,7 +2107,269 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
|||
vol.delete(0);
|
||||
}
|
||||
|
||||
/**
 * Securely zero-fill a volume before deletion to prevent data leakage.
 * Uses blkdiscard (fast TRIM) as primary method, with dd zero-fill as fallback.
 *
 * Best-effort: every failure is logged and swallowed so volume deletion can
 * proceed regardless of the wipe outcome.
 *
 * @param lvPath The full path to the logical volume (e.g., /dev/vgname/lvname)
 * @param volumeUuid The UUID of the volume for logging purposes
 */
private void secureZeroFillVolume(String lvPath, String volumeUuid) {
    logger.info("Starting secure zero-fill for CLVM volume: {} at path: {}", volumeUuid, lvPath);

    boolean blkdiscardSuccess = false;

    // Try blkdiscard first (fast - sends TRIM commands)
    try {
        Script blkdiscard = new Script("blkdiscard", 300000, logger); // 5 minute timeout
        blkdiscard.add("-f"); // Force flag to suppress confirmation prompts
        blkdiscard.add(lvPath);

        // execute() returns null on success, an error message on failure.
        String result = blkdiscard.execute();
        if (result == null) {
            logger.info("Successfully zero-filled CLVM volume {} using blkdiscard (TRIM)", volumeUuid);
            blkdiscardSuccess = true;
        } else {
            // Distinguish "device can't TRIM" (expected on some backends) from real errors.
            if (result.contains("Operation not supported") || result.contains("BLKDISCARD ioctl failed")) {
                logger.info("blkdiscard not supported for volume {} (device doesn't support TRIM/DISCARD), using dd fallback", volumeUuid);
            } else {
                logger.warn("blkdiscard failed for volume {}: {}, will try dd fallback", volumeUuid, result);
            }
        }
    } catch (Exception e) {
        logger.warn("Exception during blkdiscard for volume {}: {}, will try dd fallback", volumeUuid, e.getMessage());
    }

    // Fallback to dd zero-fill (slow)
    if (!blkdiscardSuccess) {
        logger.info("Attempting zero-fill using dd for CLVM volume: {}", volumeUuid);
        try {
            // nice -n 19: lowest CPU priority
            // ionice -c 2 -n 7: best-effort I/O scheduling with lowest priority
            // oflag=direct: bypass cache for more predictable performance
            // NOTE(review): "|| true" makes the shell exit code always 0, so dd
            // failures are detected only by inspecting the captured output below
            // — presumably deliberate best-effort; confirm.
            String command = String.format(
                    "nice -n 19 ionice -c 2 -n 7 dd if=/dev/zero of=%s bs=1M oflag=direct 2>&1 || true",
                    lvPath
            );

            Script ddZeroFill = new Script("/bin/bash", 3600000, logger); // 60 minute timeout for large volumes
            ddZeroFill.add("-c");
            ddZeroFill.add(command);

            OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
            String ddResult = ddZeroFill.execute(parser);
            String output = parser.getLines();

            // dd prints "records in"/"copied" on completion; filling the whole LV
            // ends with "No space left on device" — all three count as success.
            if (output != null && (output.contains("copied") || output.contains("records in") ||
                    output.contains("No space left on device"))) {
                logger.info("Successfully zero-filled CLVM volume {} using dd", volumeUuid);
            } else if (ddResult == null) {
                logger.info("Zero-fill completed for CLVM volume {}", volumeUuid);
            } else {
                logger.warn("dd zero-fill for volume {} completed with output: {}", volumeUuid,
                        output != null ? output : ddResult);
            }
        } catch (Exception e) {
            logger.warn("Failed to zero-fill CLVM volume {} before deletion: {}. Proceeding with deletion anyway.",
                    volumeUuid, e.getMessage());
        }
    }
}
|
||||
|
||||
private void deleteDirVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtException {
|
||||
Script.runSimpleBashScript("rm -r --interactive=never " + vol.getPath());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Physical Extent (PE) from the volume group
|
||||
* @param vgName Volume group name
|
||||
* @return PE size in bytes, defauts to 4MiB if it cannot be determined
|
||||
*/
|
||||
private long getVgPhysicalExtentSize(String vgName) {
|
||||
String warningMessage = String.format("Failed to get PE size for VG %s, defaulting to 4MiB", vgName);
|
||||
try {
|
||||
Script vgDisplay = new Script("vgdisplay", 300000, logger);
|
||||
vgDisplay.add("--units", "b"); // Output in bytes
|
||||
vgDisplay.add("-C"); // Columnar output
|
||||
vgDisplay.add("--noheadings");
|
||||
vgDisplay.add("-o", "vg_extent_size");
|
||||
vgDisplay.add(vgName);
|
||||
|
||||
String output = vgDisplay.execute();
|
||||
if (output != null) {
|
||||
output = output.trim();
|
||||
if (output.endsWith("B")) {
|
||||
output = output.substring(0, output.length() - 1);
|
||||
}
|
||||
logger.debug("Physical Extent size for VG {} is {} bytes", vgName, output);
|
||||
return Long.parseLong(output);
|
||||
} else {
|
||||
logger.warn(warningMessage);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn(warningMessage, e.getMessage());
|
||||
}
|
||||
final long DEFAULT_PE_SIZE = 4 * 1024 * 1024L;
|
||||
logger.info("Using default PE size for VG {}: {} bytes (4 MiB)", vgName, DEFAULT_PE_SIZE);
|
||||
return DEFAULT_PE_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate LVM LV size for QCOW2 image accounting for metadata overhead
|
||||
* @param qcow2PhysicalSize Physical size in bytes from qemu-img info
|
||||
* @param vgName Volume group name to query PE size
|
||||
* @return Size in bytes to allocate for LV
|
||||
*/
|
||||
private long calculateClvmNgLvSize(long qcow2PhysicalSize, String vgName) {
|
||||
long peSize = getVgPhysicalExtentSize(vgName);
|
||||
long roundedSize = ((qcow2PhysicalSize + peSize - 1) / peSize) * peSize;
|
||||
|
||||
long finalSize = roundedSize + peSize;
|
||||
logger.info("Calculated LV size for QCOW2 physical size {} bytes: {} bytes " +
|
||||
"(rounded to {} PEs + 1 PE overhead, PE size = {} bytes)",
|
||||
qcow2PhysicalSize, finalSize,
|
||||
roundedSize / peSize, peSize);
|
||||
|
||||
return finalSize;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get physical size of QCOW2 image
|
||||
*/
|
||||
private long getQcow2PhysicalSize(String imagePath) {
|
||||
Script qemuImg = new Script("qemu-img", 300000, logger);
|
||||
qemuImg.add("info");
|
||||
qemuImg.add("--output=json");
|
||||
qemuImg.add(imagePath);
|
||||
String output = qemuImg.execute();
|
||||
|
||||
JsonObject info = JsonParser.parseString(output).getAsJsonObject();
|
||||
return info.get("actual-size").getAsLong();
|
||||
}
|
||||
|
||||
private KVMPhysicalDisk createClvmNgDiskWithBacking(String volumeUuid, int timeout, long virtualSize, String backingFile, KVMStoragePool pool) {
|
||||
String vgName = getVgName(pool.getLocalPath());
|
||||
long lvSize = calculateClvmNgLvSize(virtualSize, vgName);
|
||||
String volumePath = "/dev/" + vgName + "/" + volumeUuid;
|
||||
|
||||
logger.debug("Creating CLVM_NG volume {} with LV size {} bytes (virtual size: {} bytes)", volumeUuid, lvSize, virtualSize);
|
||||
|
||||
Script lvcreate = new Script("lvcreate", Duration.millis(timeout), logger);
|
||||
lvcreate.add("-n", volumeUuid);
|
||||
lvcreate.add("-L", lvSize + "B");
|
||||
lvcreate.add(vgName);
|
||||
|
||||
String result = lvcreate.execute();
|
||||
if (result != null) {
|
||||
throw new CloudRuntimeException("Failed to create LV for CLVM_NG volume: " + result);
|
||||
}
|
||||
|
||||
Script qemuImg = new Script("qemu-img", Duration.millis(timeout), logger);
|
||||
qemuImg.add("create");
|
||||
qemuImg.add("-f", "qcow2");
|
||||
|
||||
StringBuilder qcow2Options = new StringBuilder();
|
||||
qcow2Options.append("preallocation=metadata");
|
||||
qcow2Options.append(",extended_l2=on");
|
||||
qcow2Options.append(",cluster_size=128k");
|
||||
|
||||
if (backingFile != null && !backingFile.isEmpty()) {
|
||||
qcow2Options.append(",backing_file=").append(backingFile);
|
||||
qcow2Options.append(",backing_fmt=qcow2");
|
||||
logger.debug("Creating CLVM_NG volume with backing file: {}", backingFile);
|
||||
}
|
||||
|
||||
qemuImg.add("-o", qcow2Options.toString());
|
||||
qemuImg.add(volumePath);
|
||||
qemuImg.add(virtualSize + "");
|
||||
|
||||
result = qemuImg.execute();
|
||||
if (result != null) {
|
||||
removeLvOnFailure(volumePath, timeout);
|
||||
throw new CloudRuntimeException("Failed to create QCOW2 on CLVM_NG volume: " + result);
|
||||
}
|
||||
|
||||
KVMPhysicalDisk disk = new KVMPhysicalDisk(volumePath, volumeUuid, pool);
|
||||
disk.setFormat(PhysicalDiskFormat.QCOW2);
|
||||
disk.setSize(lvSize);
|
||||
disk.setVirtualSize(virtualSize);
|
||||
|
||||
logger.info("Successfully created CLVM_NG volume {} with backing file (LV size: {}, virtual size: {})",
|
||||
volumeUuid, lvSize, virtualSize);
|
||||
|
||||
return disk;
|
||||
}
|
||||
|
||||
public void createTemplateOnClvmNg(String templatePath, String templateUuid, int timeout, KVMStoragePool pool) {
|
||||
String vgName = getVgName(pool.getLocalPath());
|
||||
String lvName = "template-" + templateUuid;
|
||||
String lvPath = "/dev/" + vgName + "/" + lvName;
|
||||
|
||||
if (lvExists(lvPath)) {
|
||||
logger.info("Template LV {} already exists in VG {}. Skipping creation.", lvName, vgName);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.info("Creating new template LV {} in VG {} for template {}", lvName, vgName, templateUuid);
|
||||
|
||||
long physicalSize = getQcow2PhysicalSize(templatePath);
|
||||
long lvSize = calculateClvmNgLvSize(physicalSize, vgName);
|
||||
|
||||
Script lvcreate = new Script("lvcreate", Duration.millis(timeout), logger);
|
||||
lvcreate.add("-n", lvName);
|
||||
lvcreate.add("-L", lvSize + "B");
|
||||
lvcreate.add(vgName);
|
||||
String result = lvcreate.execute();
|
||||
if (result != null) {
|
||||
throw new CloudRuntimeException("Failed to create LV for CLVM_NG template: " + result);
|
||||
}
|
||||
|
||||
|
||||
Script qemuImgConvert = new Script("qemu-img", Duration.millis(timeout), logger);
|
||||
qemuImgConvert.add("convert");
|
||||
qemuImgConvert.add(templatePath);
|
||||
qemuImgConvert.add("-O", "qcow2");
|
||||
qemuImgConvert.add(lvPath);
|
||||
result = qemuImgConvert.execute();
|
||||
|
||||
if (result != null) {
|
||||
removeLvOnFailure(lvPath, timeout);
|
||||
throw new CloudRuntimeException("Failed to convert template to CLVM_NG volume: " + result);
|
||||
}
|
||||
|
||||
logger.info("Created template LV {} with size {} bytes (physical: {}, overhead: {})",
|
||||
lvName, lvSize, physicalSize, lvSize - physicalSize);
|
||||
|
||||
try {
|
||||
ensureTemplateLvInSharedMode(lvPath, true);
|
||||
} catch (CloudRuntimeException e) {
|
||||
logger.error("Failed to activate template LV {} in shared mode. Cleaning up.", lvPath);
|
||||
removeLvOnFailure(lvPath, timeout);
|
||||
throw e;
|
||||
}
|
||||
|
||||
KVMPhysicalDisk templateDisk = new KVMPhysicalDisk(lvPath, lvName, pool);
|
||||
templateDisk.setFormat(PhysicalDiskFormat.QCOW2);
|
||||
templateDisk.setVirtualSize(physicalSize);
|
||||
templateDisk.setSize(lvSize);
|
||||
|
||||
}
|
||||
|
||||
private boolean lvExists(String lvPath) {
|
||||
Script checkLv = new Script("lvs", Duration.millis(5000), logger);
|
||||
checkLv.add("--noheadings");
|
||||
checkLv.add("--unbuffered");
|
||||
checkLv.add(lvPath);
|
||||
String checkResult = checkLv.execute();
|
||||
return checkResult == null;
|
||||
}
|
||||
|
||||
private void removeLvOnFailure(String lvPath, int timeout) {
|
||||
Script lvremove = new Script("lvremove", Duration.millis(timeout), logger);
|
||||
lvremove.add("-f");
|
||||
lvremove.add(lvPath);
|
||||
lvremove.execute();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -212,7 +212,7 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
|||
|
||||
@Override
|
||||
public boolean isExternalSnapshot() {
|
||||
if (this.type == StoragePoolType.CLVM || type == StoragePoolType.RBD) {
|
||||
if (this.type == StoragePoolType.CLVM || this.type == StoragePoolType.CLVM_NG || type == StoragePoolType.RBD) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
|
@ -277,6 +277,10 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
|||
return this.type;
|
||||
}
|
||||
|
||||
public void setType(StoragePoolType type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public StoragePool getPool() {
|
||||
return this._pool;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2728,8 +2728,11 @@ public class LibvirtComputingResourceTest {
|
|||
|
||||
@Test
|
||||
public void testModifyStoragePoolCommand() {
|
||||
final StoragePool pool = Mockito.mock(StoragePool.class);;
|
||||
final StoragePool pool = Mockito.mock(StoragePool.class);
|
||||
final ModifyStoragePoolCommand command = new ModifyStoragePoolCommand(true, pool);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
|
||||
command.setDetails(details);
|
||||
|
||||
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
|
||||
final KVMStoragePool kvmStoragePool = Mockito.mock(KVMStoragePool.class);
|
||||
|
|
@ -2753,8 +2756,11 @@ public class LibvirtComputingResourceTest {
|
|||
|
||||
@Test
|
||||
public void testModifyStoragePoolCommandFailure() {
|
||||
final StoragePool pool = Mockito.mock(StoragePool.class);;
|
||||
final StoragePool pool = Mockito.mock(StoragePool.class);
|
||||
final ModifyStoragePoolCommand command = new ModifyStoragePoolCommand(true, pool);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
|
||||
command.setDetails(details);
|
||||
|
||||
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
|
||||
|
||||
|
|
|
|||
|
|
@ -422,12 +422,19 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
|
|||
CommandResult result = new CommandResult();
|
||||
try {
|
||||
EndPoint ep = null;
|
||||
if (snapshotOnPrimaryStore != null) {
|
||||
ep = epSelector.select(snapshotOnPrimaryStore);
|
||||
} else {
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
|
||||
VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
|
||||
|
||||
StoragePoolVO storagePool = primaryStoreDao.findById(volumeInfo.getPoolId());
|
||||
if (storagePool != null && storagePool.getPoolType() == StoragePoolType.CLVM) {
|
||||
ep = epSelector.select(volumeInfo);
|
||||
} else {
|
||||
if (snapshotOnPrimaryStore != null) {
|
||||
ep = epSelector.select(snapshotOnPrimaryStore);
|
||||
} else {
|
||||
ep = epSelector.select(volumeInfo);
|
||||
}
|
||||
}
|
||||
|
||||
if ( ep == null ){
|
||||
String errMsg = "No remote endpoint to send RevertSnapshotCommand, check if host or ssvm is down?";
|
||||
logger.error(errMsg);
|
||||
|
|
|
|||
|
|
@ -233,6 +233,11 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStor
|
|||
parameters.setHost(storageHost);
|
||||
parameters.setPort(0);
|
||||
parameters.setPath(hostPath.replaceFirst("/", ""));
|
||||
} else if (scheme.equalsIgnoreCase("clvm_ng")) {
|
||||
parameters.setType(StoragePoolType.CLVM_NG);
|
||||
parameters.setHost(storageHost);
|
||||
parameters.setPort(0);
|
||||
parameters.setPath(hostPath.replaceFirst("/", ""));
|
||||
} else if (scheme.equalsIgnoreCase("rbd")) {
|
||||
if (port == -1) {
|
||||
port = 0;
|
||||
|
|
|
|||
4
pom.xml
4
pom.xml
|
|
@ -55,6 +55,10 @@
|
|||
<sonar.host.url>https://sonarcloud.io</sonar.host.url>
|
||||
<sonar.exclusions>engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingDetailsVO.java</sonar.exclusions>
|
||||
<sonar.exclusions>api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java</sonar.exclusions>
|
||||
<sonar.exclusions>core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java</sonar.exclusions>
|
||||
<sonar.exclusions>core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java</sonar.exclusions>
|
||||
<sonar.exclusions>core/src/main/java/com/cloud/agent/api/PreMigrationCommand.java</sonar.exclusions>
|
||||
<sonar.exclusions>core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java</sonar.exclusions>
|
||||
|
||||
<!-- Build properties -->
|
||||
<cs.jdk.version>11</cs.jdk.version>
|
||||
|
|
|
|||
|
|
@ -58,15 +58,11 @@ is_lv() {
|
|||
}
|
||||
|
||||
get_vg() {
|
||||
lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 2
|
||||
lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 2 | tr -d ' '
|
||||
}
|
||||
|
||||
get_lv() {
|
||||
lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 1
|
||||
}
|
||||
|
||||
double_hyphens() {
|
||||
echo ${1} | sed -e "s/-/--/g"
|
||||
lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 1 | tr -d ' '
|
||||
}
|
||||
|
||||
create_snapshot() {
|
||||
|
|
@ -77,30 +73,39 @@ create_snapshot() {
|
|||
islv_ret=$?
|
||||
|
||||
if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ]; then
|
||||
# Modern LVM snapshot approach
|
||||
local lv=`get_lv ${disk}`
|
||||
local vg=`get_vg ${disk}`
|
||||
local lv_dm=`double_hyphens ${lv}`
|
||||
local vg_dm=`double_hyphens ${vg}`
|
||||
local lvdevice=/dev/mapper/${vg_dm}-${lv_dm}
|
||||
local lv_bytes=`blockdev --getsize64 ${lvdevice}`
|
||||
local lv_sectors=`blockdev --getsz ${lvdevice}`
|
||||
local lv_bytes=`blockdev --getsize64 ${disk}`
|
||||
|
||||
lvm lvcreate --size ${lv_bytes}b --name "${snapshotname}-cow" ${vg} >&2 || return 2
|
||||
dmsetup suspend ${vg_dm}-${lv_dm} >&2
|
||||
if dmsetup info -c --noheadings -o name ${vg_dm}-${lv_dm}-real > /dev/null 2>&1; then
|
||||
echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
|
||||
dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
else
|
||||
dmsetup table ${vg_dm}-${lv_dm} | dmsetup create ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
dmsetup resume ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
|
||||
dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
echo "0 ${lv_sectors} snapshot-origin ${lvdevice}-real" | \
|
||||
dmsetup load ${vg_dm}-${lv_dm} >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
|
||||
# Calculate snapshot size (10% of origin size, minimum 100MB, maximum 10GB)
|
||||
local snapshot_size=$((lv_bytes / 10))
|
||||
local min_size=$((100 * 1024 * 1024)) # 100MB
|
||||
local max_size=$((10 * 1024 * 1024 * 1024)) # 10GB
|
||||
|
||||
if [ ${snapshot_size} -lt ${min_size} ]; then
|
||||
snapshot_size=${min_size}
|
||||
elif [ ${snapshot_size} -gt ${max_size} ]; then
|
||||
snapshot_size=${max_size}
|
||||
fi
|
||||
|
||||
# Round to nearest 512-byte multiple (LVM requirement)
|
||||
snapshot_size=$(((snapshot_size + 511) / 512 * 512))
|
||||
|
||||
# Create LVM snapshot using native command
|
||||
lvm lvcreate -L ${snapshot_size}b -s -n "${snapshotname}" "${vg}/${lv}" >&2
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "***Failed to create LVM snapshot ${snapshotname} for ${vg}/${lv}\n" >&2
|
||||
return 2
|
||||
fi
|
||||
|
||||
# Activate the snapshot
|
||||
lvm lvchange --yes -ay "${vg}/${snapshotname}" >&2
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "***Failed to activate LVM snapshot ${snapshotname}\n" >&2
|
||||
lvm lvremove -f "${vg}/${snapshotname}" >&2
|
||||
return 2
|
||||
fi
|
||||
dmsetup resume "${vg_dm}-${lv_dm}" >&2
|
||||
elif [ -f "${disk}" ]; then
|
||||
$qemu_img snapshot -c "$snapshotname" $disk
|
||||
|
||||
|
|
@ -132,25 +137,22 @@ destroy_snapshot() {
|
|||
islv_ret=$?
|
||||
|
||||
if [ "$islv_ret" == "1" ]; then
|
||||
# Modern LVM snapshot deletion
|
||||
local lv=`get_lv ${disk}`
|
||||
local vg=`get_vg ${disk}`
|
||||
local lv_dm=`double_hyphens ${lv}`
|
||||
local vg_dm=`double_hyphens ${vg}`
|
||||
if [ -e /dev/mapper/${vg_dm}-${lv_dm}-real ]; then
|
||||
local dm_refcount=`dmsetup info -c --noheadings -o open ${vg_dm}-${lv_dm}-real`
|
||||
if [ ${dm_refcount} -le 2 ]; then
|
||||
dmsetup suspend ${vg_dm}-${lv_dm} >&2
|
||||
dmsetup table ${vg_dm}-${lv_dm}-real | dmsetup load ${vg_dm}-${lv_dm} >&2
|
||||
dmsetup resume ${vg_dm}-${lv_dm}
|
||||
dmsetup remove "${vg_dm}-${snapshotname}"
|
||||
dmsetup remove ${vg_dm}-${lv_dm}-real
|
||||
else
|
||||
dmsetup remove "${vg_dm}-${snapshotname}"
|
||||
fi
|
||||
else
|
||||
dmsetup remove "${vg_dm}-${snapshotname}"
|
||||
|
||||
# Check if snapshot exists
|
||||
if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
|
||||
printf "Snapshot ${vg}/${snapshotname} does not exist or was already deleted\n" >&2
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Remove the snapshot using native LVM command
|
||||
lvm lvremove -f "${vg}/${snapshotname}" >&2
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "***Failed to remove LVM snapshot ${vg}/${snapshotname}\n" >&2
|
||||
return 2
|
||||
fi
|
||||
lvm lvremove -f "${vg}/${snapshotname}-cow"
|
||||
elif [ -f $disk ]; then
|
||||
#delete all the existing snapshots
|
||||
$qemu_img snapshot -l $disk |tail -n +3|awk '{print $1}'|xargs -I {} $qemu_img snapshot -d {} $disk >&2
|
||||
|
|
@ -170,12 +172,37 @@ rollback_snapshot() {
|
|||
local disk=$1
|
||||
local snapshotname="$2"
|
||||
local failed=0
|
||||
is_lv ${disk}
|
||||
islv_ret=$?
|
||||
|
||||
$qemu_img snapshot -a $snapshotname $disk
|
||||
if [ ${dmrollback} = "yes" ] && [ "$islv_ret" == "1" ]; then
|
||||
# Modern LVM snapshot merge (rollback)
|
||||
local lv=`get_lv ${disk}`
|
||||
local vg=`get_vg ${disk}`
|
||||
|
||||
if [ $? -gt 0 ]
|
||||
then
|
||||
printf "***Failed to apply snapshot $snapshotname for path $disk\n" >&2
|
||||
# Check if snapshot exists
|
||||
if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
|
||||
printf "***Snapshot ${vg}/${snapshotname} does not exist\n" >&2
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Use lvconvert --merge to rollback
|
||||
lvm lvconvert --merge "${vg}/${snapshotname}" >&2
|
||||
if [ $? -gt 0 ]; then
|
||||
printf "***Failed to merge/rollback snapshot ${snapshotname} for ${vg}/${lv}\n" >&2
|
||||
return 1
|
||||
fi
|
||||
elif [ -f ${disk} ]; then
|
||||
# File-based snapshot rollback using qemu-img
|
||||
$qemu_img snapshot -a $snapshotname $disk
|
||||
|
||||
if [ $? -gt 0 ]
|
||||
then
|
||||
printf "***Failed to apply snapshot $snapshotname for path $disk\n" >&2
|
||||
failed=1
|
||||
fi
|
||||
else
|
||||
printf "***Failed to rollback snapshot $snapshotname, undefined type $disk\n" >&2
|
||||
failed=1
|
||||
fi
|
||||
|
||||
|
|
@ -204,20 +231,21 @@ backup_snapshot() {
|
|||
|
||||
if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ] ; then
|
||||
local vg=`get_vg ${disk}`
|
||||
local vg_dm=`double_hyphens ${vg}`
|
||||
local scriptdir=`dirname ${0}`
|
||||
|
||||
if ! dmsetup info -c --noheadings -o name ${vg_dm}-${snapshotname} > /dev/null 2>&1; then
|
||||
# Check if snapshot exists using native LVM command
|
||||
if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
|
||||
printf "Disk ${disk} has no snapshot called ${snapshotname}.\n" >&2
|
||||
return 1
|
||||
fi
|
||||
|
||||
qemuimg_ret=$($qemu_img $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}")
|
||||
# Use native LVM path for backup
|
||||
qemuimg_ret=$($qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/${vg}/${snapshotname}" "${destPath}/${destName}" 2>&1)
|
||||
ret_code=$?
|
||||
if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"snapshot: invalid option -- 'U'"* ]]
|
||||
if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]])
|
||||
then
|
||||
forceShareFlag=""
|
||||
$qemu_img $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}"
|
||||
$qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/${vg}/${snapshotname}" "${destPath}/${destName}"
|
||||
ret_code=$?
|
||||
fi
|
||||
if [ $ret_code -gt 0 ]
|
||||
|
|
@ -240,9 +268,9 @@ backup_snapshot() {
|
|||
# Backup VM snapshot
|
||||
qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk 2>&1)
|
||||
ret_code=$?
|
||||
if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"snapshot: invalid option -- 'U'"* ]]; then
|
||||
if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]]); then
|
||||
forceShareFlag=""
|
||||
qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk)
|
||||
qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk 2>&1)
|
||||
ret_code=$?
|
||||
fi
|
||||
|
||||
|
|
@ -251,11 +279,11 @@ backup_snapshot() {
|
|||
return 1
|
||||
fi
|
||||
|
||||
qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
|
||||
qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1)
|
||||
ret_code=$?
|
||||
if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"convert: invalid option -- 'U'"* ]]; then
|
||||
if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]]); then
|
||||
forceShareFlag=""
|
||||
qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
|
||||
qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1)
|
||||
ret_code=$?
|
||||
fi
|
||||
|
||||
|
|
@ -279,7 +307,12 @@ backup_snapshot() {
|
|||
revert_snapshot() {
|
||||
local snapshotPath=$1
|
||||
local destPath=$2
|
||||
${qemu_img} convert -f qcow2 -O qcow2 "$snapshotPath" "$destPath" || \
|
||||
local output_format="qcow2"
|
||||
if [ -b "$destPath" ]; then
|
||||
output_format="raw"
|
||||
fi
|
||||
|
||||
${qemu_img} convert -f qcow2 -O ${output_format} "$snapshotPath" "$destPath" || \
|
||||
( printf "${qemu_img} failed to revert snapshot ${snapshotPath} to disk ${destPath}.\n" >&2; return 2 )
|
||||
return 0
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,168 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package com.cloud.storage;
|
||||
|
||||
import java.util.Arrays;
|
||||
import javax.inject.Inject;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.Status;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@Component
|
||||
public class ClvmLockManager {
|
||||
@Inject
|
||||
private VolumeDetailsDao _volsDetailsDao;
|
||||
@Inject
|
||||
private AgentManager _agentMgr;
|
||||
@Inject
|
||||
private HostDao _hostDao;
|
||||
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
|
||||
public static boolean isClvmPoolType(Storage.StoragePoolType poolType) {
|
||||
return Arrays.asList(Storage.StoragePoolType.CLVM, Storage.StoragePoolType.CLVM_NG).contains(poolType);
|
||||
}
|
||||
|
||||
public Long getClvmLockHostId(Long volumeId, String volumeUuid) {
|
||||
VolumeDetailVO detail = _volsDetailsDao.findDetail(volumeId, VolumeInfo.CLVM_LOCK_HOST_ID);
|
||||
if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
|
||||
try {
|
||||
return Long.parseLong(detail.getValue());
|
||||
} catch (NumberFormatException e) {
|
||||
logger.warn("Invalid clvmLockHostId in volume_details for volume {}: {}",
|
||||
volumeUuid, detail.getValue());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Safely sets or updates the CLVM_LOCK_HOST_ID detail for a volume.
|
||||
* If the detail already exists, it will be updated. Otherwise, it will be created.
|
||||
*
|
||||
* @param volumeId The ID of the volume
|
||||
* @param hostId The host ID that holds/should hold the CLVM exclusive lock
|
||||
*/
|
||||
public void setClvmLockHostId(long volumeId, long hostId) {
|
||||
VolumeDetailVO existingDetail = _volsDetailsDao.findDetail(volumeId, VolumeInfo.CLVM_LOCK_HOST_ID);
|
||||
if (existingDetail != null) {
|
||||
existingDetail.setValue(String.valueOf(hostId));
|
||||
_volsDetailsDao.update(existingDetail.getId(), existingDetail);
|
||||
logger.debug("Updated CLVM_LOCK_HOST_ID for volume {} to host {}", volumeId, hostId);
|
||||
return;
|
||||
}
|
||||
_volsDetailsDao.addDetail(volumeId, VolumeInfo.CLVM_LOCK_HOST_ID, String.valueOf(hostId), false);
|
||||
logger.debug("Created CLVM_LOCK_HOST_ID for volume {} with host {}", volumeId, hostId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cleans up CLVM lock host tracking detail from volume_details table.
|
||||
* Called after successful volume deletion to prevent orphaned records.
|
||||
*
|
||||
* @param volume The volume being deleted
|
||||
*/
|
||||
public void clearClvmLockHostDetail(VolumeVO volume) {
|
||||
try {
|
||||
VolumeDetailVO detail = _volsDetailsDao.findDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID);
|
||||
if (detail != null) {
|
||||
logger.debug("Removing CLVM lock host detail for deleted volume {}", volume.getUuid());
|
||||
_volsDetailsDao.remove(detail.getId());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("Failed to clean up CLVM lock host detail for volume {}: {}",
|
||||
volume.getUuid(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public boolean transferClvmVolumeLock(String volumeUuid, Long volumeId, String volumePath,
|
||||
StoragePoolVO pool, Long sourceHostId, Long destHostId) {
|
||||
if (pool == null) {
|
||||
logger.error("Cannot transfer CLVM lock for volume {} - pool is null", volumeUuid);
|
||||
return false;
|
||||
}
|
||||
|
||||
String vgName = pool.getPath();
|
||||
if (vgName.startsWith("/")) {
|
||||
vgName = vgName.substring(1);
|
||||
}
|
||||
|
||||
String lvPath = String.format("/dev/%s/%s", vgName, volumePath);
|
||||
|
||||
try {
|
||||
if (!sourceHostId.equals(destHostId)) {
|
||||
HostVO sourceHost = _hostDao.findById(sourceHostId);
|
||||
if (sourceHost != null && sourceHost.getStatus() == Status.Up) {
|
||||
ClvmLockTransferCommand deactivateCmd = new ClvmLockTransferCommand(
|
||||
ClvmLockTransferCommand.Operation.DEACTIVATE,
|
||||
lvPath,
|
||||
volumeUuid
|
||||
);
|
||||
|
||||
Answer deactivateAnswer = _agentMgr.send(sourceHostId, deactivateCmd);
|
||||
|
||||
if (deactivateAnswer == null || !deactivateAnswer.getResult()) {
|
||||
logger.warn("Failed to deactivate CLVM volume {} on source host {}. Will attempt activation on destination.",
|
||||
volumeUuid, sourceHostId);
|
||||
}
|
||||
} else {
|
||||
logger.warn("Source host {} is down. Will attempt force claim on destination host {}",
|
||||
sourceHostId, destHostId);
|
||||
}
|
||||
}
|
||||
|
||||
ClvmLockTransferCommand activateCmd = new ClvmLockTransferCommand(
|
||||
ClvmLockTransferCommand.Operation.ACTIVATE_EXCLUSIVE,
|
||||
lvPath,
|
||||
volumeUuid
|
||||
);
|
||||
|
||||
Answer activateAnswer = _agentMgr.send(destHostId, activateCmd);
|
||||
|
||||
if (activateAnswer == null || !activateAnswer.getResult()) {
|
||||
String error = activateAnswer != null ? activateAnswer.getDetails() : "null answer";
|
||||
logger.error("Failed to activate CLVM volume {} exclusively on dest host {}: {}",
|
||||
volumeUuid, destHostId, error);
|
||||
return false;
|
||||
}
|
||||
|
||||
setClvmLockHostId(volumeId, destHostId);
|
||||
|
||||
logger.info("Successfully transferred CLVM lock for volume {} from host {} to host {}",
|
||||
volumeUuid, sourceHostId, destHostId);
|
||||
|
||||
return true;
|
||||
|
||||
} catch (AgentUnavailableException | OperationTimedoutException e) {
|
||||
logger.error("Exception during CLVM lock transfer for volume {}: {}", volumeUuid, e.getMessage(), e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -367,6 +367,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
HostPodDao podDao;
|
||||
@Inject
|
||||
EndPointSelector _epSelector;
|
||||
@Inject
|
||||
ClvmLockManager clvmLockManager;
|
||||
|
||||
@Inject
|
||||
private VMSnapshotDetailsDao vmSnapshotDetailsDao;
|
||||
|
|
@ -410,6 +412,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
public static final ConfigKey<Boolean> AllowCheckAndRepairVolume = new ConfigKey<>("Advanced", Boolean.class, "volume.check.and.repair.leaks.before.use", "false",
|
||||
"To check and repair the volume if it has any leaks before performing volume attach or VM start operations", true, ConfigKey.Scope.StoragePool);
|
||||
|
||||
public static final ConfigKey<Boolean> CLVMSecureZeroFill = new ConfigKey<>("Advanced", Boolean.class, "clvm.secure.zero.fill", "false",
|
||||
"When enabled, CLVM volumes to be zero-filled at the time of deletion to prevent data from being recovered by VMs reusing the space, as thick LVM volumes write data linearly. Note: This setting is propagated to hosts when they connect to the storage pool. Changing this setting requires disconnecting and reconnecting hosts or restarting the KVM agent for it to take effect.", false, ConfigKey.Scope.StoragePool);
|
||||
|
||||
private final StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine;
|
||||
|
||||
private static final Set<Volume.State> STATES_VOLUME_CANNOT_BE_DESTROYED = new HashSet<>(Arrays.asList(Volume.State.Destroy, Volume.State.Expunging, Volume.State.Expunged, Volume.State.Allocated));
|
||||
|
|
@ -1752,6 +1757,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
if (DataStoreRole.Image.equals(role)) {
|
||||
_resourceLimitMgr.decrementResourceCount(volOnStorage.getAccountId(), ResourceType.secondary_storage, volOnStorage.getSize());
|
||||
}
|
||||
|
||||
// Clean up CLVM lock host tracking detail after successful deletion from primary storage
|
||||
if (DataStoreRole.Primary.equals(role)) {
|
||||
clvmLockManager.clearClvmLockHostDetail(volume);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2602,21 +2612,42 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded));
|
||||
}
|
||||
|
||||
if (moveVolumeNeeded) {
|
||||
// Check if CLVM lock transfer is needed (even if moveVolumeNeeded is false)
|
||||
// This handles the case where the volume is already on the correct storage pool
|
||||
// but the VM is running on a different host, requiring only a lock transfer
|
||||
boolean isClvmLockTransferNeeded = !moveVolumeNeeded &&
|
||||
isClvmLockTransferRequired(newVolumeOnPrimaryStorage, existingVolumeOfVm, vm);
|
||||
|
||||
if (isClvmLockTransferNeeded) {
|
||||
// CLVM lock transfer - no data copy, no pool change needed
|
||||
newVolumeOnPrimaryStorage = executeClvmLightweightMigration(
|
||||
newVolumeOnPrimaryStorage, vm, existingVolumeOfVm,
|
||||
"CLVM lock transfer", "same pool to different host");
|
||||
} else if (moveVolumeNeeded) {
|
||||
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
|
||||
if (primaryStore.isLocal()) {
|
||||
throw new CloudRuntimeException(
|
||||
"Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() + " as migration of local data volume is not allowed");
|
||||
}
|
||||
StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
|
||||
|
||||
try {
|
||||
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
|
||||
newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(),
|
||||
volumeToAttachHyperType);
|
||||
} catch (ConcurrentOperationException | StorageUnavailableException e) {
|
||||
logger.debug("move volume failed", e);
|
||||
throw new CloudRuntimeException("move volume failed", e);
|
||||
boolean isClvmLightweightMigration = isClvmLightweightMigrationNeeded(
|
||||
newVolumeOnPrimaryStorage, existingVolumeOfVm, vm);
|
||||
|
||||
if (isClvmLightweightMigration) {
|
||||
newVolumeOnPrimaryStorage = executeClvmLightweightMigration(
|
||||
newVolumeOnPrimaryStorage, vm, existingVolumeOfVm,
|
||||
"CLVM lightweight migration", "different pools, same VG");
|
||||
} else {
|
||||
StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
|
||||
|
||||
try {
|
||||
HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
|
||||
newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(),
|
||||
volumeToAttachHyperType);
|
||||
} catch (ConcurrentOperationException | StorageUnavailableException e) {
|
||||
logger.debug("move volume failed", e);
|
||||
throw new CloudRuntimeException("move volume failed", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
|
||||
|
|
@ -2631,6 +2662,351 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
return newVol;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper method to get storage pools for volume and VM.
|
||||
*
|
||||
* @param volumeToAttach The volume being attached
|
||||
* @param vmExistingVolume The VM's existing volume
|
||||
* @return Pair of StoragePoolVO objects (volumePool, vmPool), or null if either pool is missing
|
||||
*/
|
||||
private Pair<StoragePoolVO, StoragePoolVO> getStoragePoolsForVolumeAttachment(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume) {
|
||||
if (volumeToAttach == null || vmExistingVolume == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
StoragePoolVO volumePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
|
||||
StoragePoolVO vmPool = _storagePoolDao.findById(vmExistingVolume.getPoolId());
|
||||
|
||||
if (volumePool == null || vmPool == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new Pair<>(volumePool, vmPool);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if both storage pools are CLVM type.
|
||||
*
|
||||
* @param volumePool Storage pool for the volume
|
||||
* @param vmPool Storage pool for the VM
|
||||
* @return true if both pools are CLVM type
|
||||
*/
|
||||
private boolean areBothPoolsClvmType(StoragePoolVO volumePool, StoragePoolVO vmPool) {
|
||||
return volumePool.getPoolType() == StoragePoolType.CLVM &&
|
||||
vmPool.getPoolType() == StoragePoolType.CLVM;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a storage pool is CLVM type.
|
||||
*
|
||||
* @param pool Storage pool to check
|
||||
* @return true if pool is CLVM type
|
||||
*/
|
||||
private boolean isClvmPool(StoragePoolVO pool) {
|
||||
return pool != null && pool.getPoolType() == StoragePoolType.CLVM;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the Volume Group (VG) name from a CLVM storage pool path.
|
||||
* For CLVM, the path is typically: /vgname
|
||||
*
|
||||
* @param poolPath The storage pool path
|
||||
* @return VG name, or null if path is null
|
||||
*/
|
||||
private String extractVgNameFromPath(String poolPath) {
|
||||
if (poolPath == null) {
|
||||
return null;
|
||||
}
|
||||
return poolPath.startsWith("/") ? poolPath.substring(1) : poolPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if two CLVM storage pools are in the same Volume Group.
|
||||
*
|
||||
* @param volumePool Storage pool for the volume
|
||||
* @param vmPool Storage pool for the VM
|
||||
* @return true if both pools are in the same VG
|
||||
*/
|
||||
private boolean arePoolsInSameVolumeGroup(StoragePoolVO volumePool, StoragePoolVO vmPool) {
|
||||
String volumeVgName = extractVgNameFromPath(volumePool.getPath());
|
||||
String vmVgName = extractVgNameFromPath(vmPool.getPath());
|
||||
|
||||
return volumeVgName != null && volumeVgName.equals(vmVgName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if a CLVM volume needs lightweight lock migration instead of full data copy.
|
||||
*
|
||||
* Lightweight migration is needed when:
|
||||
* 1. Volume is on CLVM storage
|
||||
* 2. Source and destination are in the same Volume Group
|
||||
* 3. Only the host/lock needs to change (not the storage pool)
|
||||
*
|
||||
* @param volumeToAttach The volume being attached
|
||||
* @param vmExistingVolume The VM's existing volume (typically root volume)
|
||||
* @param vm The VM to attach the volume to
|
||||
* @return true if lightweight CLVM lock migration should be used
|
||||
*/
|
||||
private boolean isClvmLightweightMigrationNeeded(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume, UserVmVO vm) {
|
||||
Pair<StoragePoolVO, StoragePoolVO> pools = getStoragePoolsForVolumeAttachment(volumeToAttach, vmExistingVolume);
|
||||
if (pools == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
StoragePoolVO volumePool = pools.first();
|
||||
StoragePoolVO vmPool = pools.second();
|
||||
|
||||
if (!areBothPoolsClvmType(volumePool, vmPool)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (arePoolsInSameVolumeGroup(volumePool, vmPool)) {
|
||||
String vgName = extractVgNameFromPath(volumePool.getPath());
|
||||
logger.info("CLVM lightweight migration detected: Volume {} is in same VG ({}) as VM {} volumes, " +
|
||||
"only lock transfer needed (no data copy)",
|
||||
volumeToAttach.getUuid(), vgName, vm.getUuid());
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if a CLVM volume requires lock transfer when already on the correct storage pool.
|
||||
*
|
||||
* Lock transfer is needed when:
|
||||
* 1. Volume is already on the same CLVM storage pool as VM's volumes
|
||||
* 2. But the volume lock is held by a different host than where the VM is running
|
||||
* 3. Only the lock needs to change (no pool change, no data copy)
|
||||
*
|
||||
* @param volumeToAttach The volume being attached
|
||||
* @param vmExistingVolume The VM's existing volume (typically root volume)
|
||||
* @param vm The VM to attach the volume to
|
||||
* @return true if CLVM lock transfer is needed (but not full migration)
|
||||
*/
|
||||
private boolean isClvmLockTransferRequired(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume, UserVmVO vm) {
|
||||
if (vm == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Pair<StoragePoolVO, StoragePoolVO> pools = getStoragePoolsForVolumeAttachment(volumeToAttach, vmExistingVolume);
|
||||
if (pools == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
StoragePoolVO volumePool = pools.first();
|
||||
StoragePoolVO vmPool = pools.second();
|
||||
|
||||
if (!isClvmPool(volumePool)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (volumePool.getId() != vmPool.getId()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long volumeLockHostId = findClvmVolumeLockHost(volumeToAttach);
|
||||
|
||||
Long vmHostId = vm.getHostId();
|
||||
if (vmHostId == null) {
|
||||
vmHostId = vm.getLastHostId();
|
||||
}
|
||||
|
||||
if (volumeLockHostId == null) {
|
||||
VolumeVO volumeVO = _volsDao.findById(volumeToAttach.getId());
|
||||
if (volumeVO != null && volumeVO.getState() == Volume.State.Ready && volumeVO.getInstanceId() == null) {
|
||||
logger.debug("CLVM volume {} is detached on same pool as VM {}, lock transfer may be needed",
|
||||
volumeToAttach.getUuid(), vm.getUuid());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (volumeLockHostId != null && vmHostId != null && !volumeLockHostId.equals(vmHostId)) {
|
||||
logger.info("CLVM lock transfer required: Volume {} lock is on host {} but VM {} is on host {}",
|
||||
volumeToAttach.getUuid(), volumeLockHostId, vm.getUuid(), vmHostId);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines the destination host for CLVM lock migration.
|
||||
*
|
||||
* If VM is running, uses the VM's current host.
|
||||
* If VM is stopped, picks an available UP host from the storage pool's cluster.
|
||||
*
|
||||
* @param vm The VM
|
||||
* @param vmExistingVolume The VM's existing volume (to determine cluster)
|
||||
* @return Host ID, or null if cannot be determined
|
||||
*/
|
||||
private Long determineClvmLockDestinationHost(UserVmVO vm, VolumeVO vmExistingVolume) {
|
||||
Long destHostId = vm.getHostId();
|
||||
if (destHostId != null) {
|
||||
return destHostId;
|
||||
}
|
||||
|
||||
if (vmExistingVolume != null && vmExistingVolume.getPoolId() != null) {
|
||||
StoragePoolVO pool = _storagePoolDao.findById(vmExistingVolume.getPoolId());
|
||||
if (pool != null && pool.getClusterId() != null) {
|
||||
List<HostVO> hosts = _hostDao.findByClusterId(pool.getClusterId());
|
||||
if (hosts != null && !hosts.isEmpty()) {
|
||||
// Pick first available UP host
|
||||
for (HostVO host : hosts) {
|
||||
if (host.getStatus() == Status.Up) {
|
||||
destHostId = host.getId();
|
||||
logger.debug("VM {} is stopped, selected host {} from cluster {} for CLVM lock migration",
|
||||
vm.getUuid(), destHostId, pool.getClusterId());
|
||||
return destHostId;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes CLVM lightweight migration with consistent logging and error handling.
|
||||
*
|
||||
* This helper method wraps the actual migration logic to eliminate code duplication
|
||||
* between different CLVM migration scenarios (lock transfer vs. lightweight migration).
|
||||
*
|
||||
* @param volume The volume to migrate locks for
|
||||
* @param vm The VM to attach the volume to
|
||||
* @param vmExistingVolume The VM's existing volume (to determine target host)
|
||||
* @param operationType Description of the operation type for logging (e.g., "CLVM lock transfer")
|
||||
* @param scenarioDescription Description of the scenario for logging (e.g., "same pool to different host")
|
||||
* @return Updated VolumeInfo after lock migration
|
||||
* @throws CloudRuntimeException if migration fails
|
||||
*/
|
||||
private VolumeInfo executeClvmLightweightMigration(VolumeInfo volume, UserVmVO vm, VolumeVO vmExistingVolume,
|
||||
String operationType, String scenarioDescription) {
|
||||
logger.info("Performing {} for volume {} to VM {} ({})",
|
||||
operationType, volume.getUuid(), vm.getUuid(), scenarioDescription);
|
||||
|
||||
try {
|
||||
return performClvmLightweightMigration(volume, vm, vmExistingVolume);
|
||||
} catch (Exception e) {
|
||||
logger.error("{} failed for volume {}: {}",
|
||||
operationType, volume.getUuid(), e.getMessage(), e);
|
||||
throw new CloudRuntimeException(operationType + " failed", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs lightweight CLVM lock migration for volume attachment.
|
||||
*
|
||||
* This transfers the LVM exclusive lock from the current host to the VM's host
|
||||
* without copying data (since CLVM volumes are on cluster-wide shared storage).
|
||||
*
|
||||
* @param volume The volume to migrate locks for
|
||||
* @param vm The VM to attach the volume to
|
||||
* @param vmExistingVolume The VM's existing volume (to determine target host)
|
||||
* @return Updated VolumeInfo after lock migration
|
||||
* @throws Exception if lock migration fails
|
||||
*/
|
||||
private VolumeInfo performClvmLightweightMigration(VolumeInfo volume, UserVmVO vm, VolumeVO vmExistingVolume) throws Exception {
|
||||
String volumeUuid = volume.getUuid();
|
||||
Long vmId = vm.getId();
|
||||
|
||||
logger.info("Starting CLVM lightweight lock migration for volume {} (id: {}) to VM {} (id: {})",
|
||||
volumeUuid, volume.getId(), vm.getUuid(), vmId);
|
||||
|
||||
Long destHostId = determineClvmLockDestinationHost(vm, vmExistingVolume);
|
||||
|
||||
if (destHostId == null) {
|
||||
throw new CloudRuntimeException(
|
||||
"Cannot determine destination host for CLVM lock migration - VM has no host and no available cluster hosts");
|
||||
}
|
||||
|
||||
Long sourceHostId = findClvmVolumeLockHost(volume);
|
||||
|
||||
if (sourceHostId == null) {
|
||||
logger.warn("Could not determine source host for CLVM volume {} lock, " +
|
||||
"assuming volume is not exclusively locked", volumeUuid);
|
||||
sourceHostId = destHostId;
|
||||
}
|
||||
|
||||
if (sourceHostId.equals(destHostId)) {
|
||||
logger.info("CLVM volume {} already has lock on destination host {}, no migration needed",
|
||||
volumeUuid, destHostId);
|
||||
return volume;
|
||||
}
|
||||
|
||||
logger.info("Migrating CLVM volume {} lock from host {} to host {}",
|
||||
volumeUuid, sourceHostId, destHostId);
|
||||
|
||||
boolean success = transferClvmVolumeLock(volume, sourceHostId, destHostId);
|
||||
|
||||
if (!success) {
|
||||
throw new CloudRuntimeException(
|
||||
String.format("Failed to transfer CLVM lock for volume %s from host %s to host %s",
|
||||
volumeUuid, sourceHostId, destHostId));
|
||||
}
|
||||
|
||||
logger.info("Successfully migrated CLVM volume {} lock from host {} to host {}",
|
||||
volumeUuid, sourceHostId, destHostId);
|
||||
|
||||
return volFactory.getVolume(volume.getId());
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds which host currently has the exclusive lock on a CLVM volume.
|
||||
*
|
||||
* @param volume The CLVM volume
|
||||
* @return Host ID that has the exclusive lock, or null if cannot be determined
|
||||
*/
|
||||
private Long findClvmVolumeLockHost(VolumeInfo volume) {
|
||||
Long lockHostId = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
|
||||
if (lockHostId != null) {
|
||||
return lockHostId;
|
||||
}
|
||||
|
||||
Long instanceId = volume.getInstanceId();
|
||||
if (instanceId != null) {
|
||||
VMInstanceVO vmInstance = _vmInstanceDao.findById(instanceId);
|
||||
if (vmInstance != null && vmInstance.getHostId() != null) {
|
||||
return vmInstance.getHostId();
|
||||
}
|
||||
}
|
||||
|
||||
StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
|
||||
if (pool != null && pool.getClusterId() != null) {
|
||||
List<HostVO> hosts = _hostDao.findByClusterId(pool.getClusterId());
|
||||
if (hosts != null && !hosts.isEmpty()) {
|
||||
// Return first available UP host
|
||||
for (HostVO host : hosts) {
|
||||
if (host.getStatus() == Status.Up) {
|
||||
return host.getId();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transfers CLVM volume exclusive lock from source host to destination host.
|
||||
*
|
||||
* @param volume The volume to transfer lock for
|
||||
* @param sourceHostId Host currently holding the lock
|
||||
* @param destHostId Host to transfer lock to
|
||||
* @return true if successful, false otherwise
|
||||
*/
|
||||
private boolean transferClvmVolumeLock(VolumeInfo volume, Long sourceHostId, Long destHostId) {
|
||||
StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
|
||||
if (pool == null) {
|
||||
logger.error("Cannot find storage pool for volume {}", volume.getUuid());
|
||||
return false;
|
||||
}
|
||||
|
||||
return clvmLockManager.transferClvmVolumeLock(volume.getUuid(), volume.getId(),
|
||||
volume.getPath(), pool, sourceHostId, destHostId);
|
||||
}
|
||||
|
||||
public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS) {
|
||||
Account caller = CallContext.current().getCallingAccount();
|
||||
|
||||
|
|
@ -5381,7 +5757,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
|
|||
MatchStoragePoolTagsWithDiskOffering,
|
||||
UseHttpsToUpload,
|
||||
WaitDetachDevice,
|
||||
AllowCheckAndRepairVolume
|
||||
AllowCheckAndRepairVolume,
|
||||
CLVMSecureZeroFill
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1667,6 +1667,9 @@ public class SnapshotManagerImpl extends MutualExclusiveIdsManagerBase implement
|
|||
if (backupSnapToSecondary) {
|
||||
if (!isKvmAndFileBasedStorage) {
|
||||
backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds(), payload.getStoragePoolIds());
|
||||
if (storagePool.getPoolType() == StoragePoolType.CLVM) {
|
||||
_snapshotStoreDao.removeBySnapshotStore(snapshotId, snapshotOnPrimary.getDataStore().getId(), snapshotOnPrimary.getDataStore().getRole());
|
||||
}
|
||||
} else {
|
||||
postSnapshotDirectlyToSecondary(snapshot, snapshotOnPrimary, snapshotId);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -436,6 +436,12 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme
|
|||
vmSnapshotType = VMSnapshot.Type.Disk;
|
||||
}
|
||||
|
||||
// CLVM_NG: Block VM snapshots until Phase 2 implementation is complete
|
||||
if (rootVolumePool.getPoolType() == Storage.StoragePoolType.CLVM_NG) {
|
||||
throw new InvalidParameterValueException("VM snapshots are not yet supported on CLVM_NG storage pools. " +
|
||||
"This feature will be available in a future release.");
|
||||
}
|
||||
|
||||
try {
|
||||
return createAndPersistVMSnapshot(userVmVo, vsDescription, vmSnapshotName, vsDisplayName, vmSnapshotType);
|
||||
} catch (Exception e) {
|
||||
|
|
|
|||
|
|
@ -179,6 +179,8 @@
|
|||
|
||||
<bean id="oCFS2ManagerImpl" class="com.cloud.storage.OCFS2ManagerImpl" />
|
||||
|
||||
<bean id="clvmLockManager" class="com.cloud.storage.ClvmLockManager" />
|
||||
|
||||
<bean id="outOfBandManagementServiceImpl" class="org.apache.cloudstack.outofbandmanagement.OutOfBandManagementServiceImpl">
|
||||
<property name="outOfBandManagementDrivers" value="#{outOfBandManagementDriversRegistry.registered}" />
|
||||
</bean>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,229 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package com.cloud.storage;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.host.Status;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@RunWith(MockitoJUnitRunner.class)
|
||||
public class ClvmLockManagerTest {
|
||||
|
||||
@Mock
|
||||
private VolumeDetailsDao volsDetailsDao;
|
||||
|
||||
@Mock
|
||||
private AgentManager agentMgr;
|
||||
|
||||
@Mock
|
||||
private HostDao hostDao;
|
||||
|
||||
@InjectMocks
|
||||
private ClvmLockManager clvmLockManager;
|
||||
|
||||
private static final Long VOLUME_ID = 100L;
|
||||
private static final Long HOST_ID_1 = 1L;
|
||||
private static final Long HOST_ID_2 = 2L;
|
||||
private static final String VOLUME_UUID = "test-volume-uuid";
|
||||
private static final String VOLUME_PATH = "test-volume-path";
|
||||
private static final String VG_NAME = "acsvg";
|
||||
|
||||
@Before
|
||||
public void setUp() {
|
||||
// Reset mocks before each test
|
||||
Mockito.reset(volsDetailsDao, agentMgr, hostDao);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetClvmLockHostId_Success() {
|
||||
VolumeDetailVO detail = new VolumeDetailVO();
|
||||
detail.setValue("123");
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(detail);
|
||||
|
||||
Long result = clvmLockManager.getClvmLockHostId(VOLUME_ID, VOLUME_UUID);
|
||||
|
||||
Assert.assertEquals(Long.valueOf(123), result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetClvmLockHostId_NoDetail() {
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(null);
|
||||
|
||||
Long result = clvmLockManager.getClvmLockHostId(VOLUME_ID, VOLUME_UUID);
|
||||
|
||||
Assert.assertNull(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetClvmLockHostId_InvalidNumber() {
|
||||
VolumeDetailVO detail = new VolumeDetailVO();
|
||||
detail.setValue("invalid");
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(detail);
|
||||
|
||||
Long result = clvmLockManager.getClvmLockHostId(VOLUME_ID, VOLUME_UUID);
|
||||
|
||||
Assert.assertNull(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetClvmLockHostId_NewDetail() {
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(null);
|
||||
|
||||
clvmLockManager.setClvmLockHostId(VOLUME_ID, HOST_ID_1);
|
||||
|
||||
verify(volsDetailsDao, times(1)).addDetail(eq(VOLUME_ID), eq(VolumeInfo.CLVM_LOCK_HOST_ID),
|
||||
eq(String.valueOf(HOST_ID_1)), eq(false));
|
||||
verify(volsDetailsDao, never()).update(anyLong(), any());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetClvmLockHostId_UpdateExisting() {
|
||||
VolumeDetailVO existingDetail = Mockito.mock(VolumeDetailVO.class);
|
||||
when(existingDetail.getId()).thenReturn(50L);
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(existingDetail);
|
||||
|
||||
clvmLockManager.setClvmLockHostId(VOLUME_ID, HOST_ID_2);
|
||||
|
||||
verify(existingDetail, times(1)).setValue(String.valueOf(HOST_ID_2));
|
||||
verify(volsDetailsDao, times(1)).update(eq(50L), eq(existingDetail));
|
||||
verify(volsDetailsDao, never()).addDetail(anyLong(), any(), any(), Mockito.anyBoolean());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClearClvmLockHostDetail_Success() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
when(volume.getId()).thenReturn(VOLUME_ID);
|
||||
when(volume.getUuid()).thenReturn(VOLUME_UUID);
|
||||
|
||||
VolumeDetailVO detail = Mockito.mock(VolumeDetailVO.class);
|
||||
when(detail.getId()).thenReturn(99L);
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(detail);
|
||||
|
||||
clvmLockManager.clearClvmLockHostDetail(volume);
|
||||
|
||||
verify(volsDetailsDao, times(1)).remove(99L);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClearClvmLockHostDetail_NoDetail() {
|
||||
VolumeVO volume = Mockito.mock(VolumeVO.class);
|
||||
when(volume.getId()).thenReturn(VOLUME_ID);
|
||||
when(volsDetailsDao.findDetail(VOLUME_ID, VolumeInfo.CLVM_LOCK_HOST_ID)).thenReturn(null);
|
||||
|
||||
clvmLockManager.clearClvmLockHostDetail(volume);
|
||||
|
||||
verify(volsDetailsDao, never()).remove(anyLong());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmVolumeLock_Success() throws AgentUnavailableException, OperationTimedoutException {
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
when(pool.getPath()).thenReturn("/" + VG_NAME);
|
||||
|
||||
HostVO sourceHost = Mockito.mock(HostVO.class);
|
||||
when(sourceHost.getStatus()).thenReturn(Status.Up);
|
||||
when(hostDao.findById(HOST_ID_1)).thenReturn(sourceHost);
|
||||
|
||||
Answer deactivateAnswer = new Answer(null, true, null);
|
||||
Answer activateAnswer = new Answer(null, true, null);
|
||||
|
||||
when(agentMgr.send(eq(HOST_ID_1), any(ClvmLockTransferCommand.class))).thenReturn(deactivateAnswer);
|
||||
when(agentMgr.send(eq(HOST_ID_2), any(ClvmLockTransferCommand.class))).thenReturn(activateAnswer);
|
||||
|
||||
boolean result = clvmLockManager.transferClvmVolumeLock(VOLUME_UUID, VOLUME_ID,
|
||||
VOLUME_PATH, pool, HOST_ID_1, HOST_ID_2);
|
||||
|
||||
Assert.assertTrue(result);
|
||||
verify(agentMgr, times(2)).send(anyLong(), any(ClvmLockTransferCommand.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmVolumeLock_NullPool() {
|
||||
boolean result = clvmLockManager.transferClvmVolumeLock(VOLUME_UUID, VOLUME_ID,
|
||||
VOLUME_PATH, null, HOST_ID_1, HOST_ID_2);
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmVolumeLock_SameHost() throws AgentUnavailableException, OperationTimedoutException {
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
when(pool.getPath()).thenReturn("/" + VG_NAME);
|
||||
|
||||
Answer activateAnswer = new Answer(null, true, null);
|
||||
when(agentMgr.send(eq(HOST_ID_1), any(ClvmLockTransferCommand.class))).thenReturn(activateAnswer);
|
||||
|
||||
boolean result = clvmLockManager.transferClvmVolumeLock(VOLUME_UUID, VOLUME_ID,
|
||||
VOLUME_PATH, pool, HOST_ID_1, HOST_ID_1);
|
||||
|
||||
Assert.assertTrue(result);
|
||||
verify(agentMgr, times(1)).send(anyLong(), any(ClvmLockTransferCommand.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmVolumeLock_ActivationFails() throws AgentUnavailableException, OperationTimedoutException {
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
when(pool.getPath()).thenReturn(VG_NAME);
|
||||
|
||||
Answer activateAnswer = new Answer(null, false, "Activation failed");
|
||||
when(agentMgr.send(eq(HOST_ID_1), any(ClvmLockTransferCommand.class))).thenReturn(activateAnswer);
|
||||
|
||||
boolean result = clvmLockManager.transferClvmVolumeLock(VOLUME_UUID, VOLUME_ID,
|
||||
VOLUME_PATH, pool, HOST_ID_1, HOST_ID_1);
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testTransferClvmVolumeLock_AgentUnavailable() throws AgentUnavailableException, OperationTimedoutException {
|
||||
StoragePoolVO pool = Mockito.mock(StoragePoolVO.class);
|
||||
when(pool.getPath()).thenReturn(VG_NAME);
|
||||
|
||||
when(agentMgr.send(anyLong(), any(ClvmLockTransferCommand.class)))
|
||||
.thenThrow(new AgentUnavailableException("Agent unavailable", HOST_ID_2));
|
||||
|
||||
boolean result = clvmLockManager.transferClvmVolumeLock(VOLUME_UUID, VOLUME_ID,
|
||||
VOLUME_PATH, pool, HOST_ID_1, HOST_ID_2);
|
||||
|
||||
Assert.assertFalse(result);
|
||||
}
|
||||
}
|
||||
|
|
@ -226,6 +226,8 @@ public class VolumeApiServiceImplTest {
|
|||
ClusterDao clusterDao;
|
||||
@Mock
|
||||
VolumeOrchestrationService volumeOrchestrationService;
|
||||
@Mock
|
||||
ClvmLockManager clvmLockManager;
|
||||
|
||||
|
||||
private DetachVolumeCmd detachCmd = new DetachVolumeCmd();
|
||||
|
|
|
|||
|
|
@ -383,7 +383,7 @@
|
|||
<a-input v-model:value="form.radossecret" :placeholder="$t('label.rados.secret')" />
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="form.protocol === 'CLVM'">
|
||||
<div v-if="form.protocol === 'CLVM' || form.protocol === 'CLVM_NG'">
|
||||
<a-form-item name="volumegroup" ref="volumegroup" :label="$t('label.volumegroup')">
|
||||
<a-input v-model:value="form.volumegroup" :placeholder="$t('label.volumegroup')" />
|
||||
</a-form-item>
|
||||
|
|
@ -607,7 +607,7 @@ export default {
|
|||
const cluster = this.clusters.find(cluster => cluster.id === this.form.cluster)
|
||||
this.hypervisorType = cluster.hypervisortype
|
||||
if (this.hypervisorType === 'KVM') {
|
||||
this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'Gluster', 'Linstor', 'custom', 'FiberChannel']
|
||||
this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'CLVM_NG', 'Gluster', 'Linstor', 'custom', 'FiberChannel']
|
||||
if (this.form.scope === 'host') {
|
||||
this.protocols.push('Filesystem')
|
||||
}
|
||||
|
|
@ -729,6 +729,15 @@ export default {
|
|||
}
|
||||
return url
|
||||
},
|
||||
clvmNgURL (vgname) {
|
||||
var url
|
||||
if (vgname.indexOf('://') === -1) {
|
||||
url = 'clvm_ng://localhost/' + vgname
|
||||
} else {
|
||||
url = vgname
|
||||
}
|
||||
return url
|
||||
},
|
||||
vmfsURL (server, path) {
|
||||
var url
|
||||
if (server.indexOf('://') === -1) {
|
||||
|
|
@ -853,6 +862,9 @@ export default {
|
|||
} else if (values.protocol === 'CLVM') {
|
||||
var vg = (values.volumegroup.substring(0, 1) !== '/') ? ('/' + values.volumegroup) : values.volumegroup
|
||||
url = this.clvmURL(vg)
|
||||
} else if (values.protocol === 'CLVM_NG') {
|
||||
var vg = (values.volumegroup.substring(0, 1) !== '/') ? ('/' + values.volumegroup) : values.volumegroup
|
||||
url = this.clvmNgURL(vg)
|
||||
} else if (values.protocol === 'RBD') {
|
||||
url = this.rbdURL(values.radosmonitor, values.radospool, values.radosuser, values.radossecret)
|
||||
if (values.datapool) {
|
||||
|
|
|
|||
Loading…
Reference in New Issue