Fix revert-snapshot output format type and handle revert-snapshot host routing for CLVM volumes

This commit is contained in:
Pearl Dsilva 2026-02-24 19:00:58 -05:00
parent 7b5af5e6ca
commit d8890ecacf
5 changed files with 119 additions and 5 deletions

View File

@ -258,6 +258,7 @@ import com.cloud.storage.Volume;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.VolumeApiService;
import com.cloud.storage.VolumeApiServiceImpl;
import com.cloud.storage.VolumeDetailVO;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
@ -266,6 +267,7 @@ import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
@ -361,6 +363,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Inject
private VolumeDao _volsDao;
@Inject
private VolumeDetailsDao _volsDetailsDao;
@Inject
private HighAvailabilityManager _haMgr;
@Inject
private HostPodDao _podDao;
@ -3274,6 +3278,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
logger.warn("Exception during post-migration tasks for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
vm.getInstanceName(), dstHostId, e.getMessage(), e);
}
updateClvmLockHostForVmVolumes(vm.getId(), dstHostId);
} finally {
if (!migrated) {
logger.info("Migration was unsuccessful. Cleaning up: {}", vm);
@ -3359,6 +3365,37 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
_vmDao.persist(newVm);
}
/**
 * Updates CLVM_LOCK_HOST_ID for all CLVM volumes attached to a VM after VM migration.
 * This ensures that subsequent operations on CLVM volumes are routed to the correct host.
 *
 * @param vmId The ID of the VM that was migrated
 * @param destHostId The destination host ID where the VM now resides
 */
private void updateClvmLockHostForVmVolumes(long vmId, long destHostId) {
    List<VolumeVO> volumes = _volsDao.findByInstance(vmId);
    if (volumes == null || volumes.isEmpty()) {
        return;
    }
    for (VolumeVO volume : volumes) {
        // A volume without an allocated pool cannot be CLVM-backed; skip it rather
        // than passing a null pool id to the pool DAO (avoids a potential NPE).
        Long poolId = volume.getPoolId();
        if (poolId == null) {
            continue;
        }
        StoragePoolVO pool = _storagePoolDao.findById(poolId);
        if (pool == null || pool.getPoolType() != Storage.StoragePoolType.CLVM) {
            continue;
        }
        VolumeDetailVO existingDetail = _volsDetailsDao.findDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID);
        if (existingDetail != null) {
            // Detail already exists: point the lock holder at the destination host.
            existingDetail.setValue(String.valueOf(destHostId));
            _volsDetailsDao.update(existingDetail.getId(), existingDetail);
            logger.debug("Updated CLVM_LOCK_HOST_ID for volume {} to host {} after VM {} migration",
                    volume.getUuid(), destHostId, vmId);
        } else {
            // First migration for this volume: create the tracking detail.
            _volsDetailsDao.addDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID, String.valueOf(destHostId), false);
            logger.debug("Created CLVM_LOCK_HOST_ID for volume {} with host {} after VM {} migration",
                    volume.getUuid(), destHostId, vmId);
        }
    }
}
/**
* We create the mapping of volumes and storage pool to migrate the VMs according to the information sent by the user.
* If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, DataCenterDeployment, Map, List)}

View File

@ -819,6 +819,39 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
}
}
/**
 * Updates the CLVM_LOCK_HOST_ID for a migrated volume if applicable.
 * Only CLVM-backed volumes that are attached to a VM whose current host is known
 * are updated; every other case is a silent no-op.
 *
 * @param migratedVolume The volume that was migrated
 * @param destPool The destination storage pool
 * @param operationType Description of the operation (e.g., "migrated", "live-migrated") for logging
 */
private void updateClvmLockHostAfterMigration(Volume migratedVolume, StoragePool destPool, String operationType) {
    // Nothing to do unless we have a volume, a destination pool, and an owning VM.
    if (migratedVolume == null || destPool == null || migratedVolume.getInstanceId() == null) {
        return;
    }
    StoragePoolVO persistedPool = _storagePoolDao.findById(destPool.getId());
    if (persistedPool == null || persistedPool.getPoolType() != Storage.StoragePoolType.CLVM) {
        return;
    }
    VMInstanceVO attachedVm = vmInstanceDao.findById(migratedVolume.getInstanceId());
    if (attachedVm == null || attachedVm.getHostId() == null) {
        return;
    }
    setClvmLockHostId(migratedVolume.getId(), attachedVm.getHostId());
    logger.debug("Updated CLVM_LOCK_HOST_ID for {} volume {} to host {} where VM {} is running",
            operationType, migratedVolume.getUuid(), attachedVm.getHostId(), attachedVm.getInstanceName());
}
/**
 * Generates a random, universally unique volume name.
 *
 * @return a freshly generated UUID rendered in its canonical string form
 */
public String getRandomVolumeName() {
    return String.valueOf(UUID.randomUUID());
}
@ -1485,6 +1518,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
_snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
_snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
}
// For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after migration
updateClvmLockHostAfterMigration(result.getVolume(), destPool, "migrated");
}
return result.getVolume();
} catch (InterruptedException | ExecutionException e) {
@ -1510,6 +1546,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult());
return null;
}
// For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after live migration
updateClvmLockHostAfterMigration(result.getVolume(), destPool, "live-migrated");
return result.getVolume();
} catch (InterruptedException | ExecutionException e) {
logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage());
@ -1552,6 +1592,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
logger.error(msg);
throw new CloudRuntimeException(msg);
}
for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
Volume volume = entry.getKey();
StoragePool destPool = entry.getValue();
updateClvmLockHostAfterMigration(volume, destPool, "vm-migrated");
}
} catch (InterruptedException | ExecutionException e) {
logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage());
logger.debug("Exception: ", e);

View File

@ -470,6 +470,26 @@ public class DefaultEndPointSelector implements EndPointSelector {
@Override
public EndPoint select(DataObject object) {
DataStore store = object.getDataStore();
// For CLVM volumes, check if there's a lock host ID to route to
if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
VolumeInfo volume = (VolumeInfo) object;
StoragePoolVO pool = _storagePoolDao.findById(store.getId());
if (pool != null && pool.getPoolType() == Storage.StoragePoolType.CLVM) {
Long lockHostId = getClvmLockHostId(volume);
if (lockHostId != null) {
logger.debug("Routing CLVM volume {} operation to lock holder host {}",
volume.getUuid(), lockHostId);
EndPoint ep = getEndPointFromHostId(lockHostId);
if (ep != null) {
return ep;
}
logger.warn("Could not get endpoint for CLVM lock host {}, falling back to default selection",
lockHostId);
}
}
}
EndPoint ep = select(store);
if (ep != null) {
return ep;

View File

@ -422,12 +422,19 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri
CommandResult result = new CommandResult();
try {
EndPoint ep = null;
if (snapshotOnPrimaryStore != null) {
ep = epSelector.select(snapshotOnPrimaryStore);
} else {
VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
VolumeInfo volumeInfo = volFactory.getVolume(snapshot.getVolumeId(), DataStoreRole.Primary);
StoragePoolVO storagePool = primaryStoreDao.findById(volumeInfo.getPoolId());
if (storagePool != null && storagePool.getPoolType() == StoragePoolType.CLVM) {
ep = epSelector.select(volumeInfo);
} else {
if (snapshotOnPrimaryStore != null) {
ep = epSelector.select(snapshotOnPrimaryStore);
} else {
ep = epSelector.select(volumeInfo);
}
}
if ( ep == null ){
String errMsg = "No remote endpoint to send RevertSnapshotCommand, check if host or ssvm is down?";
logger.error(errMsg);

View File

@ -307,7 +307,12 @@ backup_snapshot() {
# Reverts a qcow2 snapshot file onto a destination disk.
# Arguments:
#   $1 - snapshotPath: path of the qcow2 snapshot to convert from
#   $2 - destPath: target disk; a block device (e.g. a CLVM logical volume)
#        is written as raw, anything else as qcow2
# Returns 0 on success, 2 when qemu-img fails.
revert_snapshot() {
    local snapshotPath=$1
    local destPath=$2
    local output_format="qcow2"
    if [ -b "$destPath" ]; then
        output_format="raw"
    fi
    # Use a brace group, not a subshell: inside `( ... )` the `return 2` only
    # exits the subshell, so the function would fall through to `return 0` and
    # report success even when the conversion failed. Also keep user-supplied
    # paths out of the printf FORMAT string (a '%' in a path would corrupt it).
    ${qemu_img} convert -f qcow2 -O ${output_format} "$snapshotPath" "$destPath" || \
        { printf '%s failed to revert snapshot %s to disk %s.\n' "${qemu_img}" "${snapshotPath}" "${destPath}" >&2; return 2; }
    return 0
}