mirror of https://github.com/apache/cloudstack.git
Add lock mechanism considering template id, pool id, host id in PowerFlex Storage (#8233)
Observed a failure to start a new virtual machine with PowerFlex storage. It was traced to concurrent VM starts using the same template and the same destination host for the copy: the second mapping attempt failed. While creating the volume clone from the seeded template in primary storage, taking a lock whose key contains the IDs of the template, the storage pool, and the destination host prevents concurrent mapping attempts against the same host.
This commit is contained in:
parent
7ea068c4dc
commit
7eb36367c9
|
|
@ -1478,8 +1478,8 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future);
|
||||
} else {
|
||||
// We have a template on PowerFlex primary storage. Create new volume and copy to it.
|
||||
s_logger.debug("Copying the template to the volume on primary storage");
|
||||
createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
|
||||
createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo, destPrimaryDataStore, templateOnPrimary,
|
||||
destHost, future, destDataStoreId, srcTemplateInfo.getId());
|
||||
}
|
||||
} else {
|
||||
s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally");
|
||||
|
|
@ -1490,6 +1490,32 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
return future;
|
||||
}
|
||||
|
||||
private void createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary,
|
||||
Host destHost, AsyncCallFuture<VolumeApiResult> future, long destDataStoreId, long srcTemplateId) {
|
||||
GlobalLock lock = null;
|
||||
try {
|
||||
String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" + srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" + destHost.getId();
|
||||
lock = GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString);
|
||||
if (lock == null) {
|
||||
throw new CloudRuntimeException("Unable to create volume from template, couldn't get global lock on " + tmplIdManagedPoolIdDestinationHostLockString);
|
||||
}
|
||||
|
||||
int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600);
|
||||
if (!lock.lock(storagePoolMaxWaitSeconds)) {
|
||||
s_logger.debug("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
|
||||
throw new CloudRuntimeException("Unable to create volume from template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
|
||||
}
|
||||
|
||||
s_logger.debug("Copying the template to the volume on primary storage");
|
||||
createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future);
|
||||
} finally {
|
||||
if (lock != null) {
|
||||
lock.unlock();
|
||||
lock.releaseRef();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private boolean computeSupportsVolumeClone(long zoneId, HypervisorType hypervisorType) {
|
||||
if (HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType)) {
|
||||
return true;
|
||||
|
|
|
|||
Loading…
Reference in New Issue