diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index f9238fa0e71..86f45630611 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -3053,7 +3053,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } protected void migrate(final VMInstanceVO vm, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException { - logger.info("Migrating {} to {}", vm, dest); + logger.info("Start preparing migration of the VM: {} to {}", vm, dest); final long dstHostId = dest.getHost().getId(); final Host fromHost = _hostDao.findById(srcHostId); if (fromHost == null) { @@ -3118,9 +3118,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (pfma == null || !pfma.getResult()) { final String details = pfma != null ? 
pfma.getDetails() : "null answer returned"; final String msg = "Unable to prepare for migration due to " + details; + logger.error("Failed to prepare destination host {} for migration of VM {} : {}", dstHostId, vm.getInstanceName(), details); pfma = null; throw new AgentUnavailableException(msg, dstHostId); } + logger.debug("Successfully prepared destination host {} for migration of VM {} ", dstHostId, vm.getInstanceName()); } catch (final OperationTimedoutException e1) { throw new AgentUnavailableException("Operation timed out", dstHostId); } finally { @@ -3141,18 +3143,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac volumeMgr.release(vm.getId(), dstHostId); } - logger.info("Migration cancelled because state has changed: {}", vm); - throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); + String msg = "Migration cancelled because state has changed: " + vm; + logger.warn(msg); + throw new ConcurrentOperationException(msg); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); - logger.info("Migration cancelled because {}", e1.getMessage()); + String msg = String.format("Migration cancelled for VM %s due to state transition failure: %s", + vm.getInstanceName(), e1.getMessage()); + logger.warn(msg, e1); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } catch (final CloudRuntimeException e2) { _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); - logger.info("Migration cancelled because {}", e2.getMessage()); + String msg = String.format("Migration cancelled for VM %s due to runtime exception: %s", + vm.getInstanceName(), e2.getMessage()); + logger.error(msg, e2); work.setStep(Step.Done); _workDao.update(work.getId(), work); try { @@ -3172,8 +3179,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements 
VirtualMac final Answer ma = _agentMgr.send(vm.getLastHostId(), mc); if (ma == null || !ma.getResult()) { final String details = ma != null ? ma.getDetails() : "null answer returned"; + String msg = String.format("Migration command failed for VM %s on source host id=%s to destination host %s: %s", + vm.getInstanceName(), vm.getLastHostId(), dstHostId, details); + logger.error(msg); throw new CloudRuntimeException(details); } + logger.info("Migration command successful for VM {}", vm.getInstanceName()); } catch (final OperationTimedoutException e) { boolean success = false; if (HypervisorType.KVM.equals(vm.getHypervisorType())) { @@ -3210,7 +3221,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { if (!checkVmOnHost(vm, dstHostId)) { - logger.error("Unable to complete migration for {}", vm); + logger.error("Migration verification failed for VM {} : VM not found on destination host {} ", vm.getInstanceName(), dstHostId); try { _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException e) { @@ -3225,7 +3236,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac migrated = true; } finally { if (!migrated) { - logger.info("Migration was unsuccessful. Cleaning up: {}", vm); + logger.info("Migration was unsuccessful. 
Cleaning up: {}", vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), dstHostId); // deallocate GPU devices for the VM on the destination host @@ -3237,7 +3248,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { _agentMgr.send(dstHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null); } catch (final AgentUnavailableException ae) { - logger.warn("Looks like the destination Host is unavailable for cleanup", ae); + logger.warn("Destination host {} unavailable for cleanup after failed migration of VM {}", dstHostId, vm.getInstanceName(), ae); } _networkMgr.setHypervisorHostname(profile, dest, false); try { @@ -3246,6 +3257,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac logger.warn(e.getMessage()); } } else { + logger.info("Migration completed successfully for VM {}", vm); _networkMgr.commitNicForMigration(vmSrc, profile); volumeMgr.release(vm.getId(), srcHostId); // deallocate GPU devices for the VM on the src host after migration is complete @@ -3276,6 +3288,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap); } + logger.debug("Setting auto convergence to: {}", StorageManager.KvmAutoConvergence.value()); migrateCommand.setAutoConvergence(StorageManager.KvmAutoConvergence.value()); migrateCommand.setHostGuid(destination.getHost().getGuid()); diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index b9256e5f617..255ada09ef4 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -67,6 +67,31 @@ java-linstor ${cs.java-linstor.version} + + com.fasterxml.jackson.core + jackson-core + ${cs.jackson.version} + + + com.fasterxml.jackson.core + jackson-annotations + ${cs.jackson.version} + + + com.fasterxml.jackson.core + jackson-databind + ${cs.jackson.version} + + + com.fasterxml.jackson.datatype + 
jackson-datatype-jsr310 + ${cs.jackson.version} + + + com.fasterxml.jackson.module + jackson-module-jaxb-annotations + ${cs.jackson.version} + net.java.dev.jna jna diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java index 859de5143f9..81328d6ffb9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java @@ -278,17 +278,20 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper 0 && sleeptime > migrateWait * 1000) { DomainState state = null; try { state = dm.getInfo().state; + logger.info("VM domain state when trying to abort migration : {}", state); } catch (final LibvirtException e) { logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); } if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) { try { DomainJobInfo job = dm.getJobInfo(); - logger.info(String.format("Aborting migration of VM [%s] with domain job [%s] due to time out after %d seconds.", vmName, job, migrateWait)); + logger.warn("Aborting migration of VM {} with domain job [{}] due to timeout after {} seconds. 
" + + "Job stats: data processed={} bytes, data remaining={} bytes", vmName, job, migrateWait, job.getDataProcessed(), job.getDataRemaining()); dm.abortJob(); result = String.format("Migration of VM [%s] was cancelled by CloudStack due to time out after %d seconds.", vmName, migrateWait); commandState = Command.State.FAILED; @@ -303,10 +306,12 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper 0 && sleeptime > migratePauseAfter) { DomainState state = null; try { state = dm.getInfo().state; + logger.info("VM domain state when trying to pause VM for migration: {}", state); } catch (final LibvirtException e) { logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage()); } @@ -381,6 +386,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper optEP = getDiskfullEP(linstorApi, rscName); + final StoragePool pool = (StoragePool) volumeInfo.getDataStore(); + Optional optEP = getDiskfullEP(linstorApi, pool, rscName); if (optEP.isEmpty()) { - optEP = getLinstorEP(linstorApi, rscName); + optEP = getLinstorEP(linstorApi, pool, rscName); } if (optEP.isPresent()) { @@ -1064,13 +1067,29 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver Answer answer = copyVolume(srcData, dstData); res = new CopyCommandResult(null, answer); } else { - Answer answer = new Answer(null, false, "noimpl"); - res = new CopyCommandResult(null, answer); - res.setResult("Not implemented yet"); + throw new CloudRuntimeException("Not implemented for Linstor primary storage."); } callback.complete(res); } + private Host getEnabledClusterHost(StoragePool storagePool, List linstorNodeNames) { + List csHosts; + if (storagePool.getClusterId() != null) { + csHosts = _hostDao.findByClusterId(storagePool.getClusterId()); + } else { + csHosts = _hostDao.findByDataCenterId(storagePool.getDataCenterId()); + } + Collections.shuffle(csHosts); // so we do not always pick the same host for operations + for (HostVO host 
: csHosts) { + if (host.getResourceState() == ResourceState.Enabled && + host.getStatus() == Status.Up && + linstorNodeNames.contains(host.getName())) { + return host; + } + } + return null; + } + /** * Tries to get a Linstor cloudstack end point, that is at least diskless. * @@ -1079,47 +1098,37 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver * @return Optional RemoteHostEndPoint if one could get found. * @throws ApiException */ - private Optional getLinstorEP(DevelopersApi api, String rscName) throws ApiException { + private Optional getLinstorEP(DevelopersApi api, StoragePool storagePool, String rscName) + throws ApiException { List linstorNodeNames = LinstorUtil.getLinstorNodeNames(api); - Collections.shuffle(linstorNodeNames); // do not always pick the first linstor node - - Host host = null; - for (String nodeName : linstorNodeNames) { - host = _hostDao.findByName(nodeName); - if (host != null && host.getResourceState() == ResourceState.Enabled) { - logger.info(String.format("Linstor: Make resource %s available on node %s ...", rscName, nodeName)); - ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, nodeName, new ResourceMakeAvailable()); - if (!answers.hasError()) { - break; // found working host - } else { - logger.error( - String.format("Linstor: Unable to make resource %s on node %s available: %s", - rscName, - nodeName, - LinstorUtil.getBestErrorMessage(answers))); - } + Host host = getEnabledClusterHost(storagePool, linstorNodeNames); + if (host != null) { + logger.info("Linstor: Make resource {} available on node {} ...", rscName, host.getName()); + ApiCallRcList answers = api.resourceMakeAvailableOnNode( + rscName, host.getName(), new ResourceMakeAvailable()); + if (answers.hasError()) { + logger.error("Linstor: Unable to make resource {} on node {} available: {}", + rscName, host.getName(), LinstorUtil.getBestErrorMessage(answers)); + return Optional.empty(); + } else { + return 
Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host)); } } - if (host == null) - { - logger.error("Linstor: Couldn't create a resource on any cloudstack host."); - return Optional.empty(); - } - else - { - return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host)); - } + logger.error("Linstor: Couldn't create a resource on any cloudstack host."); + return Optional.empty(); } - private Optional getDiskfullEP(DevelopersApi api, String rscName) throws ApiException { + private Optional getDiskfullEP(DevelopersApi api, StoragePool storagePool, String rscName) + throws ApiException { List linSPs = LinstorUtil.getDiskfulStoragePools(api, rscName); if (linSPs != null) { - for (com.linbit.linstor.api.model.StoragePool sp : linSPs) { - Host host = _hostDao.findByName(sp.getNodeName()); - if (host != null && host.getResourceState() == ResourceState.Enabled) { - return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host)); - } + List linstorNodeNames = linSPs.stream() + .map(com.linbit.linstor.api.model.StoragePool::getNodeName) + .collect(Collectors.toList()); + Host host = getEnabledClusterHost(storagePool, linstorNodeNames); + if (host != null) { + return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host)); } } logger.error("Linstor: No diskfull host found."); @@ -1200,12 +1209,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver VirtualMachineManager.ExecuteInSequence.value()); try { - Optional optEP = getLinstorEP(api, rscName); + Optional optEP = getLinstorEP(api, pool, rscName); if (optEP.isPresent()) { answer = optEP.get().sendMessage(cmd); } else { - answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint."); deleteResourceDefinition(pool, rscName); + throw new CloudRuntimeException("Unable to get matching Linstor endpoint."); } } catch (ApiException exc) { logger.error("copy template failed: ", exc); @@ -1242,12 +1251,12 @@ public class LinstorPrimaryDataStoreDriverImpl 
implements PrimaryDataStoreDriver Answer answer; try { - Optional optEP = getLinstorEP(api, rscName); + Optional optEP = getLinstorEP(api, pool, rscName); if (optEP.isPresent()) { answer = optEP.get().sendMessage(cmd); } else { - answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint."); + throw new CloudRuntimeException("Unable to get matching Linstor endpoint."); } } catch (ApiException exc) { logger.error("copy volume failed: ", exc); @@ -1280,14 +1289,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver try { String devName = restoreResourceFromSnapshot(api, pool, rscName, snapshotName, restoreName); - Optional optEPAny = getLinstorEP(api, restoreName); + Optional optEPAny = getLinstorEP(api, pool, restoreName); if (optEPAny.isPresent()) { // patch the src device path to the temporary linstor resource snapshotObject.setPath(devName); origCmd.setSrcTO(snapshotObject.getTO()); answer = optEPAny.get().sendMessage(origCmd); - } else{ - answer = new Answer(origCmd, false, "Unable to get matching Linstor endpoint."); + } else { + throw new CloudRuntimeException("Unable to get matching Linstor endpoint."); } } finally { // delete the temporary resource, noop if already gone @@ -1349,7 +1358,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver VirtualMachineManager.ExecuteInSequence.value()); cmd.setOptions(options); - Optional optEP = getDiskfullEP(api, rscName); + Optional optEP = getDiskfullEP(api, pool, rscName); Answer answer; if (optEP.isPresent()) { answer = optEP.get().sendMessage(cmd); diff --git a/pom.xml b/pom.xml index 3c7deba7ebc..33b232045ca 100644 --- a/pom.xml +++ b/pom.xml @@ -188,6 +188,7 @@ 5.3.26 0.5.4 3.1.7 + 3.25.5 @@ -730,6 +731,17 @@ xml-apis 2.0.2 + + + com.google.protobuf + protobuf-java + ${cs.protobuf.version} + + + com.google.protobuf + protobuf-java-util + ${cs.protobuf.version} + com.linbit.linstor.api java-linstor diff --git 
a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 1ae609c7961..17a893c4400 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -7189,6 +7189,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new CloudRuntimeException("Unable to find suitable destination to migrate VM " + vm.getInstanceName()); } + logger.info("Starting migration of VM {} from host {} to host {} ", vm.getInstanceName(), srcHostId, dest.getHost().getId()); collectVmDiskAndNetworkStatistics(vmId, State.Running); _itMgr.migrate(vm.getUuid(), srcHostId, dest); return findMigratedVm(vm.getId(), vm.getType()); @@ -7260,6 +7261,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcHost, Host destinationHost) throws VirtualMachineMigrationException { if (destinationHost == null) { + logger.error("Destination host is null for migration of VM: {}", vm.getInstanceName()); return null; } if (destinationHost.getId() == srcHost.getId()) { diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index c40682cce76..86df45411e9 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -406,6 +406,7 @@ "label.app.name": "CloudStack", "label.application.policy.set": "Application Policy Set", "label.apply": "Apply", +"label.apply.to.all": "Apply to all", "label.apply.tungsten.firewall.policy": "Apply Firewall Policy", "label.apply.tungsten.network.policy": "Apply Network Policy", "label.apply.tungsten.tag": "Apply tag", @@ -4049,6 +4050,7 @@ "message.vnf.no.credentials": "No credentials found for the VNF appliance.", "message.vnf.select.networks": "Please select the relevant network for each VNF NIC.", "message.volume.desc": "Volume to use as a ROOT disk", 
+"message.volume.pool.apply.to.all": "Selected storage pool will be applied to all existing volumes of the instance.", "message.volume.state.allocated": "The volume is allocated but has not been created yet.", "message.volume.state.attaching": "The volume is attaching to a volume from Ready state.", "message.volume.state.copying": "The volume is being copied from the image store to primary storage, in case it's an uploaded volume.", diff --git a/ui/src/components/view/InfoCard.vue b/ui/src/components/view/InfoCard.vue index 3b84def952e..0031d730f56 100644 --- a/ui/src/components/view/InfoCard.vue +++ b/ui/src/components/view/InfoCard.vue @@ -709,7 +709,7 @@
{{ $t('label.storagepool') }}
- {{ resource.storage || resource.storageid }} + {{ resource.storage || resource.storageid }} {{ resource.storage || resource.storageid }} {{ resource.storagetype }} diff --git a/ui/src/components/view/InstanceVolumesStoragePoolSelectListView.vue b/ui/src/components/view/InstanceVolumesStoragePoolSelectListView.vue index 5319e39334b..b5663402a93 100644 --- a/ui/src/components/view/InstanceVolumesStoragePoolSelectListView.vue +++ b/ui/src/components/view/InstanceVolumesStoragePoolSelectListView.vue @@ -206,13 +206,19 @@ export default { closeVolumeStoragePoolSelector () { this.selectedVolumeForStoragePoolSelection = {} }, - handleVolumeStoragePoolSelection (volumeId, storagePool) { + handleVolumeStoragePoolSelection (volumeId, storagePool, applyToAll) { for (const volume of this.volumes) { - if (volume.id === volumeId) { + if (applyToAll) { volume.selectedstorageid = storagePool.id volume.selectedstoragename = storagePool.name volume.selectedstorageclusterid = storagePool.clusterid - break + } else { + if (volume.id === volumeId) { + volume.selectedstorageid = storagePool.id + volume.selectedstoragename = storagePool.name + volume.selectedstorageclusterid = storagePool.clusterid + break + } } } this.updateVolumeToStoragePoolSelection() diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 6445bdf61cf..ba75465d892 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -161,17 +161,9 @@ >{{ $t(text.toLowerCase()) }} - {{ text }} - {{ text }} - + {{ text }} + {{ text }} +   @@ -607,10 +599,7 @@ {{ text }}