mirror of https://github.com/apache/cloudstack.git
Merge remote-tracking branch 'apache/4.22'
commit 538578366a
@@ -3053,7 +3053,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     }
 
     protected void migrate(final VMInstanceVO vm, final long srcHostId, final DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException {
-        logger.info("Migrating {} to {}", vm, dest);
+        logger.info("Start preparing migration of the VM: {} to {}", vm, dest);
         final long dstHostId = dest.getHost().getId();
         final Host fromHost = _hostDao.findById(srcHostId);
         if (fromHost == null) {

@@ -3118,9 +3118,11 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             if (pfma == null || !pfma.getResult()) {
                 final String details = pfma != null ? pfma.getDetails() : "null answer returned";
                 final String msg = "Unable to prepare for migration due to " + details;
+                logger.error("Failed to prepare destination host {} for migration of VM {} : {}", dstHostId, vm.getInstanceName(), details);
                 pfma = null;
                 throw new AgentUnavailableException(msg, dstHostId);
             }
+            logger.debug("Successfully prepared destination host {} for migration of VM {} ", dstHostId, vm.getInstanceName());
         } catch (final OperationTimedoutException e1) {
             throw new AgentUnavailableException("Operation timed out", dstHostId);
         } finally {

@@ -3141,18 +3143,23 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 volumeMgr.release(vm.getId(), dstHostId);
             }
 
-                logger.info("Migration cancelled because state has changed: {}", vm);
-                throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm);
+                String msg = "Migration cancelled because state has changed: " + vm;
+                logger.warn(msg);
+                throw new ConcurrentOperationException(msg);
             }
         } catch (final NoTransitionException e1) {
             _networkMgr.rollbackNicForMigration(vmSrc, profile);
             volumeMgr.release(vm.getId(), dstHostId);
-            logger.info("Migration cancelled because {}", e1.getMessage());
+            String msg = String.format("Migration cancelled for VM %s due to state transition failure: %s",
+                    vm.getInstanceName(), e1.getMessage());
+            logger.warn(msg, e1);
             throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage());
         } catch (final CloudRuntimeException e2) {
             _networkMgr.rollbackNicForMigration(vmSrc, profile);
             volumeMgr.release(vm.getId(), dstHostId);
-            logger.info("Migration cancelled because {}", e2.getMessage());
+            String msg = String.format("Migration cancelled for VM %s due to runtime exception: %s",
+                    vm.getInstanceName(), e2.getMessage());
+            logger.error(msg, e2);
             work.setStep(Step.Done);
             _workDao.update(work.getId(), work);
             try {

@@ -3172,8 +3179,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             final Answer ma = _agentMgr.send(vm.getLastHostId(), mc);
             if (ma == null || !ma.getResult()) {
                 final String details = ma != null ? ma.getDetails() : "null answer returned";
+                String msg = String.format("Migration command failed for VM %s on source host id=%s to destination host %s: %s",
+                        vm.getInstanceName(), vm.getLastHostId(), dstHostId, details);
+                logger.error(msg);
                 throw new CloudRuntimeException(details);
             }
+            logger.info("Migration command successful for VM {}", vm.getInstanceName());
         } catch (final OperationTimedoutException e) {
             boolean success = false;
             if (HypervisorType.KVM.equals(vm.getHypervisorType())) {

@@ -3210,7 +3221,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
         try {
             if (!checkVmOnHost(vm, dstHostId)) {
-                logger.error("Unable to complete migration for {}", vm);
+                logger.error("Migration verification failed for VM {} : VM not found on destination host {} ", vm.getInstanceName(), dstHostId);
                 try {
                     _agentMgr.send(srcHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null);
                 } catch (final AgentUnavailableException e) {

@@ -3237,7 +3248,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 try {
                     _agentMgr.send(dstHostId, new Commands(cleanup(vm, dpdkInterfaceMapping)), null);
                 } catch (final AgentUnavailableException ae) {
-                    logger.warn("Looks like the destination Host is unavailable for cleanup", ae);
+                    logger.warn("Destination host {} unavailable for cleanup after failed migration of VM {}", dstHostId, vm.getInstanceName(), ae);
                 }
                 _networkMgr.setHypervisorHostname(profile, dest, false);
                 try {

@@ -3246,6 +3257,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
                 logger.warn(e.getMessage());
             }
         } else {
+            logger.info("Migration completed successfully for VM %s" + vm);
             _networkMgr.commitNicForMigration(vmSrc, profile);
             volumeMgr.release(vm.getId(), srcHostId);
             // deallocate GPU devices for the VM on the src host after migration is complete

@@ -3276,6 +3288,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
             migrateCommand.setVlanToPersistenceMap(vlanToPersistenceMap);
         }
 
+        logger.debug("Setting auto convergence to: {}", StorageManager.KvmAutoConvergence.value());
         migrateCommand.setAutoConvergence(StorageManager.KvmAutoConvergence.value());
         migrateCommand.setHostGuid(destination.getHost().getGuid());
 
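The VirtualMachineManagerImpl hunks above repeatedly apply one logging pattern: compose the message once, log it (with the cause where one is available), and throw an exception carrying the same text, so the log line and the surfaced error can be correlated. A minimal self-contained sketch of that pattern; the class and method names below are illustrative stand-ins, not CloudStack API:

import java.lang.String;

public class LogAndThrowSketch {
    public static void main(String[] args) {
        try {
            cancelMigration("i-2-10-VM", new IllegalStateException("state has changed"));
        } catch (RuntimeException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }

    // Mirrors the catch blocks above: one formatted message feeds both the log and the exception.
    static void cancelMigration(String vmName, Exception cause) {
        String msg = String.format("Migration cancelled for VM %s due to %s", vmName, cause.getMessage());
        System.err.println("WARN " + msg); // stands in for logger.warn(msg, cause)
        throw new RuntimeException(msg, cause);
    }
}
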
@@ -67,6 +67,31 @@
         <artifactId>java-linstor</artifactId>
         <version>${cs.java-linstor.version}</version>
     </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-core</artifactId>
+        <version>${cs.jackson.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-annotations</artifactId>
+        <version>${cs.jackson.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.core</groupId>
+        <artifactId>jackson-databind</artifactId>
+        <version>${cs.jackson.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.datatype</groupId>
+        <artifactId>jackson-datatype-jsr310</artifactId>
+        <version>${cs.jackson.version}</version>
+    </dependency>
+    <dependency>
+        <groupId>com.fasterxml.jackson.module</groupId>
+        <artifactId>jackson-module-jaxb-annotations</artifactId>
+        <version>${cs.jackson.version}</version>
+    </dependency>
     <dependency>
         <groupId>net.java.dev.jna</groupId>
         <artifactId>jna</artifactId>
@@ -278,17 +278,20 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
 
                 // abort the vm migration if the job is executed more than vm.migrate.wait
                 final int migrateWait = libvirtComputingResource.getMigrateWait();
+                logger.info("vm.migrate.wait value set to: {}for VM: {}", migrateWait, vmName);
                 if (migrateWait > 0 && sleeptime > migrateWait * 1000) {
                     DomainState state = null;
                     try {
                         state = dm.getInfo().state;
+                        logger.info("VM domain state when trying to abort migration : {}", state);
                     } catch (final LibvirtException e) {
                         logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
                     }
                     if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
                         try {
                             DomainJobInfo job = dm.getJobInfo();
-                            logger.info(String.format("Aborting migration of VM [%s] with domain job [%s] due to time out after %d seconds.", vmName, job, migrateWait));
+                            logger.warn("Aborting migration of VM {} with domain job [{}] due to timeout after {} seconds. " +
+                                    "Job stats: data processed={} bytes, data remaining={} bytes", vmName, job, migrateWait, job.getDataProcessed(), job.getDataRemaining());
                             dm.abortJob();
                             result = String.format("Migration of VM [%s] was cancelled by CloudStack due to time out after %d seconds.", vmName, migrateWait);
                             commandState = Command.State.FAILED;
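The hunk above sits inside a polling loop that accumulates `sleeptime` and aborts the libvirt domain job once the `vm.migrate.wait` budget is exceeded; a non-positive budget disables the check. A minimal self-contained sketch of that watchdog pattern, using a plain `Future` in place of the libvirt domain job (all names here are illustrative, not CloudStack API):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class MigrateWatchdogSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // Stands in for the long-running migration job.
        Future<?> migration = pool.submit(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        });

        final long migrateWaitMs = 5_000; // plays the role of vm.migrate.wait * 1000
        long sleptMs = 0;
        while (!migration.isDone()) {
            Thread.sleep(1_000);
            sleptMs += 1_000;
            // Same guard as above: a non-positive budget disables the watchdog.
            if (migrateWaitMs > 0 && sleptMs > migrateWaitMs) {
                migration.cancel(true); // dm.abortJob() in the libvirt wrapper
                System.out.println("migration aborted after " + sleptMs + " ms");
                break;
            }
        }
        pool.shutdownNow();
    }
}
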
@@ -303,10 +306,12 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
 
                 // pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
                 final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
+                logger.info("vm.migrate.pauseafter value set to: {} for VM: {}", migratePauseAfter, vmName);
                 if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
                     DomainState state = null;
                     try {
                         state = dm.getInfo().state;
+                        logger.info("VM domain state when trying to pause VM for migration: {}", state);
                     } catch (final LibvirtException e) {
                         logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
                     }

@@ -381,6 +386,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
             }
 
             if (result == null) {
+                logger.info("Post-migration cleanup for VM {}: ", vmName);
                 libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
                 for (final InterfaceDef iface : ifaces) {
                     String vlanId = libvirtComputingResource.getVlanIdFromBridgeName(iface.getBrName());

@@ -394,6 +400,7 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
             commandState = Command.State.COMPLETED;
             libvirtComputingResource.createOrUpdateLogFileForCommand(command, commandState);
         } else if (commandState == null) {
+            logger.error("Migration of VM {} failed with result: {}", vmName, result);
             commandState = Command.State.FAILED;
             libvirtComputingResource.createOrUpdateLogFileForCommand(command, commandState);
         }
@@ -56,6 +56,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
         final VirtualMachineTO vm = command.getVirtualMachine();
 
         if (command.isRollback()) {
+            logger.info("Handling rollback for PrepareForMigration of VM {}", vm.getName());
             return handleRollback(command, libvirtComputingResource);
         }
 

@@ -83,6 +84,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
                 if (interfaceDef != null && interfaceDef.getNetType() == GuestNetType.VHOSTUSER) {
                     DpdkTO to = new DpdkTO(interfaceDef.getDpdkOvsPath(), interfaceDef.getDpdkSourcePort(), interfaceDef.getInterfaceMode());
                     dpdkInterfaceMapping.put(nic.getMac(), to);
+                    logger.debug("Configured DPDK interface for VM {}", vm.getName());
                 }
             }
 

@@ -122,6 +124,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
                 return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host");
             }
 
+            logger.info("Successfully prepared destination host for migration of VM {}", vm.getName());
             return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm);
         } catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) {
             if (MapUtils.isNotEmpty(dpdkInterfaceMapping)) {

@@ -157,6 +160,7 @@ public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapp
         KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr();
         VirtualMachineTO vmTO = command.getVirtualMachine();
 
+        logger.info("Rolling back PrepareForMigration for VM {}: disconnecting physical disks", vmTO.getName());
         if (!storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vmTO)) {
             return new PrepareForMigrationAnswer(command, "failed to disconnect physical disks from host");
         }
@@ -350,7 +350,7 @@ public class CephObjectStoreDriverImpl extends BaseObjectStoreDriverImpl {
                 new AWSStaticCredentialsProvider(
                         new BasicAWSCredentials(accessKey, secretKey)))
                 .withEndpointConfiguration(
-                        new AwsClientBuilder.EndpointConfiguration(url, null))
+                        new AwsClientBuilder.EndpointConfiguration(url, "us-east-1"))
                 .build();
 
         if (client == null) {
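The one-line change above swaps a null signing region for "us-east-1": with a custom endpoint, AWS SDK v1 still needs a non-null region to sign requests, and Ceph RGW accepts an arbitrary region name. A self-contained sketch of the same client construction; the class and parameter names are illustrative except for the SDK calls themselves:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class RgwClientSketch {
    // Builds an S3 client against a Ceph RGW endpoint; the region string is
    // required by the SDK's signer but is otherwise ignored by RGW.
    public static AmazonS3 build(String url, String accessKey, String secretKey) {
        return AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials(accessKey, secretKey)))
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration(url, "us-east-1"))
                .build();
    }
}
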
@@ -63,6 +63,8 @@ import com.cloud.api.storage.LinstorBackupSnapshotCommand;
 import com.cloud.api.storage.LinstorRevertBackupSnapshotCommand;
 import com.cloud.configuration.Config;
 import com.cloud.host.Host;
+import com.cloud.host.HostVO;
+import com.cloud.host.Status;
 import com.cloud.host.dao.HostDao;
 import com.cloud.resource.ResourceState;
 import com.cloud.storage.DataStoreRole;

@@ -922,9 +924,10 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
             _backupsnapshotwait,
             VirtualMachineManager.ExecuteInSequence.value());
 
-        Optional<RemoteHostEndPoint> optEP = getDiskfullEP(linstorApi, rscName);
+        final StoragePool pool = (StoragePool) volumeInfo.getDataStore();
+        Optional<RemoteHostEndPoint> optEP = getDiskfullEP(linstorApi, pool, rscName);
         if (optEP.isEmpty()) {
-            optEP = getLinstorEP(linstorApi, rscName);
+            optEP = getLinstorEP(linstorApi, pool, rscName);
         }
 
         if (optEP.isPresent()) {

@@ -1064,13 +1067,29 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
             Answer answer = copyVolume(srcData, dstData);
             res = new CopyCommandResult(null, answer);
         } else {
-            Answer answer = new Answer(null, false, "noimpl");
-            res = new CopyCommandResult(null, answer);
-            res.setResult("Not implemented yet");
+            throw new CloudRuntimeException("Not implemented for Linstor primary storage.");
         }
         callback.complete(res);
     }
 
+    private Host getEnabledClusterHost(StoragePool storagePool, List<String> linstorNodeNames) {
+        List<HostVO> csHosts;
+        if (storagePool.getClusterId() != null) {
+            csHosts = _hostDao.findByClusterId(storagePool.getClusterId());
+        } else {
+            csHosts = _hostDao.findByDataCenterId(storagePool.getDataCenterId());
+        }
+        Collections.shuffle(csHosts); // so we do not always pick the same host for operations
+        for (HostVO host : csHosts) {
+            if (host.getResourceState() == ResourceState.Enabled &&
+                host.getStatus() == Status.Up &&
+                linstorNodeNames.contains(host.getName())) {
+                return host;
+            }
+        }
+        return null;
+    }
+
     /**
      * Tries to get a Linstor cloudstack end point, that is at least diskless.
      *
@@ -1079,49 +1098,39 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
      * @return Optional RemoteHostEndPoint if one could get found.
      * @throws ApiException
      */
-    private Optional<RemoteHostEndPoint> getLinstorEP(DevelopersApi api, String rscName) throws ApiException {
+    private Optional<RemoteHostEndPoint> getLinstorEP(DevelopersApi api, StoragePool storagePool, String rscName)
+        throws ApiException {
         List<String> linstorNodeNames = LinstorUtil.getLinstorNodeNames(api);
-        Collections.shuffle(linstorNodeNames); // do not always pick the first linstor node
 
-        Host host = null;
-        for (String nodeName : linstorNodeNames) {
-            host = _hostDao.findByName(nodeName);
-            if (host != null && host.getResourceState() == ResourceState.Enabled) {
-                logger.info(String.format("Linstor: Make resource %s available on node %s ...", rscName, nodeName));
-                ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, nodeName, new ResourceMakeAvailable());
-                if (!answers.hasError()) {
-                    break; // found working host
-                } else {
-                    logger.error(
-                        String.format("Linstor: Unable to make resource %s on node %s available: %s",
-                            rscName,
-                            nodeName,
-                            LinstorUtil.getBestErrorMessage(answers)));
-                }
-            }
-        }
+        Host host = getEnabledClusterHost(storagePool, linstorNodeNames);
+        if (host != null) {
+            logger.info("Linstor: Make resource {} available on node {} ...", rscName, host.getName());
+            ApiCallRcList answers = api.resourceMakeAvailableOnNode(
+                rscName, host.getName(), new ResourceMakeAvailable());
+            if (answers.hasError()) {
+                logger.error("Linstor: Unable to make resource {} on node {} available: {}",
+                    rscName, host.getName(), LinstorUtil.getBestErrorMessage(answers));
+                return Optional.empty();
+            }
+            return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
+        }
 
-        if (host == null)
-        {
-            logger.error("Linstor: Couldn't create a resource on any cloudstack host.");
-            return Optional.empty();
-        }
-        else
-        {
-            return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
-        }
+        logger.error("Linstor: Couldn't create a resource on any cloudstack host.");
+        return Optional.empty();
     }
 
-    private Optional<RemoteHostEndPoint> getDiskfullEP(DevelopersApi api, String rscName) throws ApiException {
+    private Optional<RemoteHostEndPoint> getDiskfullEP(DevelopersApi api, StoragePool storagePool, String rscName)
+        throws ApiException {
         List<com.linbit.linstor.api.model.StoragePool> linSPs = LinstorUtil.getDiskfulStoragePools(api, rscName);
         if (linSPs != null) {
-            for (com.linbit.linstor.api.model.StoragePool sp : linSPs) {
-                Host host = _hostDao.findByName(sp.getNodeName());
-                if (host != null && host.getResourceState() == ResourceState.Enabled) {
-                    return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
-                }
-            }
+            List<String> linstorNodeNames = linSPs.stream()
+                .map(com.linbit.linstor.api.model.StoragePool::getNodeName)
+                .collect(Collectors.toList());
+            Host host = getEnabledClusterHost(storagePool, linstorNodeNames);
+            if (host != null) {
+                return Optional.of(RemoteHostEndPoint.getHypervisorHostEndPoint(host));
+            }
         }
         logger.error("Linstor: No diskfull host found.");
        return Optional.empty();
    }
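Both endpoint helpers now delegate host picking to getEnabledClusterHost, which shuffles the cluster's hosts and returns the first one that is Enabled, Up, and a known LINSTOR node, so repeated operations spread load across hosts. A self-contained sketch of that shuffle-then-filter strategy; the Host record and its fields below are stand-ins for the HostVO/ResourceState/Status checks, not CloudStack types:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Set;

public class PickHostSketch {
    record Host(String name, boolean enabled, boolean up) { }

    static Optional<Host> pick(List<Host> candidates, Set<String> linstorNodeNames) {
        List<Host> pool = new ArrayList<>(candidates);
        Collections.shuffle(pool); // randomize so we do not always pick the same host
        return pool.stream()
                .filter(h -> h.enabled() && h.up() && linstorNodeNames.contains(h.name()))
                .findFirst();
    }

    public static void main(String[] args) {
        List<Host> hosts = List.of(
                new Host("kvm1", true, true),
                new Host("kvm2", false, true)); // disabled, so never picked
        System.out.println(pick(hosts, Set.of("kvm1", "kvm2")).map(Host::name).orElse("none"));
    }
}
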
@@ -1200,12 +1209,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
             VirtualMachineManager.ExecuteInSequence.value());
 
         try {
-            Optional<RemoteHostEndPoint> optEP = getLinstorEP(api, rscName);
+            Optional<RemoteHostEndPoint> optEP = getLinstorEP(api, pool, rscName);
             if (optEP.isPresent()) {
                 answer = optEP.get().sendMessage(cmd);
             } else {
-                answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint.");
+                deleteResourceDefinition(pool, rscName);
+                throw new CloudRuntimeException("Unable to get matching Linstor endpoint.");
             }
         } catch (ApiException exc) {
             logger.error("copy template failed: ", exc);

@@ -1242,12 +1251,12 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         Answer answer;
 
         try {
-            Optional<RemoteHostEndPoint> optEP = getLinstorEP(api, rscName);
+            Optional<RemoteHostEndPoint> optEP = getLinstorEP(api, pool, rscName);
             if (optEP.isPresent()) {
                 answer = optEP.get().sendMessage(cmd);
             }
             else {
-                answer = new Answer(cmd, false, "Unable to get matching Linstor endpoint.");
+                throw new CloudRuntimeException("Unable to get matching Linstor endpoint.");
             }
         } catch (ApiException exc) {
             logger.error("copy volume failed: ", exc);

@@ -1280,14 +1289,14 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
         try {
             String devName = restoreResourceFromSnapshot(api, pool, rscName, snapshotName, restoreName);
 
-            Optional<RemoteHostEndPoint> optEPAny = getLinstorEP(api, restoreName);
+            Optional<RemoteHostEndPoint> optEPAny = getLinstorEP(api, pool, restoreName);
             if (optEPAny.isPresent()) {
                 // patch the src device path to the temporary linstor resource
                 snapshotObject.setPath(devName);
                 origCmd.setSrcTO(snapshotObject.getTO());
                 answer = optEPAny.get().sendMessage(origCmd);
             } else {
-                answer = new Answer(origCmd, false, "Unable to get matching Linstor endpoint.");
+                throw new CloudRuntimeException("Unable to get matching Linstor endpoint.");
             }
         } finally {
             // delete the temporary resource, noop if already gone

@@ -1349,7 +1358,7 @@ public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver
             VirtualMachineManager.ExecuteInSequence.value());
         cmd.setOptions(options);
 
-        Optional<RemoteHostEndPoint> optEP = getDiskfullEP(api, rscName);
+        Optional<RemoteHostEndPoint> optEP = getDiskfullEP(api, pool, rscName);
         Answer answer;
         if (optEP.isPresent()) {
             answer = optEP.get().sendMessage(cmd);
pom.xml
@@ -188,6 +188,7 @@
     <org.springframework.version>5.3.26</org.springframework.version>
     <cs.ini.version>0.5.4</cs.ini.version>
     <cs.caffeine.version>3.1.7</cs.caffeine.version>
+    <cs.protobuf.version>3.25.5</cs.protobuf.version>
   </properties>
 
   <distributionManagement>

@@ -730,6 +731,17 @@
       <artifactId>xml-apis</artifactId>
       <version>2.0.2</version>
     </dependency>
+    <!-- enforced protobuf version here as mysql-connector-java is pulling older version (3.19.3) -->
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <version>${cs.protobuf.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java-util</artifactId>
+      <version>${cs.protobuf.version}</version>
+    </dependency>
     <dependency>
       <groupId>com.linbit.linstor.api</groupId>
       <artifactId>java-linstor</artifactId>
@@ -7189,6 +7189,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
             throw new CloudRuntimeException("Unable to find suitable destination to migrate VM " + vm.getInstanceName());
         }
 
+        logger.info("Starting migration of VM {} from host {} to host {} ", vm.getInstanceName(), srcHostId, dest.getHost().getId());
         collectVmDiskAndNetworkStatistics(vmId, State.Running);
         _itMgr.migrate(vm.getUuid(), srcHostId, dest);
         return findMigratedVm(vm.getId(), vm.getType());

@@ -7260,6 +7261,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir
 
     private DeployDestination checkVmMigrationDestination(VMInstanceVO vm, Host srcHost, Host destinationHost) throws VirtualMachineMigrationException {
         if (destinationHost == null) {
+            logger.error("Destination host is null for migration of VM: {}", vm.getInstanceName());
             return null;
         }
         if (destinationHost.getId() == srcHost.getId()) {
@@ -406,6 +406,7 @@
     "label.app.name": "CloudStack",
     "label.application.policy.set": "Application Policy Set",
     "label.apply": "Apply",
+    "label.apply.to.all": "Apply to all",
     "label.apply.tungsten.firewall.policy": "Apply Firewall Policy",
     "label.apply.tungsten.network.policy": "Apply Network Policy",
     "label.apply.tungsten.tag": "Apply tag",

@@ -4049,6 +4050,7 @@
     "message.vnf.no.credentials": "No credentials found for the VNF appliance.",
     "message.vnf.select.networks": "Please select the relevant network for each VNF NIC.",
     "message.volume.desc": "Volume to use as a ROOT disk",
+    "message.volume.pool.apply.to.all": "Selected storage pool will be applied to all existing volumes of the instance.",
     "message.volume.state.allocated": "The volume is allocated but has not been created yet.",
     "message.volume.state.attaching": "The volume is attaching to a volume from Ready state.",
     "message.volume.state.copying": "The volume is being copied from the image store to primary storage, in case it's an uploaded volume.",
@@ -709,7 +709,7 @@
           <div class="resource-detail-item__label">{{ $t('label.storagepool') }}</div>
           <div class="resource-detail-item__details">
             <database-outlined />
-            <router-link v-if="!isStatic && $router.resolve('/storagepool/' + resource.storageid).matched[0].redirect !== '/exception/404'" :to="{ path: '/storagepool/' + resource.storageid }">{{ resource.storage || resource.storageid }} </router-link>
+            <router-link v-if="!isStatic && $router.resolve('/storagepool/' + encodeURIComponent(resource.storageid)).matched[0].redirect !== '/exception/404'" :to="{ path: '/storagepool/' + encodeURIComponent(resource.storageid) }">{{ resource.storage || resource.storageid }} </router-link>
             <span v-else>{{ resource.storage || resource.storageid }}</span>
             <a-tag style="margin-left: 5px;" v-if="resource.storagetype">
               {{ resource.storagetype }}
@@ -206,8 +206,13 @@ export default {
     closeVolumeStoragePoolSelector () {
       this.selectedVolumeForStoragePoolSelection = {}
     },
-    handleVolumeStoragePoolSelection (volumeId, storagePool) {
+    handleVolumeStoragePoolSelection (volumeId, storagePool, applyToAll) {
       for (const volume of this.volumes) {
-        if (volume.id === volumeId) {
+        if (applyToAll) {
           volume.selectedstorageid = storagePool.id
           volume.selectedstoragename = storagePool.name
+          volume.selectedstorageclusterid = storagePool.clusterid
+        } else {
+          if (volume.id === volumeId) {
+            volume.selectedstorageid = storagePool.id
+            volume.selectedstoragename = storagePool.name

@@ -215,6 +220,7 @@ export default {
             break
           }
         }
+      }
       this.updateVolumeToStoragePoolSelection()
     },
     updateVolumeToStoragePoolSelection () {
@@ -161,17 +161,9 @@
           >{{ $t(text.toLowerCase()) }}</router-link>
         </span>
         <span v-else>
-          <router-link
-            :to="{ path: $route.path + '/' + record.id }"
-            v-if="record.id"
-          >{{ text }}</router-link>
-          <router-link
-            :to="{ path: $route.path + '/' + record.name }"
-            v-else
-          >{{ text }}</router-link>
-          <span
-            v-if="['guestnetwork','vpc'].includes($route.path.split('/')[1]) && record.restartrequired && !record.vpcid"
-          >
+          <router-link :to="{ path: $route.path + '/' + encodeURIComponent(record.id) }" v-if="record.id">{{ text }}</router-link>
+          <router-link :to="{ path: $route.path + '/' + record.name }" v-else>{{ text }}</router-link>
+          <span v-if="['guestnetwork','vpc'].includes($route.path.split('/')[1]) && record.restartrequired && !record.vpcid">
 
           <a-tooltip>
             <template #title>{{ $t('label.restartrequired') }}</template>

@@ -607,10 +599,7 @@
           <span v-else>{{ text }}</span>
         </template>
         <template v-if="column.key === 'storage'">
-          <router-link
-            v-if="record.storageid"
-            :to="{ path: '/storagepool/' + record.storageid }"
-          >{{ text }}</router-link>
+          <router-link v-if="record.storageid" :to="{ path: '/storagepool/' + encodeURIComponent(record.storageid) }">{{ text }}</router-link>
           <span v-else>{{ text }}</span>
         </template>
         <template
@@ -25,6 +25,15 @@
       :autoAssignAllowed="autoAssignAllowed"
       @select="handleSelect" />
 
+    <a-form-item
+      class="top-spaced">
+      <template #label>
+        <tooltip-label :title="$t('label.apply.to.all')" :tooltip="$t('message.volume.pool.apply.to.all')"/>
+      </template>
+      <a-switch
+        v-model:checked="applyToAll" />
+    </a-form-item>
+
     <a-divider />
 
     <div class="actions">

@@ -36,11 +45,13 @@
 </template>
 
 <script>
+import TooltipLabel from '@/components/widgets/TooltipLabel'
 import StoragePoolSelectView from '@/components/view/StoragePoolSelectView'
 
 export default {
   name: 'VolumeStoragePoolSelectionForm',
   components: {
+    TooltipLabel,
     StoragePoolSelectView
   },
   props: {

@@ -70,7 +81,8 @@ export default {
   },
   data () {
     return {
-      selectedStoragePool: null
+      selectedStoragePool: null,
+      applyToAll: false
     }
   },
   watch: {

@@ -95,7 +107,7 @@ export default {
       }
     },
     submitForm () {
-      this.$emit('select', this.resource.id, this.selectedStoragePool)
+      this.$emit('select', this.resource.id, this.selectedStoragePool, this.applyToAll)
       this.closeModal()
     }
   }
@@ -41,7 +41,7 @@
       {{ parseFloat(record.size / (1024.0 * 1024.0 * 1024.0)).toFixed(2) }} GB
     </template>
     <template v-if="column.key === 'storage'">
-      <router-link v-if="record.storageid" :to="{ path: '/storagepool/' + record.storageid }">{{ text }}</router-link>
+      <router-link v-if="record.storageid" :to="{ path: '/storagepool/' + encodeURIComponent(record.storageid) }">{{ text }}</router-link>
       <span v-else>{{ text }}</span>
     </template>
   </template>
@@ -100,7 +100,7 @@ export default {
       this.breadList = []
       this.$route.matched.forEach((item, idx) => {
         const parent = this.$route.matched[idx - 1]
-        if (item && parent && parent.name !== 'index' && !item.path.endsWith(':id')) {
+        if (item && parent && parent.name !== 'index' && !item.path.endsWith(':id') && !item.path.endsWith(':id(.*)')) {
           this.breadList.pop()
         }
         this.breadList.push(item)
@@ -92,7 +92,7 @@ function generateRouterMap (section) {
       hideChildrenInMenu: true,
       children: [
         {
-          path: '/' + child.name + '/:id',
+          path: '/' + child.name + '/:id(.*)',
           hidden: child.hidden,
           meta: {
             title: child.title,

@@ -147,7 +147,7 @@ function generateRouterMap (section) {
     map.meta.tabs = section.tabs
 
     map.children = [{
-      path: '/' + section.name + '/:id',
+      path: '/' + section.name + '/:id(.*)',
       actions: section.actions ? section.actions : [],
       meta: {
         title: section.title,
@@ -562,7 +562,7 @@ const user = {
         }).catch(error => {
           reject(error)
         })
-
+        if ('listConfigurations' in store.getters.apis) {
         getAPI('listConfigurations', { name: 'hypervisor.custom.display.name' }).then(json => {
           if (json.listconfigurationsresponse.configuration !== null) {
             const config = json.listconfigurationsresponse.configuration[0]

@@ -571,6 +571,7 @@ const user = {
         }).catch(error => {
           reject(error)
         })
+        }
       })
     },
     UpdateConfiguration ({ commit }) {
@@ -54,7 +54,7 @@ const err = (error) => {
     if (response.config && response.config.params && ['forgotPassword', 'listIdps', 'cloudianIsEnabled'].includes(response.config.params.command)) {
       return
     }
-    const originalPath = router.currentRoute.value.fullPath
+    const originalPath = router.currentRoute.value.path
     for (const key in response.data) {
       if (key.includes('response')) {
         if (response.data[key].errortext.includes('not available for user')) {
@@ -26,7 +26,7 @@
       class="top-spaced"
       :placeholder="$t('label.search')"
       v-model:value="searchQuery"
-      @search="fetchData"
+      @search="fetchHostsForMigration"
       v-focus="true" />
     <a-table
       class="top-spaced"

@@ -97,7 +97,7 @@
     </a-pagination>
 
     <a-form-item
-      v-if="isUserVm"
+      v-if="isUserVm && hasVolumes"
       class="top-spaced">
       <template #label>
         <tooltip-label :title="$t('label.migrate.with.storage')" :tooltip="$t('message.migrate.with.storage')"/>

@@ -106,9 +106,29 @@
         v-model:checked="migrateWithStorage"
         :disabled="!selectedHost || !selectedHost.id || selectedHost.id === -1" />
     </a-form-item>
 
+    <a-radio-group
+      v-if="migrateWithStorage"
+      v-model:value="migrateMode"
+      @change="e => { handleMigrateModeChange(e.target.value) }">
+      <a-radio class="radio-style" :value="1">
+        {{ $t('label.migrate.instance.single.storage') }}
+      </a-radio>
+      <a-radio class="radio-style" :value="2">
+        {{ $t('label.migrate.instance.specific.storages') }}
+      </a-radio>
+    </a-radio-group>
+
+    <div v-if="migrateWithStorage && migrateMode == 1">
+      <storage-pool-select-view
+        ref="storagePoolSelection"
+        :autoAssignAllowed="false"
+        :resource="resource"
+        @select="handleStoragePoolChange" />
+    </div>
     <instance-volumes-storage-pool-select-list-view
       ref="volumeToPoolSelect"
-      v-if="migrateWithStorage"
+      v-if="migrateWithStorage && migrateMode !== 1"
       class="top-spaced"
       :resource="resource"
       :clusterId="selectedHost.id ? selectedHost.clusterid : null"

@@ -118,7 +138,7 @@
 
     <div class="actions">
       <a-button @click="closeModal">{{ $t('label.cancel') }}</a-button>
-      <a-button type="primary" ref="submit" :disabled="!selectedHost.id" @click="submitForm">{{ $t('label.ok') }}</a-button>
+      <a-button type="primary" ref="submit" :disabled="!selectedHost.id || (migrateWithStorage && migrateMode === 1 && !volumeToPoolSelection.length)" @click="submitForm">{{ $t('label.ok') }}</a-button>
     </div>
   </div>
 </template>

@@ -126,12 +146,14 @@
 <script>
 import { getAPI, postAPI } from '@/api'
 import TooltipLabel from '@/components/widgets/TooltipLabel'
+import StoragePoolSelectView from '@/components/view/StoragePoolSelectView'
 import InstanceVolumesStoragePoolSelectListView from '@/components/view/InstanceVolumesStoragePoolSelectListView'
 
 export default {
   name: 'VMMigrateWizard',
   components: {
     TooltipLabel,
+    StoragePoolSelectView,
     InstanceVolumesStoragePoolSelectListView
   },
   props: {

@@ -188,6 +210,7 @@ export default {
       }
     ],
     migrateWithStorage: false,
+    migrateMode: 1,
     volumeToPoolSelection: [],
     volumes: []
   }

@@ -198,6 +221,9 @@ export default {
   computed: {
     isUserVm () {
       return this.$route.meta.resourceType === 'UserVm'
     },
+    hasVolumes () {
+      return this.volumes && this.volumes.length > 0
+    }
   },
   watch: {

@@ -212,6 +238,10 @@ export default {
       return array !== null && array !== undefined && Array.isArray(array) && array.length > 0
     },
-    fetchData () {
+    fetchData () {
+      this.fetchHostsForMigration()
+      this.fetchVolumes()
+    },
+    fetchHostsForMigration () {
       this.loading = true
       getAPI('findHostsForMigration', {
         virtualmachineid: this.resource.id,

@@ -240,17 +270,16 @@ export default {
     handleChangePage (page, pageSize) {
       this.page = page
       this.pageSize = pageSize
-      this.fetchData()
+      this.fetchHostsForMigration()
     },
     handleChangePageSize (currentPage, pageSize) {
       this.page = currentPage
       this.pageSize = pageSize
-      this.fetchData()
+      this.fetchHostsForMigration()
     },
     handleSelectedHostChange (host) {
       if (host.id === -1) {
         this.migrateWithStorage = false
-        this.fetchVolumes()
       }
       this.selectedHost = host
       this.selectedVolumeForStoragePoolSelection = {}

@@ -259,6 +288,17 @@ export default {
         this.$refs.volumeToPoolSelect.resetSelection()
       }
     },
+    handleMigrateModeChange () {
+      this.volumeToPoolSelection = []
+    },
+    handleStoragePoolChange (storagePool) {
+      this.volumeToPoolSelection = []
+      for (const volume of this.volumes) {
+        if (storagePool && storagePool.id && storagePool.id !== -1) {
+          this.volumeToPoolSelection.push({ volume: volume.id, pool: storagePool.id })
+        }
+      }
+    },
     handleVolumeToPoolChange (volumeToPool) {
       this.volumeToPoolSelection = volumeToPool
     },

@@ -269,7 +309,7 @@ export default {
         listAll: true,
         virtualmachineid: this.resource.id
       }).then(response => {
-        this.volumes = response.listvolumesresponse.volume
+        this.volumes = response?.listvolumesresponse?.volume || []
       }).finally(() => {
         this.loading = false
       })

@@ -278,7 +318,7 @@ export default {
       if (this.selectedHost.requiresStorageMotion || this.volumeToPoolSelection.length > 0) {
         return true
       }
-      if (this.selectedHost.id === -1 && this.volumes && this.volumes.length > 0) {
+      if (this.selectedHost.id === -1 && this.hasVolumes) {
        for (var volume of this.volumes) {
          if (volume.storagetype === 'local') {
            return true

@@ -306,7 +346,7 @@ export default {
       var params = this.selectedHost.id === -1
         ? { autoselect: true, virtualmachineid: this.resource.id }
         : { hostid: this.selectedHost.id, virtualmachineid: this.resource.id }
-      if (this.migrateWithStorage) {
+      if (this.migrateWithStorage && this.volumeToPoolSelection && this.volumeToPoolSelection.length > 0) {
         for (var i = 0; i < this.volumeToPoolSelection.length; i++) {
           const mapping = this.volumeToPoolSelection[i]
           params['migrateto[' + i + '].volume'] = mapping.volume
@@ -99,7 +99,7 @@
     :rowKey="record => record.zoneid">
     <template #bodyCell="{ text, record, column }">
       <template v-if="column.dataIndex === 'datastore' && record.datastoreId">
-        <router-link :to="{ path: '/storagepool/' + record.datastoreId }">
+        <router-link :to="{ path: '/storagepool/' + encodeURIComponent(record.datastoreId) }">
           {{ text }}
         </router-link>
       </template>

@@ -91,7 +91,7 @@
     :rowKey="record => record.datastoreId">
     <template #bodyCell="{ text, record, column }">
       <template v-if="column.dataIndex === 'datastore' && record.datastoreId">
-        <router-link :to="{ path: '/storagepool/' + record.datastoreId }">
+        <router-link :to="{ path: '/storagepool/' + encodeURIComponent(record.datastoreId) }">
           {{ text }}
         </router-link>
       </template>

@@ -38,7 +38,7 @@
     <template #bodyCell="{ column, record }">
       <template v-if="column.key === 'zonename'">
         <span v-if="record.datastoreid">
-          <router-link :to="{ path: (record.datastoretype === 'Primary' ? '/storagepool/' : '/imagestore/') + record.datastoreid }">
+          <router-link :to="{ path: (record.datastoretype === 'Primary' ? '/storagepool/' : '/imagestore/') + encodeURIComponent(record.datastoreid) }">
             <span v-if="fetchZoneIcon(record.zoneid)">
               <resource-icon :image="zoneIcon" size="1x" style="margin-right: 5px"/>
             </span>
@@ -127,8 +127,8 @@ describe('Views > compute > MigrateWizard.vue', () => {
     if (Object.keys(originalFunc).length > 0) {
       Object.keys(originalFunc).forEach(key => {
         switch (key) {
-          case 'fetchData':
-            wrapper.vm.fetchData = originalFunc[key]
+          case 'fetchHostsForMigration':
+            wrapper.vm.fetchHostsForMigration = originalFunc[key]
             break
           default:
             break

@@ -138,11 +138,11 @@ describe('Views > compute > MigrateWizard.vue', () => {
   })
 
   describe('Methods', () => {
-    describe('fetchData()', () => {
+    describe('fetchHostsForMigration()', () => {
       it('API should be called with resource is empty and searchQuery is empty', async (done) => {
         await mockAxios.mockResolvedValue({ findhostsformigrationresponse: { count: 0, host: [] } })
         await wrapper.setProps({ resource: {} })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mockAxios).toHaveBeenCalled()

@@ -164,7 +164,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
       it('API should be called with resource.id is empty and searchQuery is empty', async (done) => {
         await mockAxios.mockResolvedValue({ findhostsformigrationresponse: { count: 0, host: [] } })
         await wrapper.setProps({ resource: { id: null } })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mockAxios).toHaveBeenCalled()

@@ -186,7 +186,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
       it('API should be called with resource.id is not empty and searchQuery is empty', async (done) => {
         await mockAxios.mockResolvedValue({ findhostsformigrationresponse: { count: 0, host: [] } })
         await wrapper.setProps({ resource: { id: 'test-id-value' } })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mockAxios).toHaveBeenCalled()

@@ -209,7 +209,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
         await mockAxios.mockResolvedValue({ findhostsformigrationresponse: { count: 0, host: [] } })
         await wrapper.setProps({ resource: { id: 'test-id-value' } })
         await wrapper.setData({ searchQuery: 'test-query-value' })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mockAxios).toHaveBeenCalled()

@@ -236,7 +236,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
           page: 2,
           pageSize: 20
         })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mockAxios).toHaveBeenCalled()

@@ -258,7 +258,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
       it('check hosts, totalCount when api is called with response result is empty', async (done) => {
         await mockAxios.mockResolvedValue({ findhostsformigrationresponse: { count: 0, host: [] } })
         await wrapper.setProps({ resource: {} })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(wrapper.vm.hosts).toEqual([])

@@ -281,7 +281,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
           }
         })
         await wrapper.setProps({ resource: {} })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(wrapper.vm.hosts).toEqual([{

@@ -301,7 +301,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
 
         await mockAxios.mockRejectedValue(mockError)
         await wrapper.setProps({ resource: {} })
-        await wrapper.vm.fetchData()
+        await wrapper.vm.fetchHostsForMigration()
         await flushPromises()
 
         expect(mocks.$notifyError).toHaveBeenCalled()

@@ -535,14 +535,14 @@ describe('Views > compute > MigrateWizard.vue', () => {
         await mockAxios.mockResolvedValue(mockData)
         await wrapper.setProps({
           resource: {
-            id: 'test-resource-id',
+            id: 'test-resource-id-err',
             name: 'test-resource-name'
           }
         })
         await wrapper.setData({
           selectedHost: {
             requiresStorageMotion: true,
-            id: 'test-host-id',
+            id: 'test-host-id-err',
             name: 'test-host-name'
           }
         })

@@ -564,14 +564,14 @@ describe('Views > compute > MigrateWizard.vue', () => {
         await mockAxios.mockResolvedValue(mockData)
         await wrapper.setProps({
           resource: {
-            id: 'test-resource-id',
+            id: 'test-resource-id-catch',
             name: 'test-resource-name'
           }
         })
         await wrapper.setData({
           selectedHost: {
             requiresStorageMotion: true,
-            id: 'test-host-id',
+            id: 'test-host-id-catch',
             name: 'test-host-name'
           }
         })

@@ -591,7 +591,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
         await wrapper.setData({
           selectedHost: {
             requiresStorageMotion: true,
-            id: 'test-host-id',
+            id: 'test-host-id-no-res',
             name: 'test-host-name'
           }
         })

@@ -609,11 +609,11 @@ describe('Views > compute > MigrateWizard.vue', () => {
   })
 
   describe('handleChangePage()', () => {
-    it('check page, pageSize and fetchData() when handleChangePage() is called', async (done) => {
-      originalFunc.fetchData = wrapper.vm.fetchData
-      wrapper.vm.fetchData = jest.fn()
+    it('check page, pageSize and fetchHostsForMigration() when handleChangePage() is called', async (done) => {
+      originalFunc.fetchHostsForMigration = wrapper.vm.fetchHostsForMigration
+      wrapper.vm.fetchHostsForMigration = jest.fn()
 
-      const fetchData = jest.spyOn(wrapper.vm, 'fetchData').mockImplementation(() => {})
+      const fetchHostsForMigration = jest.spyOn(wrapper.vm, 'fetchHostsForMigration').mockImplementation(() => {})
       await wrapper.setProps({ resource: {} })
       await wrapper.setData({
         page: 1,

@@ -624,17 +624,17 @@ describe('Views > compute > MigrateWizard.vue', () => {
 
       expect(wrapper.vm.page).toEqual(2)
       expect(wrapper.vm.pageSize).toEqual(20)
-      expect(fetchData).toBeCalled()
+      expect(fetchHostsForMigration).toBeCalled()
       done()
     })
   })
 
   describe('handleChangePageSize()', () => {
-    it('check page, pageSize and fetchData() when handleChangePageSize() is called', async (done) => {
-      originalFunc.fetchData = wrapper.vm.fetchData
-      wrapper.vm.fetchData = jest.fn()
+    it('check page, pageSize and fetchHostsForMigration() when handleChangePageSize() is called', async (done) => {
+      originalFunc.fetchHostsForMigration = wrapper.vm.fetchHostsForMigration
+      wrapper.vm.fetchHostsForMigration = jest.fn()
 
-      const fetchData = jest.spyOn(wrapper.vm, 'fetchData').mockImplementation(() => {})
+      const fetchHostsForMigration = jest.spyOn(wrapper.vm, 'fetchHostsForMigration').mockImplementation(() => {})
       await wrapper.setProps({ resource: {} })
       await wrapper.setData({
         page: 1,

@@ -645,7 +645,7 @@ describe('Views > compute > MigrateWizard.vue', () => {
 
      expect(wrapper.vm.page).toEqual(2)
      expect(wrapper.vm.pageSize).toEqual(20)
-      expect(fetchData).toBeCalled()
+      expect(fetchHostsForMigration).toBeCalled()
      done()
    })
  })
@@ -196,6 +196,11 @@
       <artifactId>jackson-databind</artifactId>
       <version>${cs.jackson.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.dataformat</groupId>
+      <artifactId>jackson-dataformat-cbor</artifactId>
+      <version>${cs.jackson.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-compress</artifactId>