mirror of https://github.com/apache/cloudstack.git
Merge cc9a6fb7e6 into 5893ba5a8c
This commit is contained in:
commit
612392a699
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
/plugins/storage/volume/linstor @rp-
|
||||
/plugins/storage/volume/storpool @slavkap
|
||||
/plugins/storage/volume/ontap @rajiv1 @sandeeplocharla @piyush5 @suryag
|
||||
/plugins/storage/volume/ontap @rajiv-jain-netapp @sandeeplocharla @piyush5netapp @suryag1201
|
||||
|
||||
.pre-commit-config.yaml @jbampton
|
||||
/.github/linters/ @jbampton
|
||||
|
|
|
|||
|
|
@ -77,6 +77,8 @@ public class KvmFileBasedStorageVmSnapshotStrategy extends StorageVMSnapshotStra
|
|||
|
||||
private static final List<Storage.StoragePoolType> supportedStoragePoolTypes = List.of(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem, Storage.StoragePoolType.SharedMountPoint);
|
||||
|
||||
private static final String ONTAP_PROVIDER_NAME = "NetApp ONTAP";
|
||||
|
||||
@Inject
|
||||
protected SnapshotDataStoreDao snapshotDataStoreDao;
|
||||
|
||||
|
|
@ -325,8 +327,13 @@ public class KvmFileBasedStorageVmSnapshotStrategy extends StorageVMSnapshotStra
|
|||
List<VolumeVO> volumes = volumeDao.findByInstance(vmId);
|
||||
for (VolumeVO volume : volumes) {
|
||||
StoragePoolVO storagePoolVO = storagePool.findById(volume.getPoolId());
|
||||
if (storagePoolVO.isManaged() && ONTAP_PROVIDER_NAME.equals(storagePoolVO.getStorageProviderName())) {
|
||||
logger.debug(" {} as the VM has a volume on ONTAP managed storage pool [{}]. " +
|
||||
"ONTAP managed storage has its own dedicated VM snapshot strategy.", cantHandleLog, storagePoolVO.getName());
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
if (!supportedStoragePoolTypes.contains(storagePoolVO.getPoolType())) {
|
||||
logger.debug(String.format("%s as the VM has a volume that is in a storage with unsupported type [%s].", cantHandleLog, storagePoolVO.getPoolType()));
|
||||
logger.debug("{} as the VM has a volume that is in a storage with unsupported type [{}].", cantHandleLog, storagePoolVO.getPoolType());
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
List<SnapshotVO> snapshots = snapshotDao.listByVolumeIdAndTypeNotInAndStateNotRemoved(volume.getId(), Snapshot.Type.GROUP);
|
||||
|
|
@ -503,8 +510,9 @@ public class KvmFileBasedStorageVmSnapshotStrategy extends StorageVMSnapshotStra
|
|||
return processCreateVmSnapshotAnswer(vmSnapshot, volumeInfoToSnapshotObjectMap, createDiskOnlyVMSnapshotAnswer, userVm, vmSnapshotVO, virtualSize, parentSnapshotVo);
|
||||
}
|
||||
|
||||
logger.error("Disk-only VM snapshot for VM [{}] failed{}.", userVm.getUuid(), answer != null ? " due to" + answer.getDetails() : "");
|
||||
throw new CloudRuntimeException(String.format("Disk-only VM snapshot for VM [%s] failed.", userVm.getUuid()));
|
||||
String details = answer != null ? answer.getDetails() : String.format("No answer received from host [%s]. The host may be unreachable.", hostId);
|
||||
logger.error("Disk-only VM snapshot for VM [{}] failed due to: {}.", userVm.getUuid(), details);
|
||||
throw new CloudRuntimeException(String.format("Disk-only VM snapshot for VM [%s] failed due to: %s.", userVm.getUuid(), details));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -83,6 +83,7 @@ import org.apache.cloudstack.storage.image.store.TemplateObject;
|
|||
import org.apache.cloudstack.storage.to.TemplateObjectTO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.commons.lang3.ObjectUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
|
@ -1347,6 +1348,11 @@ public class VolumeServiceImpl implements VolumeService {
|
|||
primaryDataStore.setDetails(details);
|
||||
|
||||
grantAccess(volumeInfo, destHost, primaryDataStore);
|
||||
volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore);
|
||||
// For Netapp ONTAP iscsiName or Lun path is available only after grantAccess
|
||||
String managedStoreTarget = ObjectUtils.defaultIfNull(volumeInfo.get_iScsiName(), volumeInfo.getUuid());
|
||||
details.put(PrimaryDataStore.MANAGED_STORE_TARGET, managedStoreTarget);
|
||||
primaryDataStore.setDetails(details);
|
||||
|
||||
try {
|
||||
motionSrv.copyAsync(srcTemplateInfo, destTemplateInfo, destHost, caller);
|
||||
|
|
|
|||
|
|
@ -106,6 +106,10 @@ public class LibvirtCreateDiskOnlyVMSnapshotCommandWrapper extends CommandWrappe
|
|||
return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, errorMsg, null);
|
||||
}
|
||||
return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, e.getMessage(), null);
|
||||
} catch (Exception e) {
|
||||
String errorMsg = String.format("Creation of disk-only VM snapshot for VM [%s] failed due to %s.", vmName, e.getMessage());
|
||||
logger.error(errorMsg, e);
|
||||
return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, errorMsg, null);
|
||||
} finally {
|
||||
if (dm != null) {
|
||||
try {
|
||||
|
|
@ -146,21 +150,13 @@ public class LibvirtCreateDiskOnlyVMSnapshotCommandWrapper extends CommandWrappe
|
|||
}
|
||||
} catch (LibvirtException | QemuImgException e) {
|
||||
logger.error("Exception while creating disk-only VM snapshot for VM [{}]. Deleting leftover deltas.", vmName, e);
|
||||
for (VolumeObjectTO volumeObjectTO : volumeObjectTos) {
|
||||
Pair<Long, String> volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid());
|
||||
PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore();
|
||||
KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());
|
||||
|
||||
if (volSizeAndNewPath == null) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second())));
|
||||
} catch (IOException ex) {
|
||||
logger.warn("Tried to delete leftover snapshot at [{}] failed.", volSizeAndNewPath.second(), ex);
|
||||
}
|
||||
}
|
||||
cleanupLeftoverDeltas(volumeObjectTos, mapVolumeToSnapshotSizeAndNewVolumePath, storagePoolMgr);
|
||||
return new Answer(cmd, e);
|
||||
} catch (Exception e) {
|
||||
logger.error("Unexpected exception while creating disk-only VM snapshot for VM [{}]. Deleting leftover deltas.", vmName, e);
|
||||
cleanupLeftoverDeltas(volumeObjectTos, mapVolumeToSnapshotSizeAndNewVolumePath, storagePoolMgr);
|
||||
return new CreateDiskOnlyVmSnapshotAnswer(cmd, false,
|
||||
String.format("Creation of disk-only VM snapshot for VM [%s] failed due to %s.", vmName, e.getMessage()), null);
|
||||
}
|
||||
|
||||
return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, mapVolumeToSnapshotSizeAndNewVolumePath);
|
||||
|
|
@ -192,6 +188,23 @@ public class LibvirtCreateDiskOnlyVMSnapshotCommandWrapper extends CommandWrappe
|
|||
return new Pair<>(snapshotXml, volumeObjectToNewPathMap);
|
||||
}
|
||||
|
||||
protected void cleanupLeftoverDeltas(List<VolumeObjectTO> volumeObjectTos, Map<String, Pair<Long, String>> mapVolumeToSnapshotSizeAndNewVolumePath, KVMStoragePoolManager storagePoolMgr) {
|
||||
for (VolumeObjectTO volumeObjectTO : volumeObjectTos) {
|
||||
Pair<Long, String> volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid());
|
||||
PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore();
|
||||
KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());
|
||||
|
||||
if (volSizeAndNewPath == null) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second())));
|
||||
} catch (IOException ex) {
|
||||
logger.warn("Tried to delete leftover snapshot at [{}] failed.", volSizeAndNewPath.second(), ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected long getFileSize(String path) {
|
||||
return new File(path).length();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,9 +16,14 @@
|
|||
// under the License.
|
||||
package com.cloud.hypervisor.kvm.storage;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileWriter;
|
||||
import java.nio.file.Path;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Paths;
|
||||
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
|
||||
|
|
@ -95,14 +100,8 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
|
||||
String result = iScsiAdmCmd.execute();
|
||||
|
||||
if (result != null) {
|
||||
logger.debug("Failed to add iSCSI target " + volumeUuid);
|
||||
System.out.println("Failed to add iSCSI target " + volumeUuid);
|
||||
|
||||
if (!handleNodeCreateResult(result, volumeUuid)) {
|
||||
return false;
|
||||
} else {
|
||||
logger.debug("Successfully added iSCSI target " + volumeUuid);
|
||||
System.out.println("Successfully added to iSCSI target " + volumeUuid);
|
||||
}
|
||||
|
||||
String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME);
|
||||
|
|
@ -123,24 +122,26 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
}
|
||||
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --login
|
||||
iScsiAdmCmd = new Script(true, "iscsiadm", 0, logger);
|
||||
final String host = pool.getSourceHost();
|
||||
final int port = pool.getSourcePort();
|
||||
final String iqn = getIqn(volumeUuid);
|
||||
|
||||
// Always try to login; treat benign outcomes as success (idempotent)
|
||||
iScsiAdmCmd = new Script(true, "iscsiadm", 0, logger);
|
||||
iScsiAdmCmd.add("-m", "node");
|
||||
iScsiAdmCmd.add("-T", getIqn(volumeUuid));
|
||||
iScsiAdmCmd.add("-p", pool.getSourceHost() + ":" + pool.getSourcePort());
|
||||
iScsiAdmCmd.add("-T", iqn);
|
||||
iScsiAdmCmd.add("-p", host + ":" + port);
|
||||
iScsiAdmCmd.add("--login");
|
||||
|
||||
result = iScsiAdmCmd.execute();
|
||||
|
||||
if (result != null) {
|
||||
logger.debug("Failed to log in to iSCSI target " + volumeUuid);
|
||||
System.out.println("Failed to log in to iSCSI target " + volumeUuid);
|
||||
|
||||
if (!handleLoginResult(result, volumeUuid)) {
|
||||
return false;
|
||||
} else {
|
||||
logger.debug("Successfully logged in to iSCSI target " + volumeUuid);
|
||||
System.out.println("Successfully logged in to iSCSI target " + volumeUuid);
|
||||
}
|
||||
|
||||
// If the session already existed, a newly mapped LUN won't be visible until a rescan.
|
||||
if (result != null) {
|
||||
rescanIscsiSessions(iqn, host, port);
|
||||
}
|
||||
|
||||
// There appears to be a race condition where logging in to the iSCSI volume via iscsiadm
|
||||
|
|
@ -158,8 +159,58 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the result of an iscsiadm node-create command.
|
||||
* Returns true if the node was created or already exists, false on failure.
|
||||
*/
|
||||
boolean handleNodeCreateResult(String result, String volumeUuid) {
|
||||
if (result == null) {
|
||||
logger.debug("Successfully added iSCSI node for target {}", volumeUuid);
|
||||
return true;
|
||||
}
|
||||
String msg = result.toLowerCase();
|
||||
if (msg.contains("already exists") || msg.contains("database exists") || msg.contains("exists")) {
|
||||
logger.debug("iSCSI node already exists for target {}, proceeding", volumeUuid);
|
||||
return true;
|
||||
}
|
||||
logger.debug("Failed to add iSCSI node for target {}: {}", volumeUuid, result);
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks the result of an iscsiadm login command.
|
||||
* Returns true if the login succeeded or session already exists, false on failure.
|
||||
*/
|
||||
boolean handleLoginResult(String result, String volumeUuid) {
|
||||
if (result == null) {
|
||||
logger.debug("Successfully logged in to iSCSI target {}", volumeUuid);
|
||||
return true;
|
||||
}
|
||||
String msg = result.toLowerCase();
|
||||
if (msg.contains("already present") || msg.contains("already logged in") || msg.contains("session exists")) {
|
||||
logger.debug("iSCSI session already exists for target {}, proceeding", volumeUuid);
|
||||
return true;
|
||||
}
|
||||
logger.debug("Failed to log in to iSCSI target {}: {}", volumeUuid, result);
|
||||
return false;
|
||||
}
|
||||
|
||||
private void rescanIscsiSessions(String iqn, String host, int port) {
|
||||
Script rescanCmd = new Script(true, "iscsiadm", 0, logger);
|
||||
rescanCmd.add("-m", "node");
|
||||
rescanCmd.add("-T", iqn);
|
||||
rescanCmd.add("-p", host + ":" + port);
|
||||
rescanCmd.add("--rescan");
|
||||
String rescanResult = rescanCmd.execute();
|
||||
if (rescanResult != null) {
|
||||
logger.warn("iSCSI session rescan returned: {}", rescanResult);
|
||||
} else {
|
||||
logger.debug("iSCSI session rescan completed successfully for {}@{}:{}", iqn, host, port);
|
||||
}
|
||||
}
|
||||
|
||||
private void waitForDiskToBecomeAvailable(String volumeUuid, KVMStoragePool pool) {
|
||||
int numberOfTries = 10;
|
||||
int numberOfTries = 30;
|
||||
int timeBetweenTries = 1000;
|
||||
|
||||
while (getPhysicalDisk(volumeUuid, pool).getSize() == 0 && numberOfTries > 0) {
|
||||
|
|
@ -238,6 +289,15 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
}
|
||||
|
||||
private long getDeviceSize(String deviceByPath) {
|
||||
try {
|
||||
if (!Files.exists(Paths.get(deviceByPath))) {
|
||||
logger.debug("Device by-path does not exist yet: " + deviceByPath);
|
||||
return 0L;
|
||||
}
|
||||
} catch (Exception ignore) {
|
||||
// If FS check fails for any reason, fall back to blockdev call
|
||||
}
|
||||
|
||||
Script iScsiAdmCmd = new Script(true, "blockdev", 0, logger);
|
||||
|
||||
iScsiAdmCmd.add("--getsize64", deviceByPath);
|
||||
|
|
@ -280,8 +340,96 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
return tmp[index].trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if there are other LUNs on the same iSCSI target (IQN) that are still
|
||||
* visible as block devices. This is needed because ONTAP uses a single IQN per
|
||||
* SVM — logging out of the target would kill ALL LUNs, not just the one being
|
||||
* disconnected.
|
||||
*
|
||||
* Checks /dev/disk/by-path/ for symlinks matching the same host:port + IQN but
|
||||
* with a different LUN number.
|
||||
*/
|
||||
private boolean hasOtherActiveLuns(String host, int port, String iqn, String lun) {
|
||||
String prefix = "ip-" + host + ":" + port + "-iscsi-" + iqn + "-lun-";
|
||||
File byPathDir = new File("/dev/disk/by-path");
|
||||
if (!byPathDir.exists() || !byPathDir.isDirectory()) {
|
||||
return false;
|
||||
}
|
||||
File[] entries = byPathDir.listFiles();
|
||||
if (entries == null) {
|
||||
return false;
|
||||
}
|
||||
for (File entry : entries) {
|
||||
String name = entry.getName();
|
||||
// Skip partition entries (e.g. lun-0-part1, lun-0-part2) — these are not
|
||||
// independent LUNs, they are partition symlinks for the same LUN disk.
|
||||
// Only count actual LUN entries (no "-part" suffix after the lun number).
|
||||
if (name.startsWith(prefix) && !name.equals(prefix + lun) && !name.contains("-part")) {
|
||||
logger.debug("Found other active LUN on same target: " + name);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a single stale SCSI device from the kernel using the sysfs interface.
|
||||
*
|
||||
* When ONTAP unmaps a LUN from the host's igroup, the by-path symlink and the
|
||||
* underlying SCSI device (/dev/sdX) remain present in the kernel until explicitly
|
||||
* removed — the kernel does not auto-remove devices from live iSCSI sessions.
|
||||
*
|
||||
* This method resolves the by-path symlink to the real block device name (e.g. sdd),
|
||||
* then writes "1" to /sys/block/<dev>/device/delete — the standard Linux kernel SCSI
|
||||
* API for removing a single device without tearing down the entire iSCSI session.
|
||||
* Once the kernel processes the delete, it also removes the by-path symlink.
|
||||
*
|
||||
* This is used instead of iscsiadm --logout when other LUNs on the same IQN are still
|
||||
* active (ONTAP single-IQN-per-SVM model), since logout would tear down ALL LUNs.
|
||||
*/
|
||||
private void removeStaleScsiDevice(String host, int port, String iqn, String lun) {
|
||||
String byPath = getByPath(host, port, "/" + iqn + "/" + lun);
|
||||
Path byPathLink = Paths.get(byPath);
|
||||
if (!Files.exists(byPathLink)) {
|
||||
logger.debug("by-path entry for LUN " + lun + " already gone, nothing to remove");
|
||||
return;
|
||||
}
|
||||
try {
|
||||
Path realDevice = byPathLink.toRealPath();
|
||||
String devName = realDevice.getFileName().toString();
|
||||
File deleteFile = new File("/sys/block/" + devName + "/device/delete");
|
||||
if (!deleteFile.exists()) {
|
||||
logger.warn("sysfs delete entry not found for device " + devName + " — cannot remove stale SCSI device");
|
||||
return;
|
||||
}
|
||||
try (FileWriter fw = new FileWriter(deleteFile)) {
|
||||
fw.write("1");
|
||||
}
|
||||
logger.info("Removed stale SCSI device " + devName + " for LUN /" + iqn + "/" + lun + " via sysfs");
|
||||
} catch (Exception e) {
|
||||
logger.warn("Failed to remove stale SCSI device for LUN /" + iqn + "/" + lun + ": " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) {
|
||||
// use iscsiadm to log out of the iSCSI target and un-discover it
|
||||
// Check if other LUNs on the same IQN target are still in use.
|
||||
// ONTAP (and similar) uses a single IQN per SVM with multiple LUNs.
|
||||
// Doing iscsiadm --logout tears down the ENTIRE target session,
|
||||
// which would destroy access to ALL LUNs — not just the one being disconnected.
|
||||
if (hasOtherActiveLuns(host, port, iqn, lun)) {
|
||||
logger.info("Skipping iSCSI logout for /" + iqn + "/" + lun +
|
||||
" — other LUNs on the same target are still active. Removing stale SCSI device for this LUN only.");
|
||||
removeStaleScsiDevice(host, port, iqn, lun);
|
||||
// After removing this LUN's device, re-check: if no other LUNs remain active,
|
||||
// If this was the last active LUN, we must log out to clean up the iSCSI session entirely.
|
||||
if (hasOtherActiveLuns(host, port, iqn, lun)) {
|
||||
logger.info("Other LUNs still active after removing /" + iqn + "/" + lun + " — session kept alive.");
|
||||
return true;
|
||||
}
|
||||
logger.info("No more active LUNs on target after removing /" + iqn + "/" + lun + " — proceeding with iSCSI logout.");
|
||||
}
|
||||
|
||||
// No other LUNs active on this target — safe to logout and delete the node record.
|
||||
|
||||
// ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --logout
|
||||
Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, logger);
|
||||
|
|
@ -422,6 +570,19 @@ public class IscsiAdmStorageAdaptor implements StorageAdaptor {
|
|||
try {
|
||||
QemuImg q = new QemuImg(timeout);
|
||||
q.convert(srcFile, destFile);
|
||||
// Below fix is required when vendor depends on host based copy rather than storage CAN_CREATE_VOLUME_FROM_VOLUME capability
|
||||
// When host based template copy is triggered , small size template sits in RAM(depending on host memory and RAM) and copy is marked successful and by the time flush to storage is triggered
|
||||
// disconnectPhysicalDisk would disconnect the lun , hence template staying in RAM is not copied to storage lun. Below does flushing of data to storage and marking
|
||||
// copy as successful once flush is complete.
|
||||
Script flushCmd = new Script(true, "blockdev", 0, logger);
|
||||
flushCmd.add("--flushbufs", destDisk.getPath());
|
||||
String flushResult = flushCmd.execute();
|
||||
if (flushResult != null) {
|
||||
logger.warn("iSCSI copyPhysicalDisk: blockdev --flushbufs returned: {}", flushResult);
|
||||
}
|
||||
Script syncCmd = new Script(true, "sync", 0, logger);
|
||||
syncCmd.execute();
|
||||
logger.info("iSCSI copyPhysicalDisk: flush/sync completed ");
|
||||
} catch (QemuImgException | LibvirtException ex) {
|
||||
String msg = "Failed to copy data from " + srcDisk.getPath() + " to " +
|
||||
destDisk.getPath() + ". The error was the following: " + ex.getMessage();
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@
|
|||
<junit-jupiter.version>5.8.1</junit-jupiter.version>
|
||||
<mockito.version>3.12.4</mockito.version>
|
||||
<mockito-junit-jupiter.version>5.2.0</mockito-junit-jupiter.version>
|
||||
<byte-buddy-agent.version>1.11.13</byte-buddy-agent.version>
|
||||
</properties>
|
||||
<dependencyManagement>
|
||||
<dependencies>
|
||||
|
|
@ -121,12 +122,24 @@
|
|||
<version>${mockito.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.bytebuddy</groupId>
|
||||
<artifactId>byte-buddy-agent</artifactId>
|
||||
<version>${byte-buddy-agent.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.assertj</groupId>
|
||||
<artifactId>assertj-core</artifactId>
|
||||
<version>${assertj.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-engine-storage-snapshot</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<repositories>
|
||||
<repository>
|
||||
|
|
@ -151,6 +164,7 @@
|
|||
<version>${maven-surefire-plugin.version}</version>
|
||||
<configuration>
|
||||
<skipTests>false</skipTests>
|
||||
<argLine>-javaagent:${settings.localRepository}/net/bytebuddy/byte-buddy-agent/${byte-buddy-agent.version}/byte-buddy-agent-${byte-buddy-agent.version}.jar</argLine>
|
||||
<includes>
|
||||
<include>**/*Test.java</include>
|
||||
</includes>
|
||||
|
|
|
|||
|
|
@ -18,13 +18,26 @@
|
|||
*/
|
||||
package org.apache.cloudstack.storage.driver;
|
||||
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.to.DataObjectType;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.dao.SnapshotDetailsDao;
|
||||
import com.cloud.storage.dao.SnapshotDetailsVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
|
|
@ -37,23 +50,53 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
|||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.feign.client.SnapshotFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.FlexVolSnapshot;
|
||||
import org.apache.cloudstack.storage.feign.model.Lun;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.service.SANStrategy;
|
||||
import org.apache.cloudstack.storage.service.StorageStrategy;
|
||||
import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Primary datastore driver for NetApp ONTAP storage systems.
|
||||
* Handles volume lifecycle operations for iSCSI and NFS protocols.
|
||||
*/
|
||||
public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(OntapPrimaryDatastoreDriver.class);
|
||||
|
||||
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
@Inject private PrimaryDataStoreDao storagePoolDao;
|
||||
@Inject private VolumeDao volumeDao;
|
||||
@Inject private VolumeDetailsDao volumeDetailsDao;
|
||||
@Inject private SnapshotDetailsDao snapshotDetailsDao;
|
||||
|
||||
@Override
|
||||
public Map<String, String> getCapabilities() {
|
||||
logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
|
||||
Map<String, String> mapCapabilities = new HashMap<>();
|
||||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString());
|
||||
|
||||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
return mapCapabilities;
|
||||
}
|
||||
|
||||
|
|
@ -66,13 +109,221 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
|||
public DataStoreTO getStoreTO(DataStore store) { return null; }
|
||||
|
||||
@Override
|
||||
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
throw new UnsupportedOperationException("Create operation is not supported for ONTAP primary storage.");
|
||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||
logger.info("OntapPrimaryDatastoreDriver: volumesRequireGrantAccessWhenUsed: Called");
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a volume on the ONTAP storage system.
|
||||
*/
|
||||
@Override
|
||||
public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
CreateCmdResult createCmdResult = null;
|
||||
String errMsg;
|
||||
|
||||
if (dataObject == null) {
|
||||
throw new InvalidParameterValueException("dataObject should not be null");
|
||||
}
|
||||
if (dataStore == null) {
|
||||
throw new InvalidParameterValueException("dataStore should not be null");
|
||||
}
|
||||
if (callback == null) {
|
||||
throw new InvalidParameterValueException("callback should not be null");
|
||||
}
|
||||
|
||||
try {
|
||||
logger.info("Started for data store name [{}] and data object name [{}] of type [{}]",
|
||||
dataStore.getName(), dataObject.getName(), dataObject.getType());
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("createAsync: Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId());
|
||||
}
|
||||
|
||||
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
|
||||
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
VolumeInfo volInfo = (VolumeInfo) dataObject;
|
||||
|
||||
// Create the backend storage object (LUN for iSCSI, no-op for NFS)
|
||||
CloudStackVolume created = createCloudStackVolume(storagePool, volInfo, details);
|
||||
|
||||
// Update CloudStack volume record with storage pool association and protocol-specific details
|
||||
VolumeVO volumeVO = volumeDao.findById(volInfo.getId());
|
||||
if (volumeVO != null) {
|
||||
volumeVO.setPoolType(storagePool.getPoolType());
|
||||
volumeVO.setPoolId(storagePool.getId());
|
||||
|
||||
if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
|
||||
String lunName = created != null && created.getLun() != null ? created.getLun().getName() : null;
|
||||
if (lunName == null) {
|
||||
throw new CloudRuntimeException("Missing LUN name for volume " + volInfo.getId());
|
||||
}
|
||||
|
||||
// Persist LUN details for future operations (delete, grant/revoke access)
|
||||
volumeDetailsDao.addDetail(volInfo.getId(), OntapStorageConstants.LUN_DOT_UUID, created.getLun().getUuid(), false);
|
||||
volumeDetailsDao.addDetail(volInfo.getId(), OntapStorageConstants.LUN_DOT_NAME, lunName, false);
|
||||
if (created.getLun().getUuid() != null) {
|
||||
volumeVO.setFolder(created.getLun().getUuid());
|
||||
}
|
||||
|
||||
logger.info("createAsync: Created LUN [{}] for volume [{}]. LUN mapping will occur during grantAccess() to per-host igroup.",
|
||||
lunName, volumeVO.getId());
|
||||
createCmdResult = new CreateCmdResult(lunName, new Answer(null, true, null));
|
||||
} else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
|
||||
createCmdResult = new CreateCmdResult(volInfo.getUuid(), new Answer(null, true, null));
|
||||
logger.info("createAsync: Managed NFS volume [{}] with path [{}] associated with pool {}",
|
||||
volumeVO.getId(), volInfo.getUuid(), storagePool.getId());
|
||||
}
|
||||
volumeDao.update(volumeVO.getId(), volumeVO);
|
||||
}
|
||||
} else {
|
||||
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
|
||||
logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
errMsg = e.getMessage();
|
||||
logger.error("createAsync: Failed for dataObject name [{}]: {}", dataObject.getName(), errMsg);
|
||||
createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg));
|
||||
createCmdResult.setResult(e.toString());
|
||||
} finally {
|
||||
if (createCmdResult != null && createCmdResult.isSuccess()) {
|
||||
logger.info("createAsync: Operation completed successfully for {}", dataObject.getType());
|
||||
}
|
||||
callback.complete(createCmdResult);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a volume on the ONTAP backend.
|
||||
*/
|
||||
private CloudStackVolume createCloudStackVolume(StoragePoolVO storagePool, VolumeInfo volumeObject, Map<String, String> details) {
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details);
|
||||
CloudStackVolume cloudStackVolumeRequest = OntapStorageUtils.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject);
|
||||
return storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a volume or snapshot from the ONTAP storage system.
|
||||
*
|
||||
* <p>For volumes, deletes the backend storage object (LUN for iSCSI, no-op for NFS).
|
||||
* For snapshots, deletes the FlexVolume snapshot from ONTAP that was created by takeSnapshot.</p>
|
||||
*/
|
||||
@Override
|
||||
public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback<CommandResult> callback) {
|
||||
throw new UnsupportedOperationException("Delete operation is not supported for ONTAP primary storage.");
|
||||
CommandResult commandResult = new CommandResult();
|
||||
try {
|
||||
if (store == null || data == null) {
|
||||
throw new CloudRuntimeException("store or data is null");
|
||||
}
|
||||
|
||||
if (data.getType() == DataObjectType.VOLUME) {
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(store.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("deleteAsync: Storage Pool not found for id: " + store.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + store.getId());
|
||||
}
|
||||
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId());
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details);
|
||||
logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(OntapStorageConstants.SVM_NAME));
|
||||
VolumeInfo volumeInfo = (VolumeInfo) data;
|
||||
CloudStackVolume cloudStackVolumeRequest = createDeleteCloudStackVolumeRequest(storagePool, details, volumeInfo);
|
||||
storageStrategy.deleteCloudStackVolume(cloudStackVolumeRequest);
|
||||
logger.info("deleteAsync: Volume deleted: " + volumeInfo.getId());
|
||||
commandResult.setResult(null);
|
||||
commandResult.setSuccess(true);
|
||||
} else if (data.getType() == DataObjectType.SNAPSHOT) {
|
||||
// Delete the ONTAP FlexVolume snapshot that was created by takeSnapshot
|
||||
deleteOntapSnapshot((SnapshotInfo) data, commandResult);
|
||||
} else {
|
||||
throw new CloudRuntimeException("Unsupported data object type: " + data.getType());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteAsync: Failed for data object [{}]: {}", data, e.getMessage());
|
||||
commandResult.setSuccess(false);
|
||||
commandResult.setResult(e.getMessage());
|
||||
} finally {
|
||||
callback.complete(commandResult);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Deletes an ONTAP FlexVolume snapshot.
 *
 * <p>Retrieves the snapshot details stored during takeSnapshot and calls the ONTAP
 * REST API to delete the FlexVolume snapshot. Missing details or an already-deleted
 * snapshot are treated as success (nothing left to remove on the backend).</p>
 *
 * @param snapshotInfo The CloudStack snapshot to delete
 * @param commandResult Result object to populate with success/failure
 */
private void deleteOntapSnapshot(SnapshotInfo snapshotInfo, CommandResult commandResult) {
    long snapshotId = snapshotInfo.getId();
    logger.info("deleteOntapSnapshot: Deleting ONTAP FlexVolume snapshot for CloudStack snapshot [{}]", snapshotId);

    try {
        // Retrieve snapshot details stored during takeSnapshot
        String flexVolUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.BASE_ONTAP_FV_ID);
        String ontapSnapshotUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_ID);
        String snapshotName = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_NAME);
        String poolIdStr = getSnapshotDetail(snapshotId, OntapStorageConstants.PRIMARY_POOL_ID);

        if (flexVolUuid == null || ontapSnapshotUuid == null) {
            logger.warn("deleteOntapSnapshot: Missing ONTAP snapshot details for snapshot [{}]. " +
                "flexVolUuid={}, ontapSnapshotUuid={}. Snapshot may have been created by a different method or already deleted.",
                snapshotId, flexVolUuid, ontapSnapshotUuid);
            // Consider this a success since there's nothing to delete on ONTAP
            commandResult.setSuccess(true);
            commandResult.setResult(null);
            return;
        }

        // NOTE(review): poolIdStr is not null-checked here; a missing PRIMARY_POOL_ID
        // detail surfaces as a NumberFormatException handled by the catch block below.
        long poolId = Long.parseLong(poolIdStr);
        Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(poolId);

        StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
        SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient();
        String authHeader = storageStrategy.getAuthHeader();

        logger.info("deleteOntapSnapshot: Deleting ONTAP snapshot [{}] (uuid={}) from FlexVol [{}]",
            snapshotName, ontapSnapshotUuid, flexVolUuid);

        // Call ONTAP REST API to delete the snapshot
        JobResponse jobResponse = snapshotClient.deleteSnapshot(authHeader, flexVolUuid, ontapSnapshotUuid);

        if (jobResponse != null && jobResponse.getJob() != null) {
            // Poll for job completion
            Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2000);
            if (!jobSucceeded) {
                throw new CloudRuntimeException("Delete job failed for snapshot [" +
                    snapshotName + "] on FlexVol [" + flexVolUuid + "]");
            }
        }

        logger.info("deleteOntapSnapshot: Successfully deleted ONTAP snapshot [{}] (uuid={}) for CloudStack snapshot [{}]",
            snapshotName, ontapSnapshotUuid, snapshotId);

        commandResult.setSuccess(true);
        commandResult.setResult(null);

    } catch (Exception e) {
        // Check if the error indicates snapshot doesn't exist (already deleted).
        // The classification is message-based because the Feign client surfaces
        // HTTP errors as exception text rather than typed status codes.
        String errorMsg = e.getMessage();
        if (errorMsg != null && (errorMsg.contains("404") || errorMsg.contains("not found") ||
            errorMsg.contains("does not exist"))) {
            logger.warn("deleteOntapSnapshot: ONTAP snapshot for CloudStack snapshot [{}] not found, " +
                "may have been already deleted. Treating as success.", snapshotId);
            commandResult.setSuccess(true);
            commandResult.setResult(null);
        } else {
            logger.error("deleteOntapSnapshot: Failed to delete ONTAP snapshot for CloudStack snapshot [{}]: {}",
                snapshotId, e.getMessage(), e);
            commandResult.setSuccess(false);
            commandResult.setResult(e.getMessage());
        }
    }
}
|
||||
|
||||
@Override
|
||||
|
|
@ -98,14 +349,231 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
|||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Grants a host access to a volume.
|
||||
*/
|
||||
@Override
|
||||
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
|
||||
return false;
|
||||
try {
|
||||
if (dataStore == null) {
|
||||
throw new InvalidParameterValueException("dataStore should not be null");
|
||||
}
|
||||
if (dataObject == null) {
|
||||
throw new InvalidParameterValueException("dataObject should not be null");
|
||||
}
|
||||
if (host == null) {
|
||||
throw new InvalidParameterValueException("host should not be null");
|
||||
}
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId());
|
||||
}
|
||||
|
||||
// ONTAP managed storage only supports cluster and zone scoped pools
|
||||
if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) {
|
||||
logger.error("grantAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName());
|
||||
throw new CloudRuntimeException("Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName());
|
||||
}
|
||||
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
VolumeVO volumeVO = volumeDao.findById(dataObject.getId());
|
||||
if (volumeVO == null) {
|
||||
logger.error("grantAccess: CloudStack Volume not found for id: " + dataObject.getId());
|
||||
throw new CloudRuntimeException("CloudStack Volume not found for id: " + dataObject.getId());
|
||||
}
|
||||
|
||||
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
|
||||
String svmName = details.get(OntapStorageConstants.SVM_NAME);
|
||||
|
||||
if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
|
||||
// Only retrieve LUN name for iSCSI volumes
|
||||
String cloudStackVolumeName = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue();
|
||||
UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) OntapStorageUtils.getStrategyByStoragePoolDetails(details);
|
||||
String accessGroupName = OntapStorageUtils.getIgroupName(svmName, host.getName());
|
||||
|
||||
// Validate if Igroup exist ONTAP for this host as we may be using delete_on_unmap= true and igroup may be deleted by ONTAP automatically
|
||||
Map<String, String> getAccessGroupMap = Map.of(
|
||||
OntapStorageConstants.NAME, accessGroupName,
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName
|
||||
);
|
||||
AccessGroup accessGroup = sanStrategy.getAccessGroup(getAccessGroupMap);
|
||||
if(accessGroup == null || accessGroup.getIgroup() == null) {
|
||||
logger.info("grantAccess: Igroup {} does not exist for the host {} : Need to create Igroup for the host ", accessGroupName, host.getName());
|
||||
// create the igroup for the host and perform lun-mapping
|
||||
accessGroup = new AccessGroup();
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
hosts.add((HostVO) host);
|
||||
accessGroup.setHostsToConnect(hosts);
|
||||
accessGroup.setStoragePoolId(storagePool.getId());
|
||||
accessGroup = sanStrategy.createAccessGroup(accessGroup);
|
||||
}else{
|
||||
logger.info("grantAccess: Igroup {} already exist for the host {}: ", accessGroup.getIgroup().getName() ,host.getName());
|
||||
/* TODO Below cases will be covered later, for now they will be a pre-requisite on customer side
|
||||
1. Igroup exist with the same name but host initiator has been rempved
|
||||
2. Igroup exist with the same name but host initiator has been changed may be due to new NIC or new adapter
|
||||
In both cases we need to verify current host initiator is registered in the igroup before allowing access
|
||||
Incase it is not , add it and proceed for lun-mapping
|
||||
*/
|
||||
}
|
||||
logger.info("grantAccess: Igroup {} is present now with initiators {} ", accessGroup.getIgroup().getName(), accessGroup.getIgroup().getInitiators());
|
||||
// Create or retrieve existing LUN mapping
|
||||
String lunNumber = sanStrategy.ensureLunMapped(svmName, cloudStackVolumeName, accessGroupName);
|
||||
|
||||
// Update volume path if changed (e.g., after migration or re-mapping)
|
||||
String iscsiPath = OntapStorageConstants.SLASH + storagePool.getPath() + OntapStorageConstants.SLASH + lunNumber;
|
||||
if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) {
|
||||
volumeVO.set_iScsiName(iscsiPath);
|
||||
volumeVO.setPath(iscsiPath);
|
||||
}
|
||||
} else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
|
||||
// For NFS, no access grant needed - file is accessible via mount
|
||||
logger.debug("grantAccess: NFS volume [{}], no igroup mapping required", volumeVO.getUuid());
|
||||
return true;
|
||||
}
|
||||
volumeVO.setPoolType(storagePool.getPoolType());
|
||||
volumeVO.setPoolId(storagePool.getId());
|
||||
volumeDao.update(volumeVO.getId(), volumeVO);
|
||||
} else {
|
||||
logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
|
||||
throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
|
||||
}
|
||||
return true;
|
||||
} catch (Exception e) {
|
||||
logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage());
|
||||
throw new CloudRuntimeException("Failed with error: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Revokes a host's access to a volume.
|
||||
*/
|
||||
@Override
|
||||
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
|
||||
throw new UnsupportedOperationException("Revoke access operation is not supported for ONTAP primary storage.");
|
||||
try {
|
||||
if (dataStore == null) {
|
||||
throw new InvalidParameterValueException("dataStore should not be null");
|
||||
}
|
||||
if (dataObject == null) {
|
||||
throw new InvalidParameterValueException("dataObject should not be null");
|
||||
}
|
||||
if (host == null) {
|
||||
throw new InvalidParameterValueException("host should not be null");
|
||||
}
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("revokeAccess: Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId());
|
||||
}
|
||||
|
||||
if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) {
|
||||
logger.error("revokeAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName());
|
||||
throw new CloudRuntimeException("Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName());
|
||||
}
|
||||
|
||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||
VolumeVO volumeVO = volumeDao.findById(dataObject.getId());
|
||||
if (volumeVO == null) {
|
||||
logger.error("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId());
|
||||
throw new CloudRuntimeException("CloudStack Volume not found for id: " + dataObject.getId());
|
||||
}
|
||||
revokeAccessForVolume(storagePool, volumeVO, host);
|
||||
} else {
|
||||
logger.error("revokeAccess: Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess");
|
||||
throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("revokeAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage());
|
||||
throw new CloudRuntimeException("Failed with error: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Revokes volume access for the specified host.
 *
 * <p>For iSCSI, removes the LUN-to-igroup mapping on ONTAP after verifying that
 * the LUN, the igroup, and the host's initiator registration all still exist;
 * any missing piece is logged and the revoke is skipped as a best-effort no-op.
 * Non-iSCSI protocols (NFS) fall through without action.</p>
 *
 * @param storagePool the pool whose details identify protocol and SVM
 * @param volumeVO    the CloudStack volume being unmapped
 * @param host        the host losing access
 */
private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) {
    logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName());

    Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
    StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details);
    String svmName = details.get(OntapStorageConstants.SVM_NAME);

    if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
        String accessGroupName = OntapStorageUtils.getIgroupName(svmName, host.getName());

        // Retrieve LUN name from volume details; if missing, volume may not have been fully created
        VolumeDetailVO lunDetail = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME);
        String lunName = lunDetail != null ? lunDetail.getValue() : null;
        if (lunName == null) {
            logger.warn("revokeAccessForVolume: No LUN name found for volume [{}]; skipping revoke", volumeVO.getId());
            return;
        }

        // Verify LUN still exists on ONTAP (may have been manually deleted)
        CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, lunName);
        if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getUuid() == null) {
            logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, skipping revoke", volumeVO.getId());
            return;
        }

        // Verify igroup still exists on ONTAP
        AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName);
        if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getUuid() == null) {
            logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, skipping revoke", accessGroupName);
            return;
        }

        // Verify host initiator is in the igroup before attempting to remove mapping.
        // NOTE(review): host.getStorageUrl() is passed as the initiator identifier here —
        // confirm this matches what UnifiedSANStrategy registers during grantAccess.
        SANStrategy sanStrategy = (UnifiedSANStrategy) storageStrategy;
        if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroup.getIgroup())) {
            logger.warn("revokeAccessForVolume: Initiator [{}] is not in iGroup [{}], skipping revoke",
                host.getStorageUrl(), accessGroupName);
            return;
        }

        // Remove the LUN mapping from the igroup
        Map<String, String> disableLogicalAccessMap = new HashMap<>();
        disableLogicalAccessMap.put(OntapStorageConstants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid());
        disableLogicalAccessMap.put(OntapStorageConstants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid());
        storageStrategy.disableLogicalAccess(disableLogicalAccessMap);

        logger.info("revokeAccessForVolume: Successfully revoked access to LUN [{}] for host [{}]",
            lunName, host.getName());
    }
    // Non-iSCSI protocols (e.g. NFS) require no per-host revoke; fall through as a no-op.
}
|
||||
|
||||
/**
|
||||
* Retrieves a volume from ONTAP by name.
|
||||
*/
|
||||
private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) {
|
||||
Map<String, String> getCloudStackVolumeMap = new HashMap<>();
|
||||
getCloudStackVolumeMap.put(OntapStorageConstants.NAME, cloudStackVolumeName);
|
||||
getCloudStackVolumeMap.put(OntapStorageConstants.SVM_DOT_NAME, svmName);
|
||||
|
||||
CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap);
|
||||
if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) {
|
||||
logger.warn("getCloudStackVolumeByName: LUN [{}] not found on ONTAP", cloudStackVolumeName);
|
||||
return null;
|
||||
}
|
||||
return cloudStackVolume;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves an access group from ONTAP by name.
|
||||
*/
|
||||
private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) {
|
||||
Map<String, String> getAccessGroupMap = new HashMap<>();
|
||||
getAccessGroupMap.put(OntapStorageConstants.NAME, accessGroupName);
|
||||
getAccessGroupMap.put(OntapStorageConstants.SVM_DOT_NAME, svmName);
|
||||
|
||||
AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap);
|
||||
if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) {
|
||||
logger.warn("getAccessGroupByName: iGroup [{}] not found on ONTAP", accessGroupName);
|
||||
return null;
|
||||
}
|
||||
return accessGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -128,11 +596,267 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Takes a snapshot by creating an ONTAP FlexVolume-level snapshot.
|
||||
*
|
||||
* <p>This method creates a point-in-time, space-efficient snapshot of the entire
|
||||
* FlexVolume containing the CloudStack volume. FlexVolume snapshots are atomic
|
||||
* and capture all files/LUNs within the volume at the moment of creation.</p>
|
||||
*
|
||||
* <p>Both NFS and iSCSI protocols use the same FlexVolume snapshot approach:
|
||||
* <ul>
|
||||
* <li>NFS: The QCOW2 file is captured within the FlexVolume snapshot</li>
|
||||
* <li>iSCSI: The LUN is captured within the FlexVolume snapshot</li>
|
||||
* </ul>
|
||||
* </p>
|
||||
*
|
||||
* <p>With {@code STORAGE_SYSTEM_SNAPSHOT=true}, {@code StorageSystemSnapshotStrategy}
|
||||
* handles the workflow.</p>
|
||||
*/
|
||||
@Override
|
||||
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {}
|
||||
public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback<CreateCmdResult> callback) {
|
||||
logger.info("OntapPrimaryDatastoreDriver.takeSnapshot: Creating FlexVolume snapshot for snapshot [{}]", snapshot.getId());
|
||||
CreateCmdResult result;
|
||||
|
||||
try {
|
||||
VolumeInfo volumeInfo = snapshot.getBaseVolume();
|
||||
|
||||
VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId());
|
||||
if (volumeVO == null) {
|
||||
throw new CloudRuntimeException("VolumeVO not found for id: " + volumeInfo.getId());
|
||||
}
|
||||
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(volumeVO.getPoolId());
|
||||
if (storagePool == null) {
|
||||
logger.error("takeSnapshot: Storage Pool not found for id: {}", volumeVO.getPoolId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + volumeVO.getPoolId());
|
||||
}
|
||||
|
||||
Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(volumeVO.getPoolId());
|
||||
String protocol = poolDetails.get(OntapStorageConstants.PROTOCOL);
|
||||
String flexVolUuid = poolDetails.get(OntapStorageConstants.VOLUME_UUID);
|
||||
|
||||
if (flexVolUuid == null || flexVolUuid.isEmpty()) {
|
||||
throw new CloudRuntimeException("FlexVolume UUID not found in pool details for pool " + volumeVO.getPoolId());
|
||||
}
|
||||
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
|
||||
SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient();
|
||||
String authHeader = storageStrategy.getAuthHeader();
|
||||
|
||||
SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO) snapshot.getTO();
|
||||
|
||||
// Build snapshot name using volume name and snapshot UUID
|
||||
String snapshotName = buildSnapshotName(volumeInfo.getName(), snapshot.getUuid());
|
||||
|
||||
// Resolve the volume path for storing in snapshot details (for revert operation)
|
||||
String volumePath = resolveVolumePathOnOntap(volumeVO, protocol, poolDetails);
|
||||
|
||||
// For iSCSI, retrieve LUN UUID for restore operations
|
||||
String lunUuid = null;
|
||||
if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
|
||||
VolumeDetailVO lunDetail = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_UUID);
|
||||
String lunUUID = lunDetail != null ? lunDetail.getValue() : null;
|
||||
if (lunUUID == null) {
|
||||
throw new CloudRuntimeException("LUN UUID not found for iSCSI volume " + volumeVO.getId());
|
||||
}
|
||||
}
|
||||
|
||||
// Create FlexVolume snapshot via ONTAP REST API
|
||||
FlexVolSnapshot snapshotRequest = new FlexVolSnapshot(snapshotName,
|
||||
"CloudStack volume snapshot for volume " + volumeInfo.getName());
|
||||
|
||||
logger.info("takeSnapshot: Creating ONTAP FlexVolume snapshot [{}] on FlexVol UUID [{}] for volume [{}]",
|
||||
snapshotName, flexVolUuid, volumeVO.getId());
|
||||
|
||||
JobResponse jobResponse = snapshotClient.createSnapshot(authHeader, flexVolUuid, snapshotRequest);
|
||||
if (jobResponse == null || jobResponse.getJob() == null) {
|
||||
throw new CloudRuntimeException("Failed to initiate FlexVolume snapshot on FlexVol UUID [" + flexVolUuid + "]");
|
||||
}
|
||||
|
||||
// Poll for job completion
|
||||
Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2000);
|
||||
if (!jobSucceeded) {
|
||||
throw new CloudRuntimeException("FlexVolume snapshot job failed on FlexVol UUID [" + flexVolUuid + "]");
|
||||
}
|
||||
|
||||
// Retrieve the created snapshot UUID by name
|
||||
String ontapSnapshotUuid = resolveSnapshotUuid(snapshotClient, authHeader, flexVolUuid, snapshotName);
|
||||
if (ontapSnapshotUuid == null || ontapSnapshotUuid.isEmpty()) {
|
||||
throw new CloudRuntimeException("Failed to resolve snapshot UUID for snapshot name [" + snapshotName + "]");
|
||||
}
|
||||
|
||||
// Set snapshot path for CloudStack (format: snapshotName for identification)
|
||||
snapshotObjectTo.setPath(OntapStorageConstants.ONTAP_SNAP_ID + "=" + ontapSnapshotUuid);
|
||||
|
||||
// Persist snapshot details for revert/delete operations
|
||||
updateSnapshotDetails(snapshot.getId(), volumeInfo.getId(), flexVolUuid,
|
||||
ontapSnapshotUuid, snapshotName, volumePath, volumeVO.getPoolId(), protocol, lunUuid);
|
||||
|
||||
CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo);
|
||||
result = new CreateCmdResult(null, createObjectAnswer);
|
||||
result.setResult(null);
|
||||
|
||||
logger.info("takeSnapshot: Successfully created FlexVolume snapshot [{}] (uuid={}) for volume [{}]",
|
||||
snapshotName, ontapSnapshotUuid, volumeVO.getId());
|
||||
|
||||
} catch (Exception ex) {
|
||||
logger.error("takeSnapshot: Failed due to ", ex);
|
||||
result = new CreateCmdResult(null, new CreateObjectAnswer(ex.toString()));
|
||||
result.setResult(ex.toString());
|
||||
}
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the volume path on ONTAP for snapshot restore operations.
|
||||
*
|
||||
* @param volumeVO The CloudStack volume
|
||||
* @param protocol Storage protocol (NFS3 or ISCSI)
|
||||
* @param poolDetails Pool configuration details
|
||||
* @return The ONTAP path (file path for NFS, LUN name for iSCSI)
|
||||
*/
|
||||
private String resolveVolumePathOnOntap(VolumeVO volumeVO, String protocol, Map<String, String> poolDetails) {
|
||||
if (ProtocolType.NFS3.name().equalsIgnoreCase(protocol)) {
|
||||
// For NFS, use the volume's file path
|
||||
return volumeVO.getPath();
|
||||
} else if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
|
||||
// For iSCSI, retrieve the LUN name from volume details
|
||||
String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME) != null ?
|
||||
volumeDetailsDao.findDetail(volumeVO.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue() : null;
|
||||
if (lunName == null) {
|
||||
throw new CloudRuntimeException("No LUN name found for volume " + volumeVO.getId());
|
||||
}
|
||||
return lunName;
|
||||
}
|
||||
throw new CloudRuntimeException("Unsupported protocol " + protocol);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the ONTAP snapshot UUID by querying for the snapshot by name.
|
||||
*
|
||||
* @param snapshotClient The ONTAP snapshot Feign client
|
||||
* @param authHeader Authorization header
|
||||
* @param flexVolUuid FlexVolume UUID
|
||||
* @param snapshotName Name of the snapshot to find
|
||||
* @return The UUID of the snapshot, or null if not found
|
||||
*/
|
||||
private String resolveSnapshotUuid(SnapshotFeignClient snapshotClient, String authHeader,
|
||||
String flexVolUuid, String snapshotName) {
|
||||
Map<String, Object> queryParams = new HashMap<>();
|
||||
queryParams.put("name", snapshotName);
|
||||
queryParams.put("fields", "uuid,name");
|
||||
|
||||
OntapResponse<FlexVolSnapshot> response = snapshotClient.getSnapshots(authHeader, flexVolUuid, queryParams);
|
||||
if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) {
|
||||
return response.getRecords().get(0).getUuid();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reverts a volume to a snapshot using protocol-specific ONTAP restore APIs.
|
||||
*
|
||||
* <p>This method delegates to the appropriate StorageStrategy to restore the
|
||||
* specific file (NFS) or LUN (iSCSI) from the FlexVolume snapshot directly
|
||||
* via ONTAP REST API, without involving the hypervisor agent.</p>
|
||||
*
|
||||
* <p><b>Protocol-specific handling (delegated to strategy classes):</b></p>
|
||||
* <ul>
|
||||
* <li><b>NFS (UnifiedNASStrategy):</b> Uses the single-file restore API:
|
||||
* {@code POST /api/storage/volumes/{volume_uuid}/snapshots/{snapshot_uuid}/files/{file_path}/restore}
|
||||
* Restores the QCOW2 file from the FlexVolume snapshot to its original location.</li>
|
||||
* <li><b>iSCSI (UnifiedSANStrategy):</b> Uses the LUN restore API:
|
||||
* {@code POST /api/storage/luns/{lun.uuid}/restore}
|
||||
* Restores the LUN data from the snapshot to the specified destination path.</li>
|
||||
* </ul>
|
||||
*/
|
||||
@Override
|
||||
public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback<CommandResult> callback) {}
|
||||
public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore,
|
||||
AsyncCompletionCallback<CommandResult> callback) {
|
||||
logger.info("OntapPrimaryDatastoreDriver.revertSnapshot: Reverting snapshot [{}]",
|
||||
snapshotOnImageStore.getId());
|
||||
|
||||
CommandResult result = new CommandResult();
|
||||
|
||||
try {
|
||||
// Use the snapshot that has the ONTAP details stored
|
||||
SnapshotInfo snapshot = snapshotOnPrimaryStore != null ? snapshotOnPrimaryStore : snapshotOnImageStore;
|
||||
long snapshotId = snapshot.getId();
|
||||
|
||||
// Retrieve snapshot details stored during takeSnapshot
|
||||
String flexVolUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.BASE_ONTAP_FV_ID);
|
||||
String ontapSnapshotUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_ID);
|
||||
String snapshotName = getSnapshotDetail(snapshotId, OntapStorageConstants.ONTAP_SNAP_NAME);
|
||||
String volumePath = getSnapshotDetail(snapshotId, OntapStorageConstants.VOLUME_PATH);
|
||||
String poolIdStr = getSnapshotDetail(snapshotId, OntapStorageConstants.PRIMARY_POOL_ID);
|
||||
String protocol = getSnapshotDetail(snapshotId, OntapStorageConstants.PROTOCOL);
|
||||
|
||||
if (flexVolUuid == null || snapshotName == null || volumePath == null || poolIdStr == null) {
|
||||
throw new CloudRuntimeException("Missing required snapshot details for snapshot " + snapshotId +
|
||||
" (flexVolUuid=" + flexVolUuid + ", snapshotName=" + snapshotName +
|
||||
", volumePath=" + volumePath + ", poolId=" + poolIdStr + ")");
|
||||
}
|
||||
|
||||
long poolId = Long.parseLong(poolIdStr);
|
||||
Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(poolId);
|
||||
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
|
||||
|
||||
// Get the FlexVolume name (required for CLI-based restore API for all protocols)
|
||||
String flexVolName = poolDetails.get(OntapStorageConstants.VOLUME_NAME);
|
||||
if (flexVolName == null || flexVolName.isEmpty()) {
|
||||
throw new CloudRuntimeException("FlexVolume name not found in pool details for pool " + poolId);
|
||||
}
|
||||
|
||||
// Prepare protocol-specific parameters (lunUuid is only needed for backward compatibility)
|
||||
String lunUuid = null;
|
||||
if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
|
||||
lunUuid = getSnapshotDetail(snapshotId, OntapStorageConstants.LUN_DOT_UUID);
|
||||
}
|
||||
|
||||
// Delegate to strategy class for protocol-specific restore
|
||||
JobResponse jobResponse = storageStrategy.revertSnapshotForCloudStackVolume(
|
||||
snapshotName, flexVolUuid, ontapSnapshotUuid, volumePath, lunUuid, flexVolName);
|
||||
|
||||
if (jobResponse == null || jobResponse.getJob() == null) {
|
||||
throw new CloudRuntimeException("Failed to initiate restore from snapshot [" +
|
||||
snapshotName + "]");
|
||||
}
|
||||
|
||||
// Poll for job completion (use longer timeout for large LUNs/files)
|
||||
Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 60, 2000);
|
||||
if (!jobSucceeded) {
|
||||
throw new CloudRuntimeException("Restore job failed for snapshot [" +
|
||||
snapshotName + "]");
|
||||
}
|
||||
|
||||
logger.info("revertSnapshot: Successfully restored {} [{}] from snapshot [{}]",
|
||||
ProtocolType.ISCSI.name().equalsIgnoreCase(protocol) ? "LUN" : "file",
|
||||
volumePath, snapshotName);
|
||||
|
||||
result.setResult(null); // Success
|
||||
|
||||
} catch (Exception ex) {
|
||||
logger.error("revertSnapshot: Failed to revert snapshot {}", snapshotOnImageStore, ex);
|
||||
result.setResult(ex.toString());
|
||||
}
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a snapshot detail value by key.
|
||||
*
|
||||
* @param snapshotId The CloudStack snapshot ID
|
||||
* @param key The detail key
|
||||
* @return The detail value, or null if not found
|
||||
*/
|
||||
private String getSnapshotDetail(long snapshotId, String key) {
|
||||
SnapshotDetailsVO detail = snapshotDetailsDao.findDetail(snapshotId, key);
|
||||
return detail != null ? detail.getValue() : null;
|
||||
}
|
||||
|
||||
    // Intentionally a no-op: this driver performs no QoS adjustments when a volume is migrated.
    @Override
    public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {}
|
||||
|
|
@ -149,7 +873,7 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
|||
|
||||
@Override
|
||||
public boolean canProvideVolumeStats() {
|
||||
return true;
|
||||
return false; // Not yet implemented for RAW managed NFS
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -184,5 +908,111 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {}
|
||||
public void detachVolumeFromAllStorageNodes(Volume volume) {
|
||||
}
|
||||
|
||||
private CloudStackVolume createDeleteCloudStackVolumeRequest(StoragePool storagePool, Map<String, String> details, VolumeInfo volumeInfo) {
|
||||
CloudStackVolume cloudStackVolumeDeleteRequest = null;
|
||||
|
||||
String protocol = details.get(OntapStorageConstants.PROTOCOL);
|
||||
ProtocolType protocolType = ProtocolType.valueOf(protocol);
|
||||
switch (protocolType) {
|
||||
case NFS3:
|
||||
cloudStackVolumeDeleteRequest = new CloudStackVolume();
|
||||
cloudStackVolumeDeleteRequest.setDatastoreId(String.valueOf(storagePool.getId()));
|
||||
cloudStackVolumeDeleteRequest.setVolumeInfo(volumeInfo);
|
||||
break;
|
||||
case ISCSI:
|
||||
// Retrieve LUN identifiers stored during volume creation
|
||||
String lunName = volumeDetailsDao.findDetail(volumeInfo.getId(), OntapStorageConstants.LUN_DOT_NAME).getValue();
|
||||
String lunUUID = volumeDetailsDao.findDetail(volumeInfo.getId(), OntapStorageConstants.LUN_DOT_UUID).getValue();
|
||||
if (lunName == null) {
|
||||
throw new CloudRuntimeException("Missing LUN name for volume " + volumeInfo.getId());
|
||||
}
|
||||
cloudStackVolumeDeleteRequest = new CloudStackVolume();
|
||||
Lun lun = new Lun();
|
||||
lun.setName(lunName);
|
||||
lun.setUuid(lunUUID);
|
||||
cloudStackVolumeDeleteRequest.setLun(lun);
|
||||
break;
|
||||
default:
|
||||
throw new CloudRuntimeException("Unsupported protocol " + protocol);
|
||||
|
||||
}
|
||||
return cloudStackVolumeDeleteRequest;
|
||||
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Snapshot Helper Methods
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Builds a snapshot name with proper length constraints.
|
||||
* Format: {@code <volumeName>-<snapshotUuid>}
|
||||
*/
|
||||
private String buildSnapshotName(String volumeName, String snapshotUuid) {
|
||||
String name = volumeName + "-" + snapshotUuid;
|
||||
int maxLength = OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH;
|
||||
int trimRequired = name.length() - maxLength;
|
||||
|
||||
if (trimRequired > 0) {
|
||||
name = StringUtils.left(volumeName, volumeName.length() - trimRequired) + "-" + snapshotUuid;
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Persists snapshot metadata in snapshot_details table.
|
||||
*
|
||||
* @param csSnapshotId CloudStack snapshot ID
|
||||
* @param csVolumeId Source CloudStack volume ID
|
||||
* @param flexVolUuid ONTAP FlexVolume UUID
|
||||
* @param ontapSnapshotUuid ONTAP FlexVolume snapshot UUID
|
||||
* @param snapshotName ONTAP snapshot name
|
||||
* @param volumePath Path of the volume file/LUN within the FlexVolume (for restore)
|
||||
* @param storagePoolId Primary storage pool ID
|
||||
* @param protocol Storage protocol (NFS3 or ISCSI)
|
||||
* @param lunUuid LUN UUID (only for iSCSI, null for NFS)
|
||||
*/
|
||||
private void updateSnapshotDetails(long csSnapshotId, long csVolumeId, String flexVolUuid,
|
||||
String ontapSnapshotUuid, String snapshotName,
|
||||
String volumePath, long storagePoolId, String protocol,
|
||||
String lunUuid) {
|
||||
SnapshotDetailsVO snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.SRC_CS_VOLUME_ID, String.valueOf(csVolumeId), false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.BASE_ONTAP_FV_ID, flexVolUuid, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.ONTAP_SNAP_ID, ontapSnapshotUuid, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.ONTAP_SNAP_NAME, snapshotName, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.VOLUME_PATH, volumePath, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.PRIMARY_POOL_ID, String.valueOf(storagePoolId), false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.PROTOCOL, protocol, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
|
||||
// Store LUN UUID for iSCSI volumes (required for LUN restore API)
|
||||
if (lunUuid != null && !lunUuid.isEmpty()) {
|
||||
snapshotDetail = new SnapshotDetailsVO(csSnapshotId,
|
||||
OntapStorageConstants.LUN_DOT_UUID, lunUuid, false);
|
||||
snapshotDetailsDao.persist(snapshotDetail);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ import java.util.Map;
|
|||
public interface NASFeignClient {
|
||||
|
||||
// File Operations
|
||||
@RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}")
|
||||
@RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}?return_metadata=true")
|
||||
@Headers({"Authorization: {authHeader}"})
|
||||
OntapResponse<FileInfo> getFileResponse(@Param("authHeader") String authHeader,
|
||||
@Param("volumeUuid") String volumeUUID,
|
||||
|
|
|
|||
|
|
@ -23,6 +23,8 @@ import org.apache.cloudstack.storage.feign.model.Igroup;
|
|||
import org.apache.cloudstack.storage.feign.model.IscsiService;
|
||||
import org.apache.cloudstack.storage.feign.model.Lun;
|
||||
import org.apache.cloudstack.storage.feign.model.LunMap;
|
||||
import org.apache.cloudstack.storage.feign.model.LunRestoreRequest;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import feign.Headers;
|
||||
import feign.Param;
|
||||
|
|
@ -88,4 +90,24 @@ public interface SANFeignClient {
|
|||
void deleteLunMap(@Param("authHeader") String authHeader,
|
||||
@Param("lunUuid") String lunUUID,
|
||||
@Param("igroupUuid") String igroupUUID);
|
||||
|
||||
    // LUN Restore API
    /**
     * Restores a LUN's contents from a FlexVolume snapshot.
     *
     * <p>ONTAP REST: {@code POST /api/storage/luns/{lun.uuid}/restore}</p>
     *
     * <p>The LUN must already exist and the snapshot must contain the LUN data;
     * the restore is asynchronous and is tracked via the returned job.</p>
     *
     * @param authHeader Basic auth header
     * @param lunUuid    UUID of the LUN to restore
     * @param request    request body carrying the snapshot name and destination path
     * @return JobResponse containing the async job reference
     */
    @RequestLine("POST /api/storage/luns/{lunUuid}/restore")
    @Headers({"Authorization: {authHeader}", "Content-Type: application/json"})
    JobResponse restoreLun(@Param("authHeader") String authHeader,
                           @Param("lunUuid") String lunUuid,
                           LunRestoreRequest request);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,184 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.feign.client;
|
||||
|
||||
import feign.Headers;
|
||||
import feign.Param;
|
||||
import feign.QueryMap;
|
||||
import feign.RequestLine;
|
||||
import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest;
|
||||
import org.apache.cloudstack.storage.feign.model.FlexVolSnapshot;
|
||||
import org.apache.cloudstack.storage.feign.model.SnapshotFileRestoreRequest;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * Feign client for ONTAP FlexVolume snapshot operations.
 *
 * <p>Maps to the ONTAP REST API endpoint:
 * {@code /api/storage/volumes/{volume_uuid}/snapshots}</p>
 *
 * <p>FlexVolume snapshots are point-in-time, space-efficient copies of an entire
 * FlexVolume. Unlike file-level clones, a single FlexVolume snapshot atomically
 * captures <b>all</b> files/LUNs within the volume, making it ideal for VM-level
 * snapshots when multiple CloudStack disks reside on the same FlexVolume.</p>
 */
public interface SnapshotFeignClient {

    /**
     * Creates a new snapshot for the specified FlexVolume.
     *
     * <p>ONTAP REST: {@code POST /api/storage/volumes/{volume_uuid}/snapshots}</p>
     *
     * @param authHeader Basic auth header
     * @param volumeUuid UUID of the ONTAP FlexVolume
     * @param snapshot   Snapshot request body (at minimum, the {@code name} field)
     * @return JobResponse containing the async job reference
     */
    @RequestLine("POST /api/storage/volumes/{volumeUuid}/snapshots")
    @Headers({"Authorization: {authHeader}", "Content-Type: application/json"})
    JobResponse createSnapshot(@Param("authHeader") String authHeader,
                               @Param("volumeUuid") String volumeUuid,
                               FlexVolSnapshot snapshot);

    /**
     * Lists snapshots for the specified FlexVolume.
     *
     * <p>ONTAP REST: {@code GET /api/storage/volumes/{volume_uuid}/snapshots}</p>
     *
     * @param authHeader  Basic auth header
     * @param volumeUuid  UUID of the ONTAP FlexVolume
     * @param queryParams Optional query parameters (e.g., {@code name}, {@code fields})
     * @return Paginated response of FlexVolSnapshot records
     */
    @RequestLine("GET /api/storage/volumes/{volumeUuid}/snapshots")
    @Headers({"Authorization: {authHeader}"})
    OntapResponse<FlexVolSnapshot> getSnapshots(@Param("authHeader") String authHeader,
                                                @Param("volumeUuid") String volumeUuid,
                                                @QueryMap Map<String, Object> queryParams);

    /**
     * Retrieves a specific snapshot by UUID.
     *
     * <p>ONTAP REST: {@code GET /api/storage/volumes/{volume_uuid}/snapshots/{uuid}}</p>
     *
     * @param authHeader   Basic auth header
     * @param volumeUuid   UUID of the ONTAP FlexVolume
     * @param snapshotUuid UUID of the snapshot
     * @return The FlexVolSnapshot object
     */
    @RequestLine("GET /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}")
    @Headers({"Authorization: {authHeader}"})
    FlexVolSnapshot getSnapshotByUuid(@Param("authHeader") String authHeader,
                                      @Param("volumeUuid") String volumeUuid,
                                      @Param("snapshotUuid") String snapshotUuid);

    /**
     * Deletes a specific snapshot.
     *
     * <p>ONTAP REST: {@code DELETE /api/storage/volumes/{volume_uuid}/snapshots/{uuid}}</p>
     *
     * @param authHeader   Basic auth header
     * @param volumeUuid   UUID of the ONTAP FlexVolume
     * @param snapshotUuid UUID of the snapshot to delete
     * @return JobResponse containing the async job reference
     */
    @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}")
    @Headers({"Authorization: {authHeader}"})
    JobResponse deleteSnapshot(@Param("authHeader") String authHeader,
                               @Param("volumeUuid") String volumeUuid,
                               @Param("snapshotUuid") String snapshotUuid);

    /**
     * Restores a volume to a specific snapshot.
     *
     * <p>ONTAP REST: {@code PATCH /api/storage/volumes/{volume_uuid}/snapshots/{uuid}}.
     * The restore is triggered via the {@code restore_to_snapshot=true} query parameter
     * baked into the request line; no request body is sent.</p>
     *
     * <p><b>Note:</b> This is a destructive operation — all data written after the
     * snapshot was taken will be lost.</p>
     *
     * @param authHeader   Basic auth header
     * @param volumeUuid   UUID of the ONTAP FlexVolume
     * @param snapshotUuid UUID of the snapshot to restore to
     * @return JobResponse containing the async job reference
     */
    @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}?restore_to_snapshot=true")
    @Headers({"Authorization: {authHeader}", "Content-Type: application/json"})
    JobResponse restoreSnapshot(@Param("authHeader") String authHeader,
                                @Param("volumeUuid") String volumeUuid,
                                @Param("snapshotUuid") String snapshotUuid);

    /**
     * Restores a single file or LUN from a FlexVolume snapshot.
     *
     * <p>ONTAP REST:
     * {@code POST /api/storage/volumes/{volume_uuid}/snapshots/{snapshot_uuid}/files/{file_path}/restore}</p>
     *
     * <p>This restores only the specified file/LUN from the snapshot to the
     * given {@code destination_path}, without reverting the entire FlexVolume.
     * Ideal when multiple VMs share the same FlexVolume.</p>
     *
     * @param authHeader   Basic auth header
     * @param volumeUuid   UUID of the ONTAP FlexVolume
     * @param snapshotUuid UUID of the snapshot containing the file
     * @param filePath     path of the file within the snapshot (URL-encoded if needed)
     * @param request      request body with {@code destination_path}
     * @return JobResponse containing the async job reference
     */
    @RequestLine("POST /api/storage/volumes/{volumeUuid}/snapshots/{snapshotUuid}/files/{filePath}/restore")
    @Headers({"Authorization: {authHeader}", "Content-Type: application/json"})
    JobResponse restoreFileFromSnapshot(@Param("authHeader") String authHeader,
                                        @Param("volumeUuid") String volumeUuid,
                                        @Param("snapshotUuid") String snapshotUuid,
                                        @Param("filePath") String filePath,
                                        SnapshotFileRestoreRequest request);

    /**
     * Restores a single file or LUN from a FlexVolume snapshot using the CLI native API.
     *
     * <p>ONTAP REST (CLI passthrough):
     * {@code POST /api/private/cli/volume/snapshot/restore-file}</p>
     *
     * <p>This CLI-based API works for both NFS files and iSCSI LUNs. The request body
     * carries all required parameters: vserver, volume, snapshot, and path.</p>
     *
     * <p>Example payload:
     * <pre>
     * {
     *   "vserver": "vs0",
     *   "volume": "rajiv_ONTAP_SP1",
     *   "snapshot": "DATA-3-428726fe-7440-4b41-8d47-3f654e5d9814",
     *   "path": "/d266bb2c-d479-47ad-81c3-a070e8bb58c0"
     * }
     * </pre>
     * </p>
     *
     * @param authHeader Basic auth header
     * @param request    CLI snapshot restore request containing vserver, volume, snapshot, and path
     * @return JobResponse containing the async job reference (if applicable)
     */
    @RequestLine("POST /api/private/cli/volume/snapshot/restore-file")
    @Headers({"Authorization: {authHeader}", "Content-Type: application/json"})
    JobResponse restoreFileFromSnapshotCli(@Param("authHeader") String authHeader,
                                           CliSnapshotRestoreRequest request);
}
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
/**
|
||||
* Request body for the ONTAP CLI-based Snapshot File Restore API.
|
||||
*
|
||||
* <p>ONTAP REST endpoint (CLI passthrough):
|
||||
* {@code POST /api/private/cli/volume/snapshot/restore-file}</p>
|
||||
*
|
||||
* <p>This API restores a single file or LUN from a FlexVolume snapshot to a
|
||||
* specified destination path using the CLI native implementation.
|
||||
* It works for both NFS files and iSCSI LUNs.</p>
|
||||
*
|
||||
* <p>Example payload:
|
||||
* <pre>
|
||||
* {
|
||||
* "vserver": "vs0",
|
||||
* "volume": "rajiv_ONTAP_SP1",
|
||||
* "snapshot": "DATA-3-428726fe-7440-4b41-8d47-3f654e5d9814",
|
||||
* "path": "/d266bb2c-d479-47ad-81c3-a070e8bb58c0"
|
||||
* }
|
||||
* </pre>
|
||||
* </p>
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class CliSnapshotRestoreRequest {
|
||||
|
||||
@JsonProperty("vserver")
|
||||
private String vserver;
|
||||
|
||||
@JsonProperty("volume")
|
||||
private String volume;
|
||||
|
||||
@JsonProperty("snapshot")
|
||||
private String snapshot;
|
||||
|
||||
@JsonProperty("path")
|
||||
private String path;
|
||||
|
||||
public CliSnapshotRestoreRequest() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a CLI snapshot restore request.
|
||||
*
|
||||
* @param vserver The SVM (vserver) name
|
||||
* @param volume The FlexVolume name
|
||||
* @param snapshot The snapshot name
|
||||
* @param path The file/LUN path to restore (e.g., "/uuid.qcow2" or "/lun_name")
|
||||
*/
|
||||
public CliSnapshotRestoreRequest(String vserver, String volume, String snapshot, String path) {
|
||||
this.vserver = vserver;
|
||||
this.volume = volume;
|
||||
this.snapshot = snapshot;
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
public String getVserver() {
|
||||
return vserver;
|
||||
}
|
||||
|
||||
public void setVserver(String vserver) {
|
||||
this.vserver = vserver;
|
||||
}
|
||||
|
||||
public String getVolume() {
|
||||
return volume;
|
||||
}
|
||||
|
||||
public void setVolume(String volume) {
|
||||
this.volume = volume;
|
||||
}
|
||||
|
||||
public String getSnapshot() {
|
||||
return snapshot;
|
||||
}
|
||||
|
||||
public void setSnapshot(String snapshot) {
|
||||
this.snapshot = snapshot;
|
||||
}
|
||||
|
||||
public String getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "CliSnapshotRestoreRequest{" +
|
||||
"vserver='" + vserver + '\'' +
|
||||
", volume='" + volume + '\'' +
|
||||
", snapshot='" + snapshot + '\'' +
|
||||
", path='" + path + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
@ -25,7 +25,6 @@ import com.fasterxml.jackson.annotation.JsonInclude;
|
|||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
import com.fasterxml.jackson.annotation.JsonValue;
|
||||
|
||||
import java.time.OffsetDateTime;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
|
|
@ -36,8 +35,6 @@ import java.util.Objects;
|
|||
public class FileInfo {
|
||||
@JsonProperty("bytes_used")
|
||||
private Long bytesUsed = null;
|
||||
@JsonProperty("creation_time")
|
||||
private OffsetDateTime creationTime = null;
|
||||
@JsonProperty("fill_enabled")
|
||||
private Boolean fillEnabled = null;
|
||||
@JsonProperty("is_empty")
|
||||
|
|
@ -46,8 +43,6 @@ public class FileInfo {
|
|||
private Boolean isSnapshot = null;
|
||||
@JsonProperty("is_vm_aligned")
|
||||
private Boolean isVmAligned = null;
|
||||
@JsonProperty("modified_time")
|
||||
private OffsetDateTime modifiedTime = null;
|
||||
@JsonProperty("name")
|
||||
private String name = null;
|
||||
@JsonProperty("overwrite_enabled")
|
||||
|
|
@ -110,10 +105,6 @@ public class FileInfo {
|
|||
return bytesUsed;
|
||||
}
|
||||
|
||||
public OffsetDateTime getCreationTime() {
|
||||
return creationTime;
|
||||
}
|
||||
|
||||
public FileInfo fillEnabled(Boolean fillEnabled) {
|
||||
this.fillEnabled = fillEnabled;
|
||||
return this;
|
||||
|
|
@ -149,11 +140,6 @@ public class FileInfo {
|
|||
return isVmAligned;
|
||||
}
|
||||
|
||||
|
||||
public OffsetDateTime getModifiedTime() {
|
||||
return modifiedTime;
|
||||
}
|
||||
|
||||
public FileInfo name(String name) {
|
||||
this.name = name;
|
||||
return this;
|
||||
|
|
@ -266,12 +252,10 @@ public class FileInfo {
|
|||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("class FileInfo {\n");
|
||||
sb.append(" bytesUsed: ").append(toIndentedString(bytesUsed)).append("\n");
|
||||
sb.append(" creationTime: ").append(toIndentedString(creationTime)).append("\n");
|
||||
sb.append(" fillEnabled: ").append(toIndentedString(fillEnabled)).append("\n");
|
||||
sb.append(" isEmpty: ").append(toIndentedString(isEmpty)).append("\n");
|
||||
sb.append(" isSnapshot: ").append(toIndentedString(isSnapshot)).append("\n");
|
||||
sb.append(" isVmAligned: ").append(toIndentedString(isVmAligned)).append("\n");
|
||||
sb.append(" modifiedTime: ").append(toIndentedString(modifiedTime)).append("\n");
|
||||
sb.append(" name: ").append(toIndentedString(name)).append("\n");
|
||||
sb.append(" overwriteEnabled: ").append(toIndentedString(overwriteEnabled)).append("\n");
|
||||
sb.append(" path: ").append(toIndentedString(path)).append("\n");
|
||||
|
|
|
|||
|
|
@ -0,0 +1,122 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
/**
|
||||
* Model representing an ONTAP FlexVolume-level snapshot.
|
||||
*
|
||||
* <p>Maps to the ONTAP REST API resource at
|
||||
* {@code /api/storage/volumes/{volume.uuid}/snapshots}.</p>
|
||||
*
|
||||
* <p>For creation, only the {@code name} field is required in the POST body.
|
||||
* ONTAP returns the full representation including {@code uuid}, {@code name},
|
||||
* and {@code create_time} on GET requests.</p>
|
||||
*
|
||||
* @see <a href="https://docs.netapp.com/us-en/ontap-restapi/ontap/storage_volumes_volume.uuid_snapshots_endpoint_overview.html">
|
||||
* ONTAP REST API - Volume Snapshots</a>
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class FlexVolSnapshot {
|
||||
|
||||
@JsonProperty("uuid")
|
||||
private String uuid;
|
||||
|
||||
@JsonProperty("name")
|
||||
private String name;
|
||||
|
||||
@JsonProperty("create_time")
|
||||
private String createTime;
|
||||
|
||||
@JsonProperty("comment")
|
||||
private String comment;
|
||||
|
||||
/** Concise reference to the parent volume (returned in GET responses). */
|
||||
@JsonProperty("volume")
|
||||
private VolumeConcise volume;
|
||||
|
||||
public FlexVolSnapshot() {
|
||||
// default constructor for Jackson
|
||||
}
|
||||
|
||||
public FlexVolSnapshot(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public FlexVolSnapshot(String name, String comment) {
|
||||
this.name = name;
|
||||
this.comment = comment;
|
||||
}
|
||||
|
||||
// ── Getters / Setters ────────────────────────────────────────────────────
|
||||
|
||||
public String getUuid() {
|
||||
return uuid;
|
||||
}
|
||||
|
||||
public void setUuid(String uuid) {
|
||||
this.uuid = uuid;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getCreateTime() {
|
||||
return createTime;
|
||||
}
|
||||
|
||||
public void setCreateTime(String createTime) {
|
||||
this.createTime = createTime;
|
||||
}
|
||||
|
||||
public String getComment() {
|
||||
return comment;
|
||||
}
|
||||
|
||||
public void setComment(String comment) {
|
||||
this.comment = comment;
|
||||
}
|
||||
|
||||
public VolumeConcise getVolume() {
|
||||
return volume;
|
||||
}
|
||||
|
||||
public void setVolume(VolumeConcise volume) {
|
||||
this.volume = volume;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "FlexVolSnapshot{" +
|
||||
"uuid='" + uuid + '\'' +
|
||||
", name='" + name + '\'' +
|
||||
", createTime='" + createTime + '\'' +
|
||||
", comment='" + comment + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
@ -19,9 +19,11 @@
|
|||
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonCreator;
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
import com.fasterxml.jackson.annotation.JsonValue;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
|
@ -71,6 +73,7 @@ public class Igroup {
|
|||
this.value = value;
|
||||
}
|
||||
|
||||
@JsonValue
|
||||
public String getValue() {
|
||||
return value;
|
||||
}
|
||||
|
|
@ -80,9 +83,11 @@ public class Igroup {
|
|||
return String.valueOf(value);
|
||||
}
|
||||
|
||||
@JsonCreator
|
||||
public static OsTypeEnum fromValue(String text) {
|
||||
if (text == null) return null;
|
||||
for (OsTypeEnum b : OsTypeEnum.values()) {
|
||||
if (String.valueOf(b.value).equals(text)) {
|
||||
if (text.equalsIgnoreCase(b.value)) {
|
||||
return b;
|
||||
}
|
||||
}
|
||||
|
|
@ -122,6 +127,7 @@ public class Igroup {
|
|||
this.value = value;
|
||||
}
|
||||
|
||||
@JsonValue
|
||||
public String getValue() {
|
||||
return value;
|
||||
}
|
||||
|
|
@ -131,9 +137,11 @@ public class Igroup {
|
|||
return String.valueOf(value);
|
||||
}
|
||||
|
||||
@JsonCreator
|
||||
public static ProtocolEnum fromValue(String text) {
|
||||
if (text == null) return null;
|
||||
for (ProtocolEnum b : ProtocolEnum.values()) {
|
||||
if (String.valueOf(b.value).equals(text)) {
|
||||
if (text.equalsIgnoreCase(b.value)) {
|
||||
return b;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
/**
|
||||
* Request body for the ONTAP LUN Restore API.
|
||||
*
|
||||
* <p>ONTAP REST endpoint:
|
||||
* {@code POST /api/storage/luns/{lun.uuid}/restore}</p>
|
||||
*
|
||||
* <p>This API restores a LUN from a FlexVolume snapshot to a specified
|
||||
* destination path. Unlike file restore, this is LUN-specific.</p>
|
||||
*
|
||||
* <p>Example payload:
|
||||
* <pre>
|
||||
* {
|
||||
* "snapshot": {
|
||||
* "name": "snapshot_name"
|
||||
* },
|
||||
* "destination": {
|
||||
* "path": "/vol/volume_name/lun_name"
|
||||
* }
|
||||
* }
|
||||
* </pre>
|
||||
* </p>
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class LunRestoreRequest {
|
||||
|
||||
@JsonProperty("snapshot")
|
||||
private SnapshotRef snapshot;
|
||||
|
||||
@JsonProperty("destination")
|
||||
private Destination destination;
|
||||
|
||||
public LunRestoreRequest() {
|
||||
}
|
||||
|
||||
public LunRestoreRequest(String snapshotName, String destinationPath) {
|
||||
this.snapshot = new SnapshotRef(snapshotName);
|
||||
this.destination = new Destination(destinationPath);
|
||||
}
|
||||
|
||||
public SnapshotRef getSnapshot() {
|
||||
return snapshot;
|
||||
}
|
||||
|
||||
public void setSnapshot(SnapshotRef snapshot) {
|
||||
this.snapshot = snapshot;
|
||||
}
|
||||
|
||||
public Destination getDestination() {
|
||||
return destination;
|
||||
}
|
||||
|
||||
public void setDestination(Destination destination) {
|
||||
this.destination = destination;
|
||||
}
|
||||
|
||||
/**
|
||||
* Nested class for snapshot reference.
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public static class SnapshotRef {
|
||||
|
||||
@JsonProperty("name")
|
||||
private String name;
|
||||
|
||||
public SnapshotRef() {
|
||||
}
|
||||
|
||||
public SnapshotRef(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Nested class for destination path.
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public static class Destination {
|
||||
|
||||
@JsonProperty("path")
|
||||
private String path;
|
||||
|
||||
public Destination() {
|
||||
}
|
||||
|
||||
public Destination(String path) {
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
public String getPath() {
|
||||
return path;
|
||||
}
|
||||
|
||||
public void setPath(String path) {
|
||||
this.path = path;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -24,20 +24,18 @@ import org.apache.cloudstack.storage.service.model.ProtocolType;
|
|||
public class OntapStorage {
|
||||
private final String username;
|
||||
private final String password;
|
||||
private final String managementLIF;
|
||||
private final String storageIP;
|
||||
private final String svmName;
|
||||
private final Long size;
|
||||
private final ProtocolType protocolType;
|
||||
private final Boolean isDisaggregated;
|
||||
|
||||
public OntapStorage(String username, String password, String managementLIF, String svmName, Long size, ProtocolType protocolType, Boolean isDisaggregated) {
|
||||
public OntapStorage(String username, String password, String storageIP, String svmName, Long size, ProtocolType protocolType) {
|
||||
this.username = username;
|
||||
this.password = password;
|
||||
this.managementLIF = managementLIF;
|
||||
this.storageIP = storageIP;
|
||||
this.svmName = svmName;
|
||||
this.size = size;
|
||||
this.protocolType = protocolType;
|
||||
this.isDisaggregated = isDisaggregated;
|
||||
}
|
||||
|
||||
public String getUsername() {
|
||||
|
|
@ -48,13 +46,9 @@ public class OntapStorage {
|
|||
return password;
|
||||
}
|
||||
|
||||
public String getManagementLIF() {
|
||||
return managementLIF;
|
||||
}
|
||||
public String getStorageIP() { return storageIP; }
|
||||
|
||||
public String getSvmName() {
|
||||
return svmName;
|
||||
}
|
||||
public String getSvmName() { return svmName; }
|
||||
|
||||
public Long getSize() {
|
||||
return size;
|
||||
|
|
@ -63,8 +57,4 @@ public class OntapStorage {
|
|||
public ProtocolType getProtocol() {
|
||||
return protocolType;
|
||||
}
|
||||
|
||||
public Boolean getIsDisaggregated() {
|
||||
return isDisaggregated;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
/**
|
||||
* Request body for the ONTAP Snapshot File Restore API.
|
||||
*
|
||||
* <p>ONTAP REST endpoint:
|
||||
* {@code POST /api/storage/volumes/{volume.uuid}/snapshots/{snapshot.uuid}/files/{file.path}/restore}</p>
|
||||
*
|
||||
* <p>This API restores a single file or LUN from a FlexVolume snapshot to a
|
||||
* specified destination path, without reverting the entire FlexVolume.</p>
|
||||
*/
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class SnapshotFileRestoreRequest {
|
||||
|
||||
@JsonProperty("destination_path")
|
||||
private String destinationPath;
|
||||
|
||||
public SnapshotFileRestoreRequest() {
|
||||
}
|
||||
|
||||
public SnapshotFileRestoreRequest(String destinationPath) {
|
||||
this.destinationPath = destinationPath;
|
||||
}
|
||||
|
||||
public String getDestinationPath() {
|
||||
return destinationPath;
|
||||
}
|
||||
|
||||
public void setDestinationPath(String destinationPath) {
|
||||
this.destinationPath = destinationPath;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.cloudstack.storage.feign.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class VolumeConcise {
|
||||
@JsonProperty("uuid")
|
||||
private String uuid;
|
||||
|
||||
@JsonProperty("name")
|
||||
private String name;
|
||||
|
||||
public String getUuid() {
|
||||
return uuid;
|
||||
}
|
||||
|
||||
public void setUuid(String uuid) {
|
||||
this.uuid = uuid;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
}
|
||||
|
|
@ -31,15 +31,16 @@ import com.cloud.storage.Storage;
|
|||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolAutomation;
|
||||
import com.cloud.utils.StringUtils;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.cloudstack.api.ApiConstants;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
|
|
@ -55,6 +56,7 @@ import org.apache.cloudstack.storage.service.model.ProtocolType;
|
|||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
|
|
@ -79,12 +81,16 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
|
||||
private static final long ONTAP_MIN_VOLUME_SIZE_IN_BYTES = 1677721600L;
|
||||
|
||||
/**
|
||||
* Creates primary storage on NetApp storage
|
||||
* @param dsInfos datastore information map
|
||||
* @return DataStore instance
|
||||
*/
|
||||
@Override
|
||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||
if (dsInfos == null) {
|
||||
throw new CloudRuntimeException("Datastore info map is null, cannot create primary storage");
|
||||
}
|
||||
String url = (String) dsInfos.get("url");
|
||||
Long zoneId = (Long) dsInfos.get("zoneId");
|
||||
Long podId = (Long) dsInfos.get("podId");
|
||||
Long clusterId = (Long) dsInfos.get("clusterId");
|
||||
|
|
@ -99,10 +105,11 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId);
|
||||
logger.debug("Received capacityBytes from UI: " + capacityBytes);
|
||||
|
||||
// Additional details requested for ONTAP primary storage pool creation
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
|
||||
|
||||
capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, url, details);
|
||||
capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, details);
|
||||
|
||||
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
|
||||
if (clusterId != null) {
|
||||
|
|
@ -115,23 +122,21 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
}
|
||||
|
||||
details.put(OntapStorageConstants.SIZE, capacityBytes.toString());
|
||||
details.putIfAbsent(OntapStorageConstants.IS_DISAGGREGATED, "false");
|
||||
|
||||
ProtocolType protocol = ProtocolType.valueOf(details.get(OntapStorageConstants.PROTOCOL));
|
||||
|
||||
// long volumeSize = Long.parseLong(details.get(OntapStorageConstants.SIZE));
|
||||
OntapStorage ontapStorage = new OntapStorage(
|
||||
details.get(OntapStorageConstants.USERNAME),
|
||||
details.get(OntapStorageConstants.PASSWORD),
|
||||
details.get(OntapStorageConstants.MANAGEMENT_LIF),
|
||||
details.get(OntapStorageConstants.STORAGE_IP),
|
||||
details.get(OntapStorageConstants.SVM_NAME),
|
||||
capacityBytes,
|
||||
protocol,
|
||||
Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED).toLowerCase()));
|
||||
protocol);
|
||||
|
||||
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
|
||||
boolean isValid = storageStrategy.connect();
|
||||
if (isValid) {
|
||||
// Get the DataLIF for data access
|
||||
String dataLIF = storageStrategy.getNetworkInterface();
|
||||
if (dataLIF == null || dataLIF.isEmpty()) {
|
||||
throw new CloudRuntimeException("Failed to retrieve Data LIF from ONTAP, cannot create primary storage");
|
||||
|
|
@ -157,6 +162,7 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
|
||||
}
|
||||
|
||||
// Determine storage pool type, path and port based on protocol
|
||||
String path;
|
||||
int port;
|
||||
switch (protocol) {
|
||||
|
|
@ -164,7 +170,9 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
|
||||
path = OntapStorageConstants.SLASH + storagePoolName;
|
||||
port = OntapStorageConstants.NFS3_PORT;
|
||||
logger.info("Setting NFS path for storage pool: " + path + ", port: " + port);
|
||||
// Force NFSv3 for ONTAP managed storage to avoid NFSv4 ID mapping issues
|
||||
details.put(ApiConstants.NFS_MOUNT_OPTIONS, OntapStorageConstants.NFS3_MOUNT_OPTIONS_VER_3);
|
||||
logger.info("Setting NFS path for storage pool: " + path + ", port: " + port + " with mount option: vers=3");
|
||||
break;
|
||||
case ISCSI:
|
||||
parameters.setType(Storage.StoragePoolType.Iscsi);
|
||||
|
|
@ -196,9 +204,9 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
}
|
||||
|
||||
private long validateInitializeInputs(Long capacityBytes, Long podId, Long clusterId, Long zoneId,
|
||||
String storagePoolName, String providerName, boolean managed, String url, Map<String, String> details) {
|
||||
String storagePoolName, String providerName, boolean managed, Map<String, String> details) {
|
||||
|
||||
// Capacity validation
|
||||
// Validate and set capacity
|
||||
if (capacityBytes == null || capacityBytes <= 0) {
|
||||
logger.warn("capacityBytes not provided or invalid (" + capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE_IN_BYTES);
|
||||
capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES;
|
||||
|
|
@ -207,60 +215,56 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES;
|
||||
}
|
||||
|
||||
// Scope (pod/cluster/zone) validation
|
||||
// Validate scope
|
||||
if (podId == null ^ clusterId == null) {
|
||||
throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage");
|
||||
}
|
||||
if (podId == null && clusterId == null) {
|
||||
if (zoneId != null) {
|
||||
logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
|
||||
} else {
|
||||
throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
|
||||
}
|
||||
|
||||
if (podId == null && zoneId == null) {
|
||||
throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
|
||||
}
|
||||
|
||||
if (podId == null) {
|
||||
logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
|
||||
}
|
||||
|
||||
// Basic parameter validation
|
||||
if (StringUtils.isBlank(storagePoolName)) {
|
||||
throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage");
|
||||
}
|
||||
|
||||
if (StringUtils.isBlank(providerName)) {
|
||||
throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
|
||||
}
|
||||
|
||||
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
|
||||
if (clusterId != null) {
|
||||
ClusterVO clusterVO = _clusterDao.findById(clusterId);
|
||||
Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
|
||||
if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
|
||||
throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor");
|
||||
}
|
||||
parameters.setHypervisorType(clusterVO.getHypervisorType());
|
||||
}
|
||||
|
||||
logger.debug("ONTAP primary storage will be created as " + (managed ? "managed" : "unmanaged"));
|
||||
if (!managed) {
|
||||
throw new CloudRuntimeException("ONTAP primary storage must be managed");
|
||||
}
|
||||
|
||||
// Details key validation
|
||||
//Required ONTAP detail keys
|
||||
Set<String> requiredKeys = Set.of(
|
||||
OntapStorageConstants.USERNAME,
|
||||
OntapStorageConstants.PASSWORD,
|
||||
OntapStorageConstants.SVM_NAME,
|
||||
OntapStorageConstants.PROTOCOL,
|
||||
OntapStorageConstants.MANAGEMENT_LIF
|
||||
OntapStorageConstants.STORAGE_IP
|
||||
);
|
||||
Set<String> optionalKeys = Set.of(
|
||||
OntapStorageConstants.IS_DISAGGREGATED
|
||||
);
|
||||
Set<String> allowedKeys = new java.util.HashSet<>(requiredKeys);
|
||||
allowedKeys.addAll(optionalKeys);
|
||||
|
||||
if (StringUtils.isNotBlank(url)) {
|
||||
for (String segment : url.split(OntapStorageConstants.SEMICOLON)) {
|
||||
if (segment.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
String[] kv = segment.split(OntapStorageConstants.EQUALS, 2);
|
||||
if (kv.length == 2) {
|
||||
details.put(kv[0].trim(), kv[1].trim());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate existing entries (reject unexpected keys, empty values)
|
||||
for (Map.Entry<String, String> e : details.entrySet()) {
|
||||
String key = e.getKey();
|
||||
String val = e.getValue();
|
||||
if (!allowedKeys.contains(key)) {
|
||||
if (!requiredKeys.contains(key)) {
|
||||
throw new CloudRuntimeException("Unexpected ONTAP detail key in URL: " + key);
|
||||
}
|
||||
if (StringUtils.isBlank(val)) {
|
||||
|
|
@ -268,9 +272,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
}
|
||||
}
|
||||
|
||||
Set<String> providedKeys = new HashSet<>(details.keySet());
|
||||
// Detect missing required keys
|
||||
HashSet<String> providedKeys = new HashSet<>(details.keySet());
|
||||
if (!providedKeys.containsAll(requiredKeys)) {
|
||||
Set<String> missing = new HashSet<>(requiredKeys);
|
||||
HashSet<String> missing = new HashSet<>(requiredKeys);
|
||||
missing.removeAll(providedKeys);
|
||||
throw new CloudRuntimeException("ONTAP primary storage creation failed, missing detail(s): " + missing);
|
||||
}
|
||||
|
|
@ -282,16 +287,16 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
|
||||
logger.debug("In attachCluster for ONTAP primary storage");
|
||||
if (dataStore == null) {
|
||||
throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
|
||||
throw new InvalidParameterValueException("dataStore should not be null");
|
||||
}
|
||||
if (scope == null) {
|
||||
throw new InvalidParameterValueException("attachCluster: scope should not be null");
|
||||
throw new InvalidParameterValueException("scope should not be null");
|
||||
}
|
||||
List<String> hostsIdentifier = new ArrayList<>();
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId());
|
||||
}
|
||||
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
|
||||
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
|
||||
|
|
@ -306,21 +311,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
|
||||
if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
|
||||
try {
|
||||
AccessGroup accessGroupRequest = new AccessGroup();
|
||||
accessGroupRequest.setHostsToConnect(hostsToConnect);
|
||||
accessGroupRequest.setScope(scope);
|
||||
primaryStore.setDetails(details);
|
||||
accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
|
||||
strategy.createAccessGroup(accessGroupRequest);
|
||||
} catch (Exception e) {
|
||||
logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage());
|
||||
throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
// We need to create export policy at pool level and igroup at host level(in grantAccess)
|
||||
createNfsAccessGroupIfNeeded(details, hostsIdentifier, hostsToConnect, scope, storagePool, strategy);
|
||||
|
||||
logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
|
||||
for (HostVO host : hostsToConnect) {
|
||||
try {
|
||||
|
|
@ -343,16 +337,16 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
|
||||
logger.debug("In attachZone for ONTAP primary storage");
|
||||
if (dataStore == null) {
|
||||
throw new InvalidParameterValueException("attachZone: dataStore should not be null");
|
||||
throw new InvalidParameterValueException("dataStore should not be null");
|
||||
}
|
||||
if (scope == null) {
|
||||
throw new InvalidParameterValueException("attachZone: scope should not be null");
|
||||
throw new InvalidParameterValueException("scope should not be null");
|
||||
}
|
||||
List<String> hostsIdentifier = new ArrayList<>();
|
||||
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
|
||||
if (storagePool == null) {
|
||||
logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId());
|
||||
throw new CloudRuntimeException("Storage Pool not found for id: " + dataStore.getId());
|
||||
}
|
||||
|
||||
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
|
||||
|
|
@ -369,19 +363,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) {
|
||||
try {
|
||||
AccessGroup accessGroupRequest = new AccessGroup();
|
||||
accessGroupRequest.setHostsToConnect(hostsToConnect);
|
||||
accessGroupRequest.setScope(scope);
|
||||
primaryStore.setDetails(details);
|
||||
accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
|
||||
strategy.createAccessGroup(accessGroupRequest);
|
||||
} catch (Exception e) {
|
||||
logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
|
||||
throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// We need to create export policy at pool level and igroup at host level
|
||||
createNfsAccessGroupIfNeeded(details, hostsIdentifier, hostsToConnect, scope, storagePool, strategy);
|
||||
|
||||
for (HostVO host : hostsToConnect) {
|
||||
try {
|
||||
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
|
||||
|
|
@ -393,7 +378,6 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
_dataStoreHelper.attachZone(dataStore);
|
||||
return true;
|
||||
}
|
||||
|
||||
private boolean validateProtocolSupportAndFetchHostsIdentifier(List<HostVO> hosts, ProtocolType protocolType, List<String> hostIdentifiers) {
|
||||
switch (protocolType) {
|
||||
case ISCSI:
|
||||
|
|
@ -401,7 +385,8 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
for (HostVO host : hosts) {
|
||||
if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
|
||||
|| !host.getStorageUrl().startsWith(protocolPrefix)) {
|
||||
return false;
|
||||
// TODO we will inform customer through alert for excluded host because of protocol enabled on host
|
||||
continue;
|
||||
}
|
||||
hostIdentifiers.add(host.getStorageUrl());
|
||||
}
|
||||
|
|
@ -411,23 +396,49 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
for (HostVO host : hosts) {
|
||||
if (host != null) {
|
||||
ip = host.getStorageIpAddress() != null ? host.getStorageIpAddress().trim() : "";
|
||||
if (ip.isEmpty()) {
|
||||
if (host.getPrivateIpAddress() == null || host.getPrivateIpAddress().trim().isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
ip = host.getPrivateIpAddress().trim();
|
||||
if (ip.isEmpty() && StringUtils.isBlank(host.getPrivateIpAddress() )) {
|
||||
// TODO we will inform customer through alert for excluded host because of protocol enabled on host
|
||||
continue;
|
||||
} else {
|
||||
ip = ip.isEmpty() ? host.getPrivateIpAddress().trim() : ip;
|
||||
}
|
||||
}
|
||||
hostIdentifiers.add(ip);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
|
||||
throw new CloudRuntimeException("Unsupported protocol: " + protocolType.name());
|
||||
}
|
||||
logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name());
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates an NFS export policy (access group) on the ONTAP storage if the protocol is NFS3
|
||||
* and there are eligible hosts. Skipped for iSCSI (igroups are created per-host in grantAccess).
|
||||
*/
|
||||
private void createNfsAccessGroupIfNeeded(Map<String, String> details, List<String> hostsIdentifier,
|
||||
List<HostVO> hostsToConnect, Scope scope,
|
||||
StoragePoolVO storagePool, StorageStrategy strategy) {
|
||||
if (!ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
|
||||
return;
|
||||
}
|
||||
if (hostsIdentifier.isEmpty()) {
|
||||
// No eligible hosts — export policy will be created later via HostListener when hosts come up
|
||||
return;
|
||||
}
|
||||
try {
|
||||
AccessGroup accessGroupRequest = new AccessGroup();
|
||||
accessGroupRequest.setHostsToConnect(hostsToConnect);
|
||||
accessGroupRequest.setScope(scope);
|
||||
accessGroupRequest.setStoragePoolId(storagePool.getId());
|
||||
strategy.createAccessGroup(accessGroupRequest);
|
||||
} catch (Exception e) {
|
||||
logger.error("Failed to create NFS access group on storage for pool {}: {}", storagePool.getName(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to create NFS access group on storage for pool " + storagePool.getName() + ": " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean maintain(DataStore store) {
|
||||
logger.info("Placing storage pool {} in maintenance mode", store);
|
||||
|
|
@ -453,13 +464,15 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
logger.info("deleteDataStore: Starting deletion process for storage pool id: {}", store.getId());
|
||||
|
||||
long storagePoolId = store.getId();
|
||||
// Get the StoragePool details
|
||||
StoragePool storagePool = _storageMgr.getStoragePool(storagePoolId);
|
||||
if (storagePool == null) {
|
||||
logger.warn("deleteDataStore: Storage pool not found for id: {}, skipping deletion", storagePoolId);
|
||||
return true;
|
||||
return true; // Return true since the entity doesn't exist
|
||||
}
|
||||
|
||||
try {
|
||||
// Fetch storage pool details
|
||||
Map<String, String> details = _datastoreDetailsDao.listDetailsKeyPairs(storagePoolId);
|
||||
if (details == null || details.isEmpty()) {
|
||||
logger.warn("deleteDataStore: No details found for storage pool id: {}, proceeding with CS entity deletion only", storagePoolId);
|
||||
|
|
@ -468,11 +481,14 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
|
||||
logger.info("deleteDataStore: Deleting access groups for storage pool '{}'", storagePool.getName());
|
||||
|
||||
// Get the storage strategy to interact with ONTAP
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(details);
|
||||
|
||||
// Cast DataStore to PrimaryDataStoreInfo to get full details
|
||||
PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) store;
|
||||
primaryDataStoreInfo.setDetails(details);
|
||||
|
||||
// Call deleteStorageVolume to delete the underlying ONTAP volume
|
||||
logger.info("deleteDataStore: Deleting ONTAP volume for storage pool '{}'", storagePool.getName());
|
||||
Volume volume = new Volume();
|
||||
volume.setUuid(details.get(OntapStorageConstants.VOLUME_UUID));
|
||||
|
|
@ -490,16 +506,19 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
|
|||
storagePoolId, e.getMessage(), e);
|
||||
}
|
||||
AccessGroup accessGroup = new AccessGroup();
|
||||
accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo);
|
||||
accessGroup.setStoragePoolId(storagePoolId);
|
||||
// Delete access groups associated with this storage pool
|
||||
storageStrategy.deleteAccessGroup(accessGroup);
|
||||
logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName());
|
||||
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. Error: {}",
|
||||
storagePoolId, e.getMessage(), e);
|
||||
// Continue with CloudStack entity deletion even if ONTAP cleanup fails
|
||||
logger.warn("deleteDataStore: Proceeding with CloudStack entity deletion despite ONTAP cleanup failure");
|
||||
}
|
||||
|
||||
// Delete the CloudStack primary data store entity
|
||||
return _dataStoreHelper.deletePrimaryDataStore(store);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import com.cloud.agent.api.ModifyStoragePoolCommand;
|
|||
import com.cloud.agent.api.ModifyStoragePoolAnswer;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.alert.AlertManager;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
|
@ -37,9 +38,12 @@ import com.cloud.storage.StoragePool;
|
|||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
import com.cloud.host.dao.HostDao;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class OntapHostListener implements HypervisorHostListener {
|
||||
protected Logger logger = LogManager.getLogger(getClass());
|
||||
|
||||
|
|
@ -53,6 +57,9 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
private HostDao _hostDao;
|
||||
@Inject
|
||||
private StoragePoolHostDao storagePoolHostDao;
|
||||
@Inject
|
||||
private StoragePoolDetailsDao _storagePoolDetailsDao;
|
||||
|
||||
|
||||
@Override
|
||||
public boolean hostConnect(long hostId, long poolId) {
|
||||
|
|
@ -62,6 +69,10 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
logger.error("host was not found with id : {}", hostId);
|
||||
return false;
|
||||
}
|
||||
if (!host.getHypervisorType().equals(Hypervisor.HypervisorType.KVM)) {
|
||||
logger.error("ONTAP plugin does not support {} type host currently ", host.getHypervisorType());
|
||||
return false;
|
||||
}
|
||||
|
||||
StoragePool pool = _storagePoolDao.findById(poolId);
|
||||
if (pool == null) {
|
||||
|
|
@ -70,7 +81,12 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
}
|
||||
logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName());
|
||||
try {
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool);
|
||||
// Load storage pool details from database to pass mount options and other config to agent
|
||||
Map<String, String> detailsMap = _storagePoolDetailsDao.listDetailsKeyPairs(poolId);
|
||||
// Create the ModifyStoragePoolCommand to send to the agent
|
||||
// Note: Always send command even if database entry exists, because agent may have restarted
|
||||
// and lost in-memory pool registration. The command handler is idempotent.
|
||||
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, detailsMap);
|
||||
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
|
|
@ -87,9 +103,12 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
"Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails()));
|
||||
}
|
||||
|
||||
// Get the mount path from the answer
|
||||
|
||||
if (!(answer instanceof ModifyStoragePoolAnswer)) {
|
||||
logger.error("Received unexpected answer type {} for storage pool {}", answer.getClass().getName(), pool.getName());
|
||||
throw new CloudRuntimeException("Failed to connect to storage pool. Please check agent logs for details.");
|
||||
throw new CloudRuntimeException(String.format(
|
||||
"Unexpected answer type %s returned for modify storage pool command for pool %s on host %d",
|
||||
answer.getClass().getName(), pool, hostId));
|
||||
}
|
||||
|
||||
ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;
|
||||
|
|
@ -101,6 +120,7 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
String localPath = poolInfo.getLocalPath();
|
||||
logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath);
|
||||
|
||||
// Update or create the storage_pool_host_ref entry with the correct local_path
|
||||
StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
|
||||
|
||||
if (storagePoolHost == null) {
|
||||
|
|
@ -113,6 +133,7 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath);
|
||||
}
|
||||
|
||||
// Update pool capacity/usage information
|
||||
StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
|
||||
if (poolVO != null && poolInfo.getCapacityBytes() > 0) {
|
||||
poolVO.setCapacityBytes(poolInfo.getCapacityBytes());
|
||||
|
|
@ -123,46 +144,47 @@ public class OntapHostListener implements HypervisorHostListener {
|
|||
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e);
|
||||
// CRITICAL: Don't throw exception - it crashes the agent and causes restart loops
|
||||
// Return false to indicate failure without crashing
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hostDisconnected(Host host, StoragePool pool) {
|
||||
logger.info("Disconnect from host " + host.getId() + " from pool " + pool.getName());
|
||||
public boolean hostDisconnected(long hostId, long poolId) {
|
||||
logger.info("Disconnect from host " + hostId + " from pool " + poolId);
|
||||
|
||||
Host hostToremove = _hostDao.findById(host.getId());
|
||||
Host hostToremove = _hostDao.findById(hostId);
|
||||
if (hostToremove == null) {
|
||||
logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId());
|
||||
logger.error("Failed to add host by HostListener as host was not found with id : {}", hostId);
|
||||
return false;
|
||||
}
|
||||
logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName());
|
||||
|
||||
StoragePool pool = _storagePoolDao.findById(poolId);
|
||||
if (pool == null) {
|
||||
logger.error("Failed to disconnect host - storage pool not found with id: {}", poolId);
|
||||
return false;
|
||||
}
|
||||
logger.info("Disconnecting host {} from ONTAP storage pool {}", hostToremove.getName(), pool.getName());
|
||||
|
||||
try {
|
||||
DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool);
|
||||
long hostId = host.getId();
|
||||
Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
|
||||
if (answer != null && answer.getResult()) {
|
||||
logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName());
|
||||
logger.info("Successfully disconnected host {} from ONTAP storage pool {}", hostToremove.getName(), pool.getName());
|
||||
return true;
|
||||
} else {
|
||||
String errMsg = (answer != null) ? answer.getDetails() : "Unknown error";
|
||||
logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg);
|
||||
logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", hostToremove.getName(), pool.getName(), errMsg);
|
||||
return false;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e);
|
||||
logger.error("Exception while disconnecting host {} from storage pool {}", hostToremove.getName(), pool.getName(), e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hostDisconnected(long hostId, long poolId) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hostAboutToBeRemoved(long hostId) {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -21,6 +21,9 @@ package org.apache.cloudstack.storage.provider;
|
|||
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.cloudstack.storage.service.StorageStrategy;
|
||||
import org.apache.cloudstack.storage.service.UnifiedNASStrategy;
|
||||
|
|
@ -36,23 +39,23 @@ public class StorageProviderFactory {
|
|||
public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
|
||||
ProtocolType protocol = ontapStorage.getProtocol();
|
||||
logger.info("Initializing StorageProviderFactory with protocol: " + protocol);
|
||||
String decodedPassword = new String(java.util.Base64.getDecoder().decode(ontapStorage.getPassword()), StandardCharsets.UTF_8);
|
||||
ontapStorage = new OntapStorage(
|
||||
ontapStorage.getUsername(),
|
||||
decodedPassword,
|
||||
ontapStorage.getStorageIP(),
|
||||
ontapStorage.getSvmName(),
|
||||
ontapStorage.getSize(),
|
||||
protocol);
|
||||
switch (protocol) {
|
||||
case NFS3:
|
||||
if (!ontapStorage.getIsDisaggregated()) {
|
||||
UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
|
||||
ComponentContext.inject(unifiedNASStrategy);
|
||||
unifiedNASStrategy.setOntapStorage(ontapStorage);
|
||||
return unifiedNASStrategy;
|
||||
}
|
||||
throw new CloudRuntimeException("Unsupported configuration: Disaggregated ONTAP is not supported.");
|
||||
UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
|
||||
ComponentContext.inject(unifiedNASStrategy);
|
||||
return unifiedNASStrategy;
|
||||
case ISCSI:
|
||||
if (!ontapStorage.getIsDisaggregated()) {
|
||||
UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage);
|
||||
ComponentContext.inject(unifiedSANStrategy);
|
||||
unifiedSANStrategy.setOntapStorage(ontapStorage);
|
||||
return unifiedSANStrategy;
|
||||
}
|
||||
throw new CloudRuntimeException("Unsupported configuration: Disaggregated ONTAP is not supported.");
|
||||
UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage);
|
||||
ComponentContext.inject(unifiedSANStrategy);
|
||||
return unifiedSANStrategy;
|
||||
default:
|
||||
throw new CloudRuntimeException("Unsupported protocol: " + protocol);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,11 +19,54 @@
|
|||
|
||||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import org.apache.cloudstack.storage.feign.model.Igroup;
|
||||
import org.apache.cloudstack.storage.feign.model.Initiator;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
public abstract class SANStrategy extends StorageStrategy {
|
||||
private static final Logger s_logger = LogManager.getLogger(SANStrategy.class);
|
||||
public SANStrategy(OntapStorage ontapStorage) {
|
||||
super(ontapStorage);
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the LUN is mapped to the specified access group (igroup).
|
||||
* If a mapping already exists, returns the existing LUN number.
|
||||
* If not, creates a new mapping and returns the assigned LUN number.
|
||||
*
|
||||
* @param svmName the SVM name
|
||||
* @param lunName the LUN name
|
||||
* @param accessGroupName the igroup name
|
||||
* @return the logical unit number as a String
|
||||
*/
|
||||
public abstract String ensureLunMapped(String svmName, String lunName, String accessGroupName);
|
||||
|
||||
/**
|
||||
* Validates that the host initiator is present in the access group (igroup).
|
||||
*
|
||||
* @param hostInitiator the host initiator IQN
|
||||
* @param svmName the SVM name
|
||||
* @param igroup the igroup
|
||||
* @return true if the initiator is found in the igroup, false otherwise
|
||||
*/
|
||||
public boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, Igroup igroup) {
|
||||
s_logger.info("validateInitiatorInAccessGroup: Validating initiator [{}] is in igroup [{}] on SVM [{}]", hostInitiator, igroup, svmName);
|
||||
|
||||
if (hostInitiator == null || hostInitiator.isEmpty()) {
|
||||
s_logger.warn("validateInitiatorInAccessGroup: host initiator is null or empty");
|
||||
return false;
|
||||
}
|
||||
if (igroup.getInitiators() != null) {
|
||||
for (Initiator initiator : igroup.getInitiators()) {
|
||||
if (initiator.getName().equalsIgnoreCase(hostInitiator)) {
|
||||
s_logger.info("validateInitiatorInAccessGroup: Initiator [{}] validated successfully in igroup [{}]", hostInitiator, igroup);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
s_logger.warn("validateInitiatorInAccessGroup: Initiator [{}] NOT found in igroup [{}]", hostInitiator, igroup);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.cloudstack.storage.service;
|
||||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import feign.FeignException;
|
||||
|
|
@ -25,7 +25,9 @@ import org.apache.cloudstack.storage.feign.FeignClientFactory;
|
|||
import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NASFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SANFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SnapshotFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.Aggregate;
|
||||
|
|
@ -51,25 +53,39 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Storage Strategy represents the communication path for all the ONTAP storage options
|
||||
*
|
||||
* ONTAP storage operation would vary based on
|
||||
* Supported protocols: NFS3.0, NFS4.1, FC, iSCSI, Nvme/TCP and Nvme/FC
|
||||
* Supported platform: Unified and Disaggregated
|
||||
*/
|
||||
public abstract class StorageStrategy {
|
||||
private final FeignClientFactory feignClientFactory;
|
||||
private final AggregateFeignClient aggregateFeignClient;
|
||||
private final VolumeFeignClient volumeFeignClient;
|
||||
private final SvmFeignClient svmFeignClient;
|
||||
private final JobFeignClient jobFeignClient;
|
||||
private final NetworkFeignClient networkFeignClient;
|
||||
private final SANFeignClient sanFeignClient;
|
||||
// Replace @Inject Feign clients with FeignClientFactory
|
||||
protected FeignClientFactory feignClientFactory;
|
||||
protected AggregateFeignClient aggregateFeignClient;
|
||||
protected VolumeFeignClient volumeFeignClient;
|
||||
protected SvmFeignClient svmFeignClient;
|
||||
protected JobFeignClient jobFeignClient;
|
||||
protected NetworkFeignClient networkFeignClient;
|
||||
protected SANFeignClient sanFeignClient;
|
||||
protected NASFeignClient nasFeignClient;
|
||||
protected SnapshotFeignClient snapshotFeignClient;
|
||||
|
||||
protected OntapStorage storage;
|
||||
|
||||
/**
|
||||
* Presents aggregate object for the unified storage, not eligible for disaggregated
|
||||
*/
|
||||
private List<Aggregate> aggregates;
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(StorageStrategy.class);
|
||||
|
||||
public StorageStrategy(OntapStorage ontapStorage) {
|
||||
storage = ontapStorage;
|
||||
String baseURL = OntapStorageConstants.HTTPS + storage.getManagementLIF();
|
||||
String baseURL = OntapStorageConstants.HTTPS + storage.getStorageIP();
|
||||
logger.info("Initializing StorageStrategy with base URL: " + baseURL);
|
||||
// Initialize FeignClientFactory and create clients
|
||||
this.feignClientFactory = new FeignClientFactory();
|
||||
this.aggregateFeignClient = feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
|
||||
this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
|
||||
|
|
@ -77,14 +93,18 @@ public abstract class StorageStrategy {
|
|||
this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL);
|
||||
this.networkFeignClient = feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
|
||||
this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL);
|
||||
this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL);
|
||||
this.snapshotFeignClient = feignClientFactory.createClient(SnapshotFeignClient.class, baseURL);
|
||||
}
|
||||
|
||||
// Connect method to validate ONTAP cluster, credentials, protocol, and SVM
|
||||
public boolean connect() {
|
||||
logger.info("Attempting to connect to ONTAP cluster at " + storage.getManagementLIF() + " and validate SVM " +
|
||||
logger.info("Attempting to connect to ONTAP cluster at " + storage.getStorageIP() + " and validate SVM " +
|
||||
storage.getSvmName() + ", protocol " + storage.getProtocol());
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
String svmName = storage.getSvmName();
|
||||
try {
|
||||
// Call the SVM API to check if the SVM exists
|
||||
Svm svm = new Svm();
|
||||
logger.info("Fetching the SVM details...");
|
||||
Map<String, Object> queryParams = Map.of(OntapStorageConstants.NAME, svmName, OntapStorageConstants.FIELDS, OntapStorageConstants.AGGREGATES +
|
||||
|
|
@ -146,6 +166,17 @@ public abstract class StorageStrategy {
|
|||
return true;
|
||||
}
|
||||
|
||||
// Common methods like create/delete etc., should be here
|
||||
|
||||
/**
|
||||
* Creates ONTAP Flex-Volume
|
||||
* Eligible only for Unified ONTAP storage
|
||||
* throw exception in case of disaggregated ONTAP storage
|
||||
*
|
||||
* @param volumeName the name of the volume to create
|
||||
* @param size the size of the volume in bytes
|
||||
* @return the created Volume object
|
||||
*/
|
||||
public Volume createStorageVolume(String volumeName, Long size) {
|
||||
logger.info("Creating volume: " + volumeName + " of size: " + size + " bytes");
|
||||
|
||||
|
|
@ -160,6 +191,7 @@ public abstract class StorageStrategy {
|
|||
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
|
||||
// Generate the Create Volume Request
|
||||
Volume volumeRequest = new Volume();
|
||||
Svm svm = new Svm();
|
||||
svm.setName(svmName);
|
||||
|
|
@ -169,6 +201,7 @@ public abstract class StorageStrategy {
|
|||
volumeRequest.setName(volumeName);
|
||||
volumeRequest.setSvm(svm);
|
||||
|
||||
// Pick the best aggregate for this specific request (largest available, online, and sufficient space).
|
||||
long maxAvailableAggregateSpaceBytes = -1L;
|
||||
Aggregate aggrChosen = null;
|
||||
for (Aggregate aggr : aggregates) {
|
||||
|
|
@ -224,7 +257,7 @@ public abstract class StorageStrategy {
|
|||
}
|
||||
String jobUUID = jobResponse.getJob().getUuid();
|
||||
|
||||
Boolean jobSucceeded = jobPollForSuccess(jobUUID);
|
||||
Boolean jobSucceeded = jobPollForSuccess(jobUUID,10, 1000);
|
||||
if (!jobSucceeded) {
|
||||
logger.error("Volume creation job failed for volume: " + volumeName);
|
||||
throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName);
|
||||
|
|
@ -234,6 +267,8 @@ public abstract class StorageStrategy {
|
|||
logger.error("Exception while creating volume: ", e);
|
||||
throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
|
||||
}
|
||||
// Verify if the Volume has been created and set the Volume object
|
||||
// Call the VolumeFeignClient to get the created volume details
|
||||
OntapResponse<Volume> volumesResponse = volumeFeignClient.getAllVolumes(authHeader, Map.of(OntapStorageConstants.NAME, volumeName));
|
||||
if (volumesResponse == null || volumesResponse.getRecords() == null || volumesResponse.getRecords().isEmpty()) {
|
||||
logger.error("Volume " + volumeName + " not found after creation.");
|
||||
|
|
@ -281,16 +316,32 @@ public abstract class StorageStrategy {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates ONTAP Flex-Volume
|
||||
* Eligible only for Unified ONTAP storage
|
||||
* throw exception in case of disaggregated ONTAP storage
|
||||
*
|
||||
* @param volume the volume to update
|
||||
* @return the updated Volume object
|
||||
*/
|
||||
public Volume updateStorageVolume(Volume volume) {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete ONTAP Flex-Volume
|
||||
* Eligible only for Unified ONTAP storage
|
||||
* throw exception in case of disaggregated ONTAP storage
|
||||
*
|
||||
* @param volume the volume to delete
|
||||
*/
|
||||
public void deleteStorageVolume(Volume volume) {
|
||||
logger.info("Deleting ONTAP volume by name: " + volume.getName() + " and uuid: " + volume.getUuid());
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
try {
|
||||
// TODO: Implement lun and file deletion, if any, before deleting the volume
|
||||
JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
|
||||
Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid());
|
||||
Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid(), 10, 1000);
|
||||
if (!jobSucceeded) {
|
||||
logger.error("Volume deletion job failed for volume: " + volume.getName());
|
||||
throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName());
|
||||
|
|
@ -303,10 +354,25 @@ public abstract class StorageStrategy {
|
|||
logger.info("ONTAP volume deletion process completed for volume: " + volume.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets ONTAP Flex-Volume
|
||||
* Eligible only for Unified ONTAP storage
|
||||
* throw exception in case of disaggregated ONTAP storage
|
||||
*
|
||||
* @param volume the volume to retrieve
|
||||
* @return the retrieved Volume object
|
||||
*/
|
||||
public Volume getStorageVolume(Volume volume) {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the storage path based on protocol.
|
||||
* For iSCSI: Returns the iSCSI target IQN (e.g., iqn.1992-08.com.netapp:sn.xxx:vs.3)
|
||||
* For NFS: Returns the mount path (to be implemented)
|
||||
*
|
||||
* @return the storage path as a String
|
||||
*/
|
||||
public String getStoragePath() {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
String targetIqn = null;
|
||||
|
|
@ -336,6 +402,7 @@ public abstract class StorageStrategy {
|
|||
return targetIqn;
|
||||
|
||||
} else if (storage.getProtocol() == ProtocolType.NFS3) {
|
||||
// TODO: Implement NFS path retrieval logic
|
||||
} else {
|
||||
throw new CloudRuntimeException("Unsupported protocol for path retrieval: " + storage.getProtocol());
|
||||
}
|
||||
|
|
@ -347,6 +414,14 @@ public abstract class StorageStrategy {
|
|||
return targetIqn;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Get the network ip interface
|
||||
*
|
||||
* @return the network interface ip as a String
|
||||
*/
|
||||
|
||||
public String getNetworkInterface() {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
try {
|
||||
|
|
@ -371,6 +446,7 @@ public abstract class StorageStrategy {
|
|||
networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams);
|
||||
if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) {
|
||||
IpInterface ipInterface = null;
|
||||
// For simplicity, return the first interface's name (Of IPv4 type for NFS3)
|
||||
if (storage.getProtocol() == ProtocolType.ISCSI) {
|
||||
ipInterface = response.getRecords().get(0);
|
||||
} else if (storage.getProtocol() == ProtocolType.NFS3) {
|
||||
|
|
@ -394,37 +470,189 @@ public abstract class StorageStrategy {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses.
|
||||
* it is going to mimic
|
||||
* createLun for iSCSI, FC protocols
|
||||
* createFile for NFS3.0 and NFS4.1 protocols
|
||||
* createNameSpace for Nvme/TCP and Nvme/FC protocol
|
||||
*
|
||||
* @param cloudstackVolume the CloudStack volume to create
|
||||
* @return the created CloudStackVolume object
|
||||
*/
|
||||
abstract public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses.
|
||||
* it is going to mimic
|
||||
* updateLun for iSCSI, FC protocols
|
||||
* updateFile for NFS3.0 and NFS4.1 protocols
|
||||
* updateNameSpace for Nvme/TCP and Nvme/FC protocol
|
||||
*
|
||||
* @param cloudstackVolume the CloudStack volume to update
|
||||
* @return the updated CloudStackVolume object
|
||||
*/
|
||||
abstract CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses.
|
||||
* it is going to mimic
|
||||
* deleteLun for iSCSI, FC protocols
|
||||
* deleteFile for NFS3.0 and NFS4.1 protocols
|
||||
* deleteNameSpace for Nvme/TCP and Nvme/FC protocol
|
||||
*
|
||||
* @param cloudstackVolume the CloudStack volume to delete
|
||||
*/
|
||||
abstract public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses.
|
||||
* it is going to mimic
|
||||
* cloneLun for iSCSI, FC protocols
|
||||
* cloneFile for NFS3.0 and NFS4.1 protocols
|
||||
* cloneNameSpace for Nvme/TCP and Nvme/FC protocol
|
||||
* @param cloudstackVolume the CloudStack volume to copy
|
||||
*/
|
||||
abstract public void copyCloudStackVolume(CloudStackVolume cloudstackVolume);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses.
|
||||
* it is going to mimic
|
||||
* getLun for iSCSI, FC protocols
|
||||
* getFile for NFS3.0 and NFS4.1 protocols
|
||||
* getNameSpace for Nvme/TCP and Nvme/FC protocol
|
||||
* @param cloudStackVolumeMap the CloudStack volume to retrieve
|
||||
* @return the retrieved CloudStackVolume object
|
||||
*/
|
||||
abstract public CloudStackVolume getCloudStackVolume(Map<String, String> cloudStackVolumeMap);
|
||||
|
||||
/**
|
||||
* Reverts a CloudStack volume to a snapshot using protocol-specific ONTAP APIs.
|
||||
*
|
||||
* <p>This method encapsulates the snapshot revert behavior based on protocol:</p>
|
||||
* <ul>
|
||||
* <li><b>iSCSI/FC:</b> Uses {@code POST /api/storage/luns/{lun.uuid}/restore}
|
||||
* to restore LUN data from the FlexVolume snapshot.</li>
|
||||
* <li><b>NFS:</b> Uses {@code POST /api/storage/volumes/{vol.uuid}/snapshots/{snap.uuid}/files/{path}/restore}
|
||||
* to restore a single file from the FlexVolume snapshot.</li>
|
||||
* </ul>
|
||||
*
|
||||
* @param snapshotName The ONTAP FlexVolume snapshot name
|
||||
* @param flexVolUuid The FlexVolume UUID containing the snapshot
|
||||
* @param snapshotUuid The ONTAP snapshot UUID (used for NFS file restore)
|
||||
* @param volumePath The path of the file/LUN within the FlexVolume
|
||||
* @param lunUuid The LUN UUID (only for iSCSI, null for NFS)
|
||||
* @param flexVolName The FlexVolume name (only for iSCSI, for constructing destination path)
|
||||
* @return JobResponse for the async restore operation
|
||||
*/
|
||||
public abstract JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid,
|
||||
String snapshotUuid, String volumePath,
|
||||
String lunUuid, String flexVolName);
|
||||
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* createiGroup for iSCSI and FC protocols
|
||||
* createExportPolicy for NFS 3.0 and NFS 4.1 protocols
|
||||
* createSubsystem for Nvme/TCP and Nvme/FC protocols
|
||||
* @param accessGroup the access group to create
|
||||
* @return the created AccessGroup object
|
||||
*/
|
||||
abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* deleteiGroup for iSCSI and FC protocols
|
||||
* deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
|
||||
* deleteSubsystem for Nvme/TCP and Nvme/FC protocols
|
||||
* @param accessGroup the access group to delete
|
||||
*/
|
||||
abstract public void deleteAccessGroup(AccessGroup accessGroup);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* updateiGroup example add/remove-Iqn for iSCSI and FC protocols
|
||||
* updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
|
||||
* //TODO for Nvme/TCP and Nvme/FC protocols
|
||||
* @param accessGroup the access group to update
|
||||
* @return the updated AccessGroup object
|
||||
*/
|
||||
abstract AccessGroup updateAccessGroup(AccessGroup accessGroup);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* e.g., getIGroup for iSCSI and FC protocols
|
||||
* e.g., getExportPolicy for NFS 3.0 and NFS 4.1 protocols
|
||||
* //TODO for Nvme/TCP and Nvme/FC protocols
|
||||
* @param values map to get access group values like name, svm name etc.
|
||||
*/
|
||||
abstract public AccessGroup getAccessGroup(Map<String, String> values);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* lunMap for iSCSI and FC protocols
|
||||
* //TODO for NFS 3.0 and NFS 4.1 protocols (e.g., export rule management)
|
||||
* //TODO for Nvme/TCP and Nvme/FC protocols
|
||||
* @param values map including SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS
|
||||
* @return map containing logical unit number for the new/existing mapping (SAN) or relevant info for NAS
|
||||
*/
|
||||
abstract public Map<String,String> enableLogicalAccess(Map<String,String> values);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* lunUnmap for iSCSI and FC protocols
|
||||
* @param values map including LUN UUID and iGroup UUID (for SAN) or equivalent for NAS
|
||||
*/
|
||||
abstract public void disableLogicalAccess(Map<String, String> values);
|
||||
|
||||
/**
|
||||
* Method encapsulates the behavior based on the opted protocol in subclasses
|
||||
* lunMap lookup for iSCSI/FC protocols (GET-only, no side-effects)
|
||||
* @param values map with SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS
|
||||
* @return map containing logical unit number if mapping exists; otherwise null
|
||||
*/
|
||||
abstract public Map<String, String> getLogicalAccess(Map<String, String> values);
|
||||
|
||||
private Boolean jobPollForSuccess(String jobUUID) {
|
||||
// ── FlexVolume Snapshot accessors ────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Returns the {@link SnapshotFeignClient} for ONTAP FlexVolume snapshot operations.
|
||||
*/
|
||||
public SnapshotFeignClient getSnapshotFeignClient() {
|
||||
return snapshotFeignClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the {@link NASFeignClient} for ONTAP NAS file operations
|
||||
* (including file clone for single-file SnapRestore).
|
||||
*/
|
||||
public NASFeignClient getNasFeignClient() {
|
||||
return nasFeignClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates the Basic-auth header for ONTAP REST calls.
|
||||
*/
|
||||
public String getAuthHeader() {
|
||||
return OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
}
|
||||
|
||||
/**
|
||||
* Polls an ONTAP async job for successful completion.
|
||||
*
|
||||
* @param jobUUID UUID of the ONTAP job to poll
|
||||
* @param maxRetries maximum number of poll attempts
|
||||
* @param sleepTimeInMilliSecs seconds to sleep between poll attempts
|
||||
* @return true if the job completed successfully
|
||||
*/
|
||||
public Boolean jobPollForSuccess(String jobUUID, int maxRetries, int sleepTimeInMilliSecs) {
|
||||
//Create URI for GET Job API
|
||||
int jobRetryCount = 0;
|
||||
Job jobResp = null;
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
while (jobResp == null || !jobResp.getState().equals(OntapStorageConstants.JOB_SUCCESS)) {
|
||||
if (jobRetryCount >= OntapStorageConstants.JOB_MAX_RETRIES) {
|
||||
if (jobRetryCount >= maxRetries) {
|
||||
logger.error("Job did not complete within expected time.");
|
||||
throw new CloudRuntimeException("Job did not complete within expected time.");
|
||||
}
|
||||
|
|
@ -441,7 +669,7 @@ public abstract class StorageStrategy {
|
|||
}
|
||||
|
||||
jobRetryCount++;
|
||||
Thread.sleep(OntapStorageConstants.CREATE_VOLUME_CHECK_SLEEP_TIME);
|
||||
Thread.sleep(sleepTimeInMilliSecs);
|
||||
}
|
||||
if (jobResp == null || !jobResp.getState().equals(OntapStorageConstants.JOB_SUCCESS)) {
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -19,19 +19,22 @@
|
|||
|
||||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import feign.FeignException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectCommand;
|
||||
import org.apache.cloudstack.storage.command.DeleteCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.feign.FeignClientFactory;
|
||||
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NASFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
|
||||
import org.apache.cloudstack.storage.feign.model.ExportRule;
|
||||
import org.apache.cloudstack.storage.feign.model.FileInfo;
|
||||
import org.apache.cloudstack.storage.feign.model.Job;
|
||||
import org.apache.cloudstack.storage.feign.model.Nas;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
|
|
@ -39,8 +42,10 @@ import org.apache.cloudstack.storage.feign.model.Svm;
|
|||
import org.apache.cloudstack.storage.feign.model.Volume;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.volume.VolumeObject;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
|
|
@ -52,23 +57,13 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
public class UnifiedNASStrategy extends NASStrategy {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(UnifiedNASStrategy.class);
|
||||
private final FeignClientFactory feignClientFactory;
|
||||
private final NASFeignClient nasFeignClient;
|
||||
private final VolumeFeignClient volumeFeignClient;
|
||||
private final JobFeignClient jobFeignClient;
|
||||
@Inject private VolumeDao volumeDao;
|
||||
@Inject private EndPointSelector epSelector;
|
||||
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
|
||||
public UnifiedNASStrategy(OntapStorage ontapStorage) {
|
||||
super(ontapStorage);
|
||||
String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getManagementLIF();
|
||||
this.feignClientFactory = new FeignClientFactory();
|
||||
this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL);
|
||||
this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class,baseURL );
|
||||
this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL );
|
||||
}
|
||||
|
||||
public void setOntapStorage(OntapStorage ontapStorage) {
|
||||
|
|
@ -77,7 +72,22 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
|
||||
@Override
|
||||
public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
return null;
|
||||
logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume);
|
||||
try {
|
||||
// Step 1: set cloudstack volume metadata
|
||||
updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo());
|
||||
// Step 2: Send command to KVM host to create qcow2 file using qemu-img
|
||||
Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo());
|
||||
if (answer == null || !answer.getResult()) {
|
||||
String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host";
|
||||
logger.error("createCloudStackVolume: " + errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
return cloudstackVolume;
|
||||
}catch (Exception e) {
|
||||
logger.error("createCloudStackVolume: error occured " + e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -87,6 +97,19 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
|
||||
@Override
|
||||
public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
logger.info("deleteCloudStackVolume: Delete cloudstack volume " + cloudstackVolume);
|
||||
try {
|
||||
// Step 1: Send command to KVM host to delete qcow2 file using qemu-img
|
||||
Answer answer = deleteVolumeOnKVMHost(cloudstackVolume.getVolumeInfo());
|
||||
if (answer == null || !answer.getResult()) {
|
||||
String errMsg = answer != null ? answer.getDetails() : "Failed to delete qcow2 on KVM host";
|
||||
logger.error("deleteCloudStackVolume: " + errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteCloudStackVolume: error occured " + e);
|
||||
throw new CloudRuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -96,24 +119,40 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
|
||||
@Override
|
||||
public CloudStackVolume getCloudStackVolume(Map<String, String> cloudStackVolumeMap) {
|
||||
return null;
|
||||
logger.info("getCloudStackVolume: Get cloudstack volume " + cloudStackVolumeMap);
|
||||
CloudStackVolume cloudStackVolume = null;
|
||||
FileInfo fileInfo = getFile(cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID),cloudStackVolumeMap.get(OntapStorageConstants.FILE_PATH));
|
||||
|
||||
if (fileInfo != null){
|
||||
cloudStackVolume = new CloudStackVolume();
|
||||
cloudStackVolume.setFlexVolumeUuid(cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID));
|
||||
cloudStackVolume.setFile(fileInfo);
|
||||
} else {
|
||||
logger.warn("getCloudStackVolume: File not found for volume UUID: {} and file path: {}", cloudStackVolumeMap.get(OntapStorageConstants.VOLUME_UUID), cloudStackVolumeMap.get(OntapStorageConstants.FILE_PATH));
|
||||
}
|
||||
|
||||
return cloudStackVolume;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
|
||||
logger.info("createAccessGroup: Create access group {}: " , accessGroup);
|
||||
Map<String, String> details = accessGroup.getPrimaryDataStoreInfo().getDetails();
|
||||
|
||||
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId());
|
||||
String svmName = details.get(OntapStorageConstants.SVM_NAME);
|
||||
String volumeUUID = details.get(OntapStorageConstants.VOLUME_UUID);
|
||||
String volumeName = details.get(OntapStorageConstants.VOLUME_NAME);
|
||||
|
||||
// Create the export policy
|
||||
ExportPolicy policyRequest = createExportPolicyRequest(accessGroup,svmName,volumeName);
|
||||
try {
|
||||
ExportPolicy createdPolicy = createExportPolicy(svmName, policyRequest);
|
||||
logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName());
|
||||
logger.info("createAccessGroup: ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName());
|
||||
// attach export policy to volume of storage pool
|
||||
assignExportPolicyToVolume(volumeUUID,createdPolicy.getName());
|
||||
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), OntapStorageConstants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true);
|
||||
storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), OntapStorageConstants.EXPORT_POLICY_NAME, createdPolicy.getName(), true);
|
||||
// save the export policy details in storage pool details
|
||||
storagePoolDetailsDao.addDetail(accessGroup.getStoragePoolId(), OntapStorageConstants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true);
|
||||
storagePoolDetailsDao.addDetail(accessGroup.getStoragePoolId(), OntapStorageConstants.EXPORT_POLICY_NAME, createdPolicy.getName(), true);
|
||||
logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName);
|
||||
accessGroup.setPolicy(policyRequest);
|
||||
return accessGroup;
|
||||
|
|
@ -128,23 +167,15 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
logger.info("deleteAccessGroup: Deleting export policy");
|
||||
|
||||
if (accessGroup == null) {
|
||||
throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null");
|
||||
throw new CloudRuntimeException("Invalid accessGroup object - accessGroup is null");
|
||||
}
|
||||
|
||||
PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
|
||||
if (primaryDataStoreInfo == null) {
|
||||
throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
|
||||
}
|
||||
logger.info("deleteAccessGroup: Deleting export policy for the storage pool {}", primaryDataStoreInfo.getName());
|
||||
try {
|
||||
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId());
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
String svmName = storage.getSvmName();
|
||||
String exportPolicyName = primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_NAME);
|
||||
String exportPolicyId = primaryDataStoreInfo.getDetails().get(OntapStorageConstants.EXPORT_POLICY_ID);
|
||||
if (exportPolicyId == null || exportPolicyId.isEmpty()) {
|
||||
logger.warn("deleteAccessGroup: Export policy ID not found in storage pool details for storage pool {}. Cannot delete export policy.", primaryDataStoreInfo.getName());
|
||||
throw new CloudRuntimeException("Export policy ID not found for storage pool: " + primaryDataStoreInfo.getName());
|
||||
}
|
||||
// Determine export policy attached to the storage pool
|
||||
String exportPolicyName = details.get(OntapStorageConstants.EXPORT_POLICY_NAME);
|
||||
String exportPolicyId = details.get(OntapStorageConstants.EXPORT_POLICY_ID);
|
||||
|
||||
try {
|
||||
nasFeignClient.deleteExportPolicyById(authHeader,exportPolicyId);
|
||||
|
|
@ -152,6 +183,7 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
} catch (Exception e) {
|
||||
logger.error("deleteAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Failed to delete export policy: " + e.getMessage(), e);
|
||||
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e);
|
||||
|
|
@ -180,11 +212,11 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
|
||||
@Override
|
||||
public Map<String, String> getLogicalAccess(Map<String, String> values) {
|
||||
return null;
|
||||
return Map.of();
|
||||
}
|
||||
|
||||
private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) {
|
||||
logger.info("Creating export policy: {} for SVM: {}", policy, svmName);
|
||||
logger.info("createExportPolicy: Creating export policy: {} for SVM: {}", policy, svmName);
|
||||
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
|
|
@ -197,18 +229,18 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. " +
|
||||
"Received successful response but policy does not exist.");
|
||||
}
|
||||
logger.info("Export policy created and verified successfully: " + policy.getName());
|
||||
logger.info("createExportPolicy: Export policy created and verified successfully: " + policy.getName());
|
||||
} catch (FeignException e) {
|
||||
logger.error("Failed to verify export policy creation: " + policy.getName(), e);
|
||||
logger.error("createExportPolicy: Failed to verify export policy creation: " + policy.getName(), e);
|
||||
throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage());
|
||||
}
|
||||
logger.info("Export policy created successfully with name {}", policy.getName());
|
||||
logger.info("createExportPolicy: Export policy created successfully with name {}", policy.getName());
|
||||
return policiesResponse.getRecords().get(0);
|
||||
} catch (FeignException e) {
|
||||
logger.error("Failed to create export policy: {}", policy, e);
|
||||
logger.error("createExportPolicy: Failed to create export policy: {}", policy, e);
|
||||
throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception while creating export policy: {}", policy, e);
|
||||
logger.error("createExportPolicy: Exception while creating export policy: {}", policy, e);
|
||||
throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
|
@ -231,6 +263,7 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
throw new CloudRuntimeException("Failed to attach policy " + policyName + "to volume " + volumeUuid);
|
||||
}
|
||||
String jobUUID = jobResponse.getJob().getUuid();
|
||||
//Create URI for GET Job API
|
||||
int jobRetryCount = 0;
|
||||
Job createVolumeJob = null;
|
||||
while(createVolumeJob == null || !createVolumeJob.getState().equals(OntapStorageConstants.JOB_SUCCESS)) {
|
||||
|
|
@ -252,19 +285,72 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
Thread.sleep(OntapStorageConstants.CREATE_VOLUME_CHECK_SLEEP_TIME);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception while updating volume: ", e);
|
||||
logger.error("assignExportPolicyToVolume: Exception while updating volume: ", e);
|
||||
throw new CloudRuntimeException("Failed to update volume: " + e.getMessage());
|
||||
}
|
||||
logger.info("Export policy successfully assigned to volume: {}", volumeUuid);
|
||||
logger.info("assignExportPolicyToVolume: Export policy successfully assigned to volume: {}", volumeUuid);
|
||||
} catch (FeignException e) {
|
||||
logger.error("Failed to assign export policy to volume: {}", volumeUuid, e);
|
||||
logger.error("assignExportPolicyToVolume: Failed to assign export policy to volume: {}", volumeUuid, e);
|
||||
throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e);
|
||||
logger.error("assignExportPolicyToVolume: Exception while assigning export policy to volume: {}", volumeUuid, e);
|
||||
throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private boolean deleteFile(String volumeUuid, String filePath) {
|
||||
logger.info("deleteFile: Deleting file: {} from volume: {}", filePath, volumeUuid);
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
nasFeignClient.deleteFile(authHeader, volumeUuid, filePath);
|
||||
logger.info("deleteFile: File deleted successfully: {} from volume: {}", filePath, volumeUuid);
|
||||
return true;
|
||||
} catch (FeignException e) {
|
||||
logger.error("deleteFile: Failed to delete file: {} from volume: {}", filePath, volumeUuid, e);
|
||||
return false;
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteFile: Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private OntapResponse<FileInfo> getFileInfo(String volumeUuid, String filePath) {
|
||||
logger.debug("getFileInfo: Getting file info for: {} in volume: {}", filePath, volumeUuid);
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
OntapResponse<FileInfo> response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath);
|
||||
logger.debug("getFileInfo: Retrieved file info for: {} in volume: {}", filePath, volumeUuid);
|
||||
return response;
|
||||
} catch (FeignException e){
|
||||
if (e.status() == 404) {
|
||||
logger.debug("getFileInfo: File not found: {} in volume: {}", filePath, volumeUuid);
|
||||
return null;
|
||||
}
|
||||
logger.error("getFileInfo: Failed to get file info: {} in volume: {}", filePath, volumeUuid, e);
|
||||
throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
|
||||
} catch (Exception e){
|
||||
logger.error("getFileInfo: Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e);
|
||||
throw new CloudRuntimeException("Failed to get file info: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) {
|
||||
logger.info("updateFile: Updating file: {} in volume: {}", filePath, volumeUuid);
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo);
|
||||
logger.info("updateFile: File updated successfully: {} in volume: {}", filePath, volumeUuid);
|
||||
return true;
|
||||
} catch (FeignException e) {
|
||||
logger.error("updateFile: Failed to update file: {} in volume: {}", filePath, volumeUuid, e);
|
||||
return false;
|
||||
} catch (Exception e){
|
||||
logger.error("updateFile: Exception while updating file: {} in volume: {}", filePath, volumeUuid, e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){
|
||||
|
||||
String exportPolicyName = OntapStorageUtils.generateExportPolicyName(svmName,volumeName);
|
||||
|
|
@ -280,13 +366,13 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
String ip = (hostStorageIp != null && !hostStorageIp.isEmpty())
|
||||
? hostStorageIp
|
||||
: host.getPrivateIpAddress();
|
||||
String ipToUse = ip + "/31";
|
||||
String ipToUse = ip + "/32";
|
||||
ExportRule.ExportClient exportClient = new ExportRule.ExportClient();
|
||||
exportClient.setMatch(ipToUse);
|
||||
exportClients.add(exportClient);
|
||||
}
|
||||
exportRule.setClients(exportClients);
|
||||
exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.ANY));
|
||||
exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.NFS3));
|
||||
exportRule.setRoRule(List.of("sys"));
|
||||
exportRule.setRwRule(List.of("sys"));
|
||||
exportRule.setSuperuser(List.of("sys"));
|
||||
|
|
@ -300,4 +386,153 @@ public class UnifiedNASStrategy extends NASStrategy {
|
|||
|
||||
return exportPolicy;
|
||||
}
|
||||
|
||||
private String updateCloudStackVolumeMetadata(String dataStoreId, DataObject volumeInfo) {
|
||||
logger.info("updateCloudStackVolumeMetadata called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo );
|
||||
try {
|
||||
VolumeObject volumeObject = (VolumeObject) volumeInfo;
|
||||
long volumeId = volumeObject.getId();
|
||||
logger.info("updateCloudStackVolumeMetadata: VolumeInfo ID from VolumeObject: {}", volumeId);
|
||||
VolumeVO volume = volumeDao.findById(volumeId);
|
||||
if (volume == null) {
|
||||
throw new CloudRuntimeException("Volume not found with id: " + volumeId);
|
||||
}
|
||||
String volumeUuid = volumeInfo.getUuid();
|
||||
volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
|
||||
volume.setPoolId(Long.parseLong(dataStoreId));
|
||||
volume.setPath(volumeUuid); // Filename for qcow2 file
|
||||
volumeDao.update(volume.getId(), volume);
|
||||
logger.info("Updated volume path to {} for volume ID {}", volumeUuid, volumeId);
|
||||
return volumeUuid;
|
||||
} catch (Exception e){
|
||||
logger.error("updateCloudStackVolumeMetadata: Exception while updating volumeInfo: {} in volume: {}", dataStoreId, volumeInfo.getUuid(), e);
|
||||
throw new CloudRuntimeException("Exception while updating volumeInfo: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private Answer createVolumeOnKVMHost(DataObject volumeInfo) {
|
||||
logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo);
|
||||
|
||||
try {
|
||||
logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid());
|
||||
CreateObjectCommand cmd = new CreateObjectCommand(volumeInfo.getTO());
|
||||
EndPoint ep = epSelector.select(volumeInfo);
|
||||
if (ep == null) {
|
||||
String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up";
|
||||
logger.error(errMsg);
|
||||
return new Answer(cmd, false, errMsg);
|
||||
}
|
||||
logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr());
|
||||
Answer answer = ep.sendMessage(cmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host");
|
||||
} else {
|
||||
logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}",
|
||||
answer != null ? answer.getDetails() : "null answer");
|
||||
}
|
||||
return answer;
|
||||
} catch (Exception e) {
|
||||
logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e);
|
||||
return new Answer(null, false, e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
private Answer deleteVolumeOnKVMHost(DataObject volumeInfo) {
|
||||
logger.info("deleteVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo);
|
||||
|
||||
try {
|
||||
logger.info("deleteVolumeOnKVMHost: Sending DeleteCommand to KVM agent for volume: {}", volumeInfo.getUuid());
|
||||
DeleteCommand cmd = new DeleteCommand(volumeInfo.getTO());
|
||||
EndPoint ep = epSelector.select(volumeInfo);
|
||||
if (ep == null) {
|
||||
String errMsg = "No remote endpoint to send DeleteCommand, check if host is up";
|
||||
logger.error(errMsg);
|
||||
return new Answer(cmd, false, errMsg);
|
||||
}
|
||||
logger.info("deleteVolumeOnKVMHost: Sending command to endpoint: {}", ep.getHostAddr());
|
||||
Answer answer = ep.sendMessage(cmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
logger.info("deleteVolumeOnKVMHost: Successfully deleted qcow2 file on KVM host");
|
||||
} else {
|
||||
logger.error("deleteVolumeOnKVMHost: Failed to delete qcow2 file: {}",
|
||||
answer != null ? answer.getDetails() : "null answer");
|
||||
}
|
||||
return answer;
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteVolumeOnKVMHost: Exception sending DeleteCommand", e);
|
||||
return new Answer(null, false, e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
private FileInfo getFile(String volumeUuid, String filePath) {
|
||||
logger.info("Get File: {} for volume: {}", filePath, volumeUuid);
|
||||
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
OntapResponse<FileInfo> fileResponse = null;
|
||||
try {
|
||||
fileResponse = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath);
|
||||
if (fileResponse == null || fileResponse.getRecords().isEmpty()) {
|
||||
throw new CloudRuntimeException("File " + filePath + " not found on ONTAP. " +
|
||||
"Received successful response but file does not exist.");
|
||||
}
|
||||
} catch (FeignException e) {
|
||||
logger.error("getFile: Failed to get file response: " + filePath, e);
|
||||
throw new CloudRuntimeException("File not found: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("getFile: Exception to get file: {}", filePath, e);
|
||||
throw new CloudRuntimeException("Failed to get the file: " + e.getMessage());
|
||||
}
|
||||
logger.info("getFile: File retrieved successfully with name {}", filePath);
|
||||
return fileResponse.getRecords().get(0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reverts a file to a snapshot using the ONTAP CLI-based snapshot file restore API.
|
||||
*
|
||||
* <p>ONTAP REST API (CLI passthrough):
|
||||
* {@code POST /api/private/cli/volume/snapshot/restore-file}</p>
|
||||
*
|
||||
* <p>This method uses the CLI native API which is more reliable and works
|
||||
* consistently for both NFS files and iSCSI LUNs.</p>
|
||||
*
|
||||
* @param snapshotName The ONTAP FlexVolume snapshot name
|
||||
* @param flexVolUuid The FlexVolume UUID (not used in CLI API, kept for interface consistency)
|
||||
* @param snapshotUuid The ONTAP snapshot UUID (not used in CLI API, kept for interface consistency)
|
||||
* @param volumePath The file path within the FlexVolume
|
||||
* @param lunUuid Not used for NFS (null)
|
||||
* @param flexVolName The FlexVolume name (required for CLI API)
|
||||
* @return JobResponse for the async restore operation
|
||||
*/
|
||||
@Override
|
||||
public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid,
|
||||
String snapshotUuid, String volumePath,
|
||||
String lunUuid, String flexVolName) {
|
||||
logger.info("revertSnapshotForCloudStackVolume [NFS]: Restoring file [{}] from snapshot [{}] on FlexVol [{}]",
|
||||
volumePath, snapshotName, flexVolName);
|
||||
|
||||
if (snapshotName == null || snapshotName.isEmpty()) {
|
||||
throw new CloudRuntimeException("Snapshot name is required for NFS snapshot revert");
|
||||
}
|
||||
if (volumePath == null || volumePath.isEmpty()) {
|
||||
throw new CloudRuntimeException("File path is required for NFS snapshot revert");
|
||||
}
|
||||
if (flexVolName == null || flexVolName.isEmpty()) {
|
||||
throw new CloudRuntimeException("FlexVolume name is required for NFS snapshot revert");
|
||||
}
|
||||
|
||||
String authHeader = getAuthHeader();
|
||||
String svmName = storage.getSvmName();
|
||||
|
||||
// Prepare the file path for ONTAP CLI API (ensure it starts with "/")
|
||||
String ontapFilePath = volumePath.startsWith("/") ? volumePath : "/" + volumePath;
|
||||
|
||||
// Create CLI snapshot restore request
|
||||
CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest(
|
||||
svmName, flexVolName, snapshotName, ontapFilePath);
|
||||
|
||||
logger.info("revertSnapshotForCloudStackVolume: Calling CLI file restore API with vserver={}, volume={}, snapshot={}, path={}",
|
||||
svmName, flexVolName, snapshotName, ontapFilePath);
|
||||
|
||||
return getSnapshotFeignClient().restoreFileFromSnapshotCli(authHeader, restoreRequest);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,24 +20,27 @@
|
|||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.storage.feign.FeignClientFactory;
|
||||
import org.apache.cloudstack.storage.feign.client.SANFeignClient;
|
||||
import feign.FeignException;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.feign.model.Igroup;
|
||||
import org.apache.cloudstack.storage.feign.model.Initiator;
|
||||
import org.apache.cloudstack.storage.feign.model.Svm;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.cloudstack.storage.feign.model.Lun;
|
||||
import org.apache.cloudstack.storage.feign.model.LunMap;
|
||||
import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
|
@ -45,14 +48,12 @@ import java.util.Map;
|
|||
public class UnifiedSANStrategy extends SANStrategy {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(UnifiedSANStrategy.class);
|
||||
private final FeignClientFactory feignClientFactory;
|
||||
private final SANFeignClient sanFeignClient;
|
||||
@Inject
|
||||
private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
|
||||
public UnifiedSANStrategy(OntapStorage ontapStorage) {
|
||||
super(ontapStorage);
|
||||
String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getManagementLIF();
|
||||
this.feignClientFactory = new FeignClientFactory();
|
||||
this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL);
|
||||
String baseURL = OntapStorageConstants.HTTPS + ontapStorage.getStorageIP();
|
||||
}
|
||||
|
||||
public void setOntapStorage(OntapStorage ontapStorage) {
|
||||
|
|
@ -61,7 +62,36 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
|
||||
@Override
|
||||
public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
return null;
|
||||
logger.info("createCloudStackVolume : Creating Lun with cloudstackVolume request {} ", cloudstackVolume);
|
||||
if (cloudstackVolume == null || cloudstackVolume.getLun() == null) {
|
||||
logger.error("createCloudStackVolume: LUN creation failed. Invalid request: {}", cloudstackVolume);
|
||||
throw new CloudRuntimeException(" Failed to create Lun, invalid request");
|
||||
}
|
||||
try {
|
||||
// Get AuthHeader
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
// Create URI for lun creation
|
||||
//TODO: It is possible that Lun creation will take time and we may need to handle through async job.
|
||||
OntapResponse<Lun> createdLun = sanFeignClient.createLun(authHeader, true, cloudstackVolume.getLun());
|
||||
if (createdLun == null || createdLun.getRecords() == null || createdLun.getRecords().size() == 0) {
|
||||
logger.error("createCloudStackVolume: LUN creation failed for Lun {}", cloudstackVolume.getLun().getName());
|
||||
throw new CloudRuntimeException("Failed to create Lun: " + cloudstackVolume.getLun().getName());
|
||||
}
|
||||
Lun lun = createdLun.getRecords().get(0);
|
||||
logger.debug("createCloudStackVolume: LUN created successfully. Lun: {}", lun);
|
||||
logger.info("createCloudStackVolume: LUN created successfully. LunName: {}", lun.getName());
|
||||
|
||||
CloudStackVolume createdCloudStackVolume = new CloudStackVolume();
|
||||
createdCloudStackVolume.setLun(lun);
|
||||
return createdCloudStackVolume;
|
||||
} catch (FeignException e) {
|
||||
logger.error("FeignException occurred while creating LUN: {}, Status: {}, Exception: {}",
|
||||
cloudstackVolume.getLun().getName(), e.status(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while creating LUN: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -70,47 +100,104 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {}
|
||||
public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
if (cloudstackVolume == null || cloudstackVolume.getLun() == null) {
|
||||
logger.error("deleteCloudStackVolume: Lun deletion failed. Invalid request: {}", cloudstackVolume);
|
||||
throw new CloudRuntimeException(" Failed to delete Lun, invalid request");
|
||||
}
|
||||
logger.info("deleteCloudStackVolume : Deleting Lun: {}", cloudstackVolume.getLun().getName());
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
Map<String, Object> queryParams = Map.of("allow_delete_while_mapped", "true");
|
||||
try {
|
||||
sanFeignClient.deleteLun(authHeader, cloudstackVolume.getLun().getUuid(), queryParams);
|
||||
} catch (FeignException feignEx) {
|
||||
if (feignEx.status() == 404) {
|
||||
logger.warn("deleteCloudStackVolume: Lun {} does not exist (status 404), skipping deletion", cloudstackVolume.getLun().getName());
|
||||
return;
|
||||
}
|
||||
throw feignEx;
|
||||
}
|
||||
logger.info("deleteCloudStackVolume: Lun deleted successfully. LunName: {}", cloudstackVolume.getLun().getName());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while deleting Lun: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to delete Lun: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {}
|
||||
|
||||
@Override
|
||||
public CloudStackVolume getCloudStackVolume(Map<String, String> values) {
|
||||
return null;
|
||||
logger.info("getCloudStackVolume : fetching Lun");
|
||||
logger.debug("getCloudStackVolume : fetching Lun with params {} ", values);
|
||||
if (values == null || values.isEmpty()) {
|
||||
logger.error("getCloudStackVolume: get Lun failed. Invalid request: {}", values);
|
||||
throw new CloudRuntimeException(" get Lun Failed, invalid request");
|
||||
}
|
||||
String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME);
|
||||
String lunName = values.get(OntapStorageConstants.NAME);
|
||||
if (svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) {
|
||||
logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or Lun name: {}", svmName, lunName);
|
||||
throw new CloudRuntimeException("Failed to get Lun, invalid request");
|
||||
}
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
Map<String, Object> queryParams = Map.of(OntapStorageConstants.SVM_DOT_NAME, svmName, OntapStorageConstants.NAME, lunName);
|
||||
OntapResponse<Lun> lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams);
|
||||
if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().isEmpty()) {
|
||||
logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found. Returning null.", lunName, svmName);
|
||||
return null;
|
||||
}
|
||||
Lun lun = lunResponse.getRecords().get(0);
|
||||
logger.debug("getCloudStackVolume: Lun Details : {}", lun);
|
||||
logger.info("getCloudStackVolume: Fetched the Lun successfully. LunName: {}", lun.getName());
|
||||
|
||||
CloudStackVolume cloudStackVolume = new CloudStackVolume();
|
||||
cloudStackVolume.setLun(lun);
|
||||
return cloudStackVolume;
|
||||
} catch (FeignException e) {
|
||||
if (e.status() == 404) {
|
||||
logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found (status 404). Returning null.", lunName, svmName);
|
||||
return null;
|
||||
}
|
||||
logger.error("FeignException occurred while fetching Lun, Status: {}, Exception: {}", e.status(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
|
||||
logger.info("createAccessGroup : Create Igroup");
|
||||
String igroupName = "unknown";
|
||||
logger.debug("createAccessGroup : Creating Igroup with access group request {} ", accessGroup);
|
||||
if (accessGroup == null) {
|
||||
logger.error("createAccessGroup: Igroup creation failed. Invalid request: {}", accessGroup);
|
||||
throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid request");
|
||||
throw new CloudRuntimeException("Failed to create Igroup, invalid request");
|
||||
}
|
||||
// Get StoragePool details
|
||||
if (accessGroup.getStoragePoolId() == null) {
|
||||
throw new CloudRuntimeException("Failed to create Igroup, invalid datastore details in the request");
|
||||
}
|
||||
if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) {
|
||||
throw new CloudRuntimeException("Failed to create Igroup, no hosts to connect provided in the request");
|
||||
}
|
||||
|
||||
String igroupName = null;
|
||||
try {
|
||||
if (accessGroup.getPrimaryDataStoreInfo() == null || accessGroup.getPrimaryDataStoreInfo().getDetails() == null
|
||||
|| accessGroup.getPrimaryDataStoreInfo().getDetails().isEmpty()) {
|
||||
throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid datastore details in the request");
|
||||
}
|
||||
Map<String, String> dataStoreDetails = accessGroup.getPrimaryDataStoreInfo().getDetails();
|
||||
Map<String, String> dataStoreDetails = storagePoolDetailsDao.listDetailsKeyPairs(accessGroup.getStoragePoolId());
|
||||
logger.debug("createAccessGroup: Successfully fetched datastore details.");
|
||||
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
|
||||
// Generate Igroup request
|
||||
Igroup igroupRequest = new Igroup();
|
||||
List<String> hostsIdentifier = new ArrayList<>();
|
||||
String svmName = dataStoreDetails.get(OntapStorageConstants.SVM_NAME);
|
||||
igroupName = OntapStorageUtils.getIgroupName(svmName, accessGroup.getScope().getScopeType(), accessGroup.getScope().getScopeId());
|
||||
Hypervisor.HypervisorType hypervisorType = accessGroup.getPrimaryDataStoreInfo().getHypervisor();
|
||||
|
||||
ProtocolType protocol = ProtocolType.valueOf(dataStoreDetails.get(OntapStorageConstants.PROTOCOL));
|
||||
if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) {
|
||||
throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, no hosts to connect provided in the request");
|
||||
}
|
||||
if (!validateProtocolSupportAndFetchHostsIdentifier(accessGroup.getHostsToConnect(), protocol, hostsIdentifier)) {
|
||||
String errMsg = "createAccessGroup: Not all hosts in the " + accessGroup.getScope().getScopeType().toString() + " support the protocol: " + protocol.name();
|
||||
|
||||
// Check if all hosts support the protocol
|
||||
if (!validateProtocolSupport(accessGroup.getHostsToConnect(), protocol)) {
|
||||
String errMsg = "Not all hosts " + " support the protocol: " + protocol.name();
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
|
|
@ -119,41 +206,42 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
svm.setName(svmName);
|
||||
igroupRequest.setSvm(svm);
|
||||
}
|
||||
|
||||
if (igroupName != null && !igroupName.isEmpty()) {
|
||||
igroupRequest.setName(igroupName);
|
||||
}
|
||||
|
||||
// TODO: Defaulting to LINUX for zone scope for now, this has to be revisited when we support other hypervisors
|
||||
igroupRequest.setOsType(Igroup.OsTypeEnum.Linux);
|
||||
|
||||
if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
|
||||
for (HostVO host : accessGroup.getHostsToConnect()) {
|
||||
igroupName = OntapStorageUtils.getIgroupName(svmName, host.getName());
|
||||
igroupRequest.setName(igroupName);
|
||||
|
||||
List<Initiator> initiators = new ArrayList<>();
|
||||
for (String hostIdentifier : hostsIdentifier) {
|
||||
Initiator initiator = new Initiator();
|
||||
initiator.setName(hostIdentifier);
|
||||
initiators.add(initiator);
|
||||
}
|
||||
Initiator initiator = new Initiator();
|
||||
initiator.setName(host.getStorageUrl());// CloudStack has one iqn for one host
|
||||
initiators.add(initiator);
|
||||
igroupRequest.setInitiators(initiators);
|
||||
igroupRequest.setDeleteOnUnmap(true);
|
||||
}
|
||||
igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf("iscsi"));
|
||||
igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf(OntapStorageConstants.ISCSI));
|
||||
// Create Igroup
|
||||
logger.debug("createAccessGroup: About to call sanFeignClient.createIgroup with igroupName: {}", igroupName);
|
||||
AccessGroup createdAccessGroup = new AccessGroup();
|
||||
OntapResponse<Igroup> createdIgroup = null;
|
||||
try {
|
||||
// Get AuthHeader
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
createdIgroup = sanFeignClient.createIgroup(authHeader, true, igroupRequest);
|
||||
} catch (Exception feignEx) {
|
||||
String errMsg = feignEx.getMessage();
|
||||
if (errMsg != null && errMsg.contains(("5374023"))) {
|
||||
logger.warn("createAccessGroup: Igroup with name {} already exists. Fetching existing Igroup.", igroupName);
|
||||
} catch (FeignException feignEx) {
|
||||
if (feignEx.status() == 409) {
|
||||
logger.warn("createAccessGroup: Igroup with name {} already exists (status 409). Fetching existing Igroup.", igroupName);
|
||||
// TODO: Currently we aren't doing anything with the returned AccessGroup object, so, haven't added code here to fetch the existing Igroup and set it in AccessGroup.
|
||||
return createdAccessGroup;
|
||||
}
|
||||
logger.error("createAccessGroup: Exception during Feign call: {}", feignEx.getMessage(), feignEx);
|
||||
logger.error("createAccessGroup: FeignException during Igroup creation: Status: {}, Exception: {}", feignEx.status(), feignEx.getMessage(), feignEx);
|
||||
throw feignEx;
|
||||
}
|
||||
|
||||
logger.debug("createAccessGroup: createdIgroup: {}", createdIgroup);
|
||||
logger.info("createAccessGroup: createdIgroup: {}", createdIgroup);
|
||||
logger.debug("createAccessGroup: createdIgroup Records: {}", createdIgroup.getRecords());
|
||||
if (createdIgroup == null || createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) {
|
||||
if (createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) {
|
||||
logger.error("createAccessGroup: Igroup creation failed for Igroup Name {}", igroupName);
|
||||
throw new CloudRuntimeException("Failed to create Igroup: " + igroupName);
|
||||
}
|
||||
|
|
@ -175,82 +263,77 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
logger.info("deleteAccessGroup: Deleting iGroup");
|
||||
|
||||
if (accessGroup == null) {
|
||||
throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null");
|
||||
logger.error("deleteAccessGroup: Igroup deletion failed. Invalid request: {}", accessGroup);
|
||||
throw new CloudRuntimeException("Failed to delete Igroup, invalid request");
|
||||
}
|
||||
|
||||
PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
|
||||
if (primaryDataStoreInfo == null) {
|
||||
throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
|
||||
// Get StoragePool details
|
||||
if (accessGroup.getStoragePoolId() == null) {
|
||||
throw new CloudRuntimeException("Failed to delete Igroup, invalid datastore details in the request");
|
||||
}
|
||||
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
|
||||
String svmName = storage.getSvmName();
|
||||
//Get iGroup name per host
|
||||
if(!CollectionUtils.isEmpty(accessGroup.getHostsToConnect())) {
|
||||
for (HostVO host : accessGroup.getHostsToConnect()) {
|
||||
String igroupName = OntapStorageUtils.getIgroupName(svmName, host.getName());
|
||||
logger.info("deleteAccessGroup: iGroup name '{}'", igroupName);
|
||||
|
||||
String igroupName;
|
||||
if (primaryDataStoreInfo.getClusterId() != null) {
|
||||
igroupName = OntapStorageUtils.getIgroupName(svmName, com.cloud.storage.ScopeType.CLUSTER, primaryDataStoreInfo.getClusterId());
|
||||
logger.info("deleteAccessGroup: Deleting cluster-scoped iGroup '{}'", igroupName);
|
||||
} else {
|
||||
igroupName = OntapStorageUtils.getIgroupName(svmName, com.cloud.storage.ScopeType.ZONE, primaryDataStoreInfo.getDataCenterId());
|
||||
logger.info("deleteAccessGroup: Deleting zone-scoped iGroup '{}'", igroupName);
|
||||
}
|
||||
// Get the iGroup to retrieve its UUID
|
||||
Map<String, Object> igroupParams = Map.of(
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.NAME, igroupName
|
||||
);
|
||||
|
||||
Map<String, Object> igroupParams = Map.of(
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.NAME, igroupName
|
||||
);
|
||||
try {
|
||||
OntapResponse<Igroup> igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams);
|
||||
if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) {
|
||||
logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
OntapResponse<Igroup> igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams);
|
||||
if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) {
|
||||
logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName);
|
||||
return;
|
||||
}
|
||||
Igroup igroup = igroupResponse.getRecords().get(0);
|
||||
String igroupUuid = igroup.getUuid();
|
||||
|
||||
Igroup igroup = igroupResponse.getRecords().get(0);
|
||||
String igroupUuid = igroup.getUuid();
|
||||
if (igroupUuid == null || igroupUuid.isEmpty()) {
|
||||
throw new CloudRuntimeException("iGroup UUID is null or empty for iGroup: " + igroupName);
|
||||
}
|
||||
|
||||
if (igroupUuid == null || igroupUuid.isEmpty()) {
|
||||
throw new CloudRuntimeException("deleteAccessGroup: iGroup UUID is null or empty for iGroup: " + igroupName);
|
||||
}
|
||||
logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid);
|
||||
|
||||
logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid);
|
||||
// Delete the iGroup using the UUID
|
||||
sanFeignClient.deleteIgroup(authHeader, igroupUuid);
|
||||
|
||||
sanFeignClient.deleteIgroup(authHeader, igroupUuid);
|
||||
logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName);
|
||||
|
||||
logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName);
|
||||
|
||||
} catch (Exception e) {
|
||||
String errorMsg = e.getMessage();
|
||||
if (errorMsg != null && (errorMsg.contains("5374852") || errorMsg.contains("not found"))) {
|
||||
logger.warn("deleteAccessGroup: iGroup '{}' does not exist, skipping deletion", igroupName);
|
||||
} else {
|
||||
throw e;
|
||||
} catch (FeignException e) {
|
||||
if (e.status() == 404) {
|
||||
logger.warn("deleteAccessGroup: iGroup '{}' does not exist (status 404), skipping deletion", igroupName);
|
||||
} else {
|
||||
logger.error("deleteAccessGroup: FeignException occurred: Status: {}, Exception: {}", e.status(), e.getMessage(), e);
|
||||
throw e;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteAccessGroup: Exception occurred: {}", e.getMessage(), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} catch (FeignException e) {
|
||||
logger.error("deleteAccessGroup: FeignException occurred while deleting iGroup. Status: {}, Exception: {}", e.status(), e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e);
|
||||
} catch (Exception e) {
|
||||
logger.error("deleteAccessGroup: Failed to delete iGroup. Exception: {}", e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean validateProtocolSupportAndFetchHostsIdentifier(List<HostVO> hosts, ProtocolType protocolType, List<String> hostIdentifiers) {
|
||||
switch (protocolType) {
|
||||
case ISCSI:
|
||||
String protocolPrefix = OntapStorageConstants.IQN;
|
||||
for (HostVO host : hosts) {
|
||||
if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
|
||||
|| !host.getStorageUrl().startsWith(protocolPrefix)) {
|
||||
return false;
|
||||
}
|
||||
hostIdentifiers.add(host.getStorageUrl());
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
|
||||
private boolean validateProtocolSupport(List<HostVO> hosts, ProtocolType protocolType) {
|
||||
String protocolPrefix = OntapStorageConstants.IQN;
|
||||
for (HostVO host : hosts) {
|
||||
if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty() || !host.getStorageUrl().startsWith(protocolPrefix)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name());
|
||||
return true;
|
||||
|
|
@ -261,18 +344,19 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessGroup getAccessGroup(Map<String, String> values) {
|
||||
logger.info("getAccessGroup : fetch Igroup");
|
||||
logger.debug("getAccessGroup : fetching Igroup with params {} ", values);
|
||||
if (values == null || values.isEmpty()) {
|
||||
logger.error("getAccessGroup: get Igroup failed. Invalid request: {}", values);
|
||||
throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request");
|
||||
throw new CloudRuntimeException("get Igroup Failed, invalid request");
|
||||
}
|
||||
String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME);
|
||||
String igroupName = values.get(OntapStorageConstants.NAME);
|
||||
if (svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) {
|
||||
logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName);
|
||||
throw new CloudRuntimeException("getAccessGroup : Failed to get Igroup, invalid request");
|
||||
throw new CloudRuntimeException("Failed to get Igroup, invalid request");
|
||||
}
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
|
|
@ -286,24 +370,229 @@ public class UnifiedSANStrategy extends SANStrategy {
|
|||
AccessGroup accessGroup = new AccessGroup();
|
||||
accessGroup.setIgroup(igroup);
|
||||
return accessGroup;
|
||||
} catch (Exception e) {
|
||||
String errMsg = e.getMessage();
|
||||
if (errMsg != null && errMsg.contains("not found")) {
|
||||
logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' ({}). Returning null.", igroupName, svmName, errMsg);
|
||||
} catch (FeignException e) {
|
||||
if (e.status() == 404) {
|
||||
logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' (status 404). Returning null.", igroupName, svmName);
|
||||
return null;
|
||||
}
|
||||
logger.error("Exception occurred while fetching Igroup, Exception: {}", errMsg);
|
||||
throw new CloudRuntimeException("Failed to fetch Igroup details: " + errMsg);
|
||||
logger.error("FeignException occurred while fetching Igroup, Status: {}, Exception: {}", e.status(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public Map<String, String> enableLogicalAccess(Map<String, String> values) {
|
||||
logger.info("enableLogicalAccess : Create LunMap");
|
||||
logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values);
|
||||
Map<String, String> response = null;
|
||||
if (values == null) {
|
||||
logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: null");
|
||||
throw new CloudRuntimeException("Failed to create LunMap, invalid request");
|
||||
}
|
||||
String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME);
|
||||
String lunName = values.get(OntapStorageConstants.LUN_DOT_NAME);
|
||||
String igroupName = values.get(OntapStorageConstants.IGROUP_DOT_NAME);
|
||||
if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) {
|
||||
logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: {}", values);
|
||||
throw new CloudRuntimeException("Failed to create LunMap, invalid request");
|
||||
}
|
||||
try {
|
||||
// Get AuthHeader
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
// Create LunMap
|
||||
LunMap lunMapRequest = new LunMap();
|
||||
Svm svm = new Svm();
|
||||
svm.setName(svmName);
|
||||
lunMapRequest.setSvm(svm);
|
||||
//Set Lun name
|
||||
Lun lun = new Lun();
|
||||
lun.setName(lunName);
|
||||
lunMapRequest.setLun(lun);
|
||||
//Set Igroup name
|
||||
Igroup igroup = new Igroup();
|
||||
igroup.setName(igroupName);
|
||||
lunMapRequest.setIgroup(igroup);
|
||||
try {
|
||||
sanFeignClient.createLunMap(authHeader, true, lunMapRequest);
|
||||
} catch (Exception feignEx) {
|
||||
String errMsg = feignEx.getMessage();
|
||||
if (errMsg != null && errMsg.contains(("LUN already mapped to this group"))) {
|
||||
logger.warn("enableLogicalAccess: LunMap for Lun: {} and igroup: {} already exists.", lunName, igroupName);
|
||||
} else {
|
||||
logger.error("enableLogicalAccess: Exception during Feign call: {}", feignEx.getMessage(), feignEx);
|
||||
throw feignEx;
|
||||
}
|
||||
}
|
||||
// Get the LunMap details
|
||||
OntapResponse<LunMap> lunMapResponse = null;
|
||||
try {
|
||||
lunMapResponse = sanFeignClient.getLunMapResponse(authHeader,
|
||||
Map.of(
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.LUN_DOT_NAME, lunName,
|
||||
OntapStorageConstants.IGROUP_DOT_NAME, igroupName,
|
||||
OntapStorageConstants.FIELDS, OntapStorageConstants.LOGICAL_UNIT_NUMBER
|
||||
));
|
||||
response = Map.of(
|
||||
OntapStorageConstants.LOGICAL_UNIT_NUMBER, lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString()
|
||||
);
|
||||
} catch (Exception e) {
|
||||
logger.error("enableLogicalAccess: Failed to fetch LunMap details for Lun: {} and igroup: {}, Exception: {}", lunName, igroupName, e);
|
||||
throw new CloudRuntimeException("Failed to fetch LunMap details for Lun: " + lunName + " and igroup: " + igroupName);
|
||||
}
|
||||
logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMapResponse.getRecords().get(0));
|
||||
logger.info("enableLogicalAccess: LunMap created successfully.");
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while creating LunMap", e);
|
||||
throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage());
|
||||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
public void disableLogicalAccess(Map<String, String> values) {
|
||||
logger.info("disableLogicalAccess : Delete LunMap");
|
||||
logger.debug("disableLogicalAccess : Deleting LunMap with values {} ", values);
|
||||
if (values == null) {
|
||||
logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: null");
|
||||
throw new CloudRuntimeException(" Failed to delete LunMap, invalid request");
|
||||
}
|
||||
String lunUUID = values.get(OntapStorageConstants.LUN_DOT_UUID);
|
||||
String igroupUUID = values.get(OntapStorageConstants.IGROUP_DOT_UUID);
|
||||
if (lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) {
|
||||
logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: {}", values);
|
||||
throw new CloudRuntimeException(" Failed to delete LunMap, invalid request");
|
||||
}
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID);
|
||||
logger.info("disableLogicalAccess: LunMap deleted successfully.");
|
||||
} catch (FeignException e) {
|
||||
if (e.status() == 404) {
|
||||
logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist, skipping deletion", lunUUID, igroupUUID);
|
||||
return;
|
||||
}
|
||||
logger.error("FeignException occurred while deleting LunMap, Status: {}, Exception: {}", e.status(), e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage());
|
||||
} catch (Exception e) {
|
||||
logger.error("Exception occurred while deleting LunMap, Exception: {}", e.getMessage());
|
||||
throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// GET-only helper: fetch LUN-map and return logical unit number if it exists; otherwise return null
|
||||
public Map<String, String> getLogicalAccess(Map<String, String> values) {
|
||||
logger.info("getLogicalAccess : Fetch LunMap");
|
||||
logger.debug("getLogicalAccess : Fetching LunMap with values {} ", values);
|
||||
if (values == null) {
|
||||
logger.error("getLogicalAccess: Invalid request values: null");
|
||||
throw new CloudRuntimeException(" Invalid request");
|
||||
}
|
||||
String svmName = values.get(OntapStorageConstants.SVM_DOT_NAME);
|
||||
String lunName = values.get(OntapStorageConstants.LUN_DOT_NAME);
|
||||
String igroupName = values.get(OntapStorageConstants.IGROUP_DOT_NAME);
|
||||
if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) {
|
||||
logger.error("getLogicalAccess: Invalid request values: {}", values);
|
||||
throw new CloudRuntimeException(" Invalid request");
|
||||
}
|
||||
try {
|
||||
String authHeader = OntapStorageUtils.generateAuthHeader(storage.getUsername(), storage.getPassword());
|
||||
OntapResponse<LunMap> lunMapResponse = sanFeignClient.getLunMapResponse(authHeader,
|
||||
Map.of(
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.LUN_DOT_NAME, lunName,
|
||||
OntapStorageConstants.IGROUP_DOT_NAME, igroupName,
|
||||
OntapStorageConstants.FIELDS, OntapStorageConstants.LOGICAL_UNIT_NUMBER
|
||||
));
|
||||
if (lunMapResponse != null && lunMapResponse.getRecords() != null && !lunMapResponse.getRecords().isEmpty()) {
|
||||
Integer lunLogicalUnitNum = lunMapResponse.getRecords().get(0).getLogicalUnitNumber();
|
||||
String lunNumber = lunLogicalUnitNum != null ? lunLogicalUnitNum.toString() : null;
|
||||
return lunNumber != null ? Map.of(OntapStorageConstants.LOGICAL_UNIT_NUMBER, lunNumber) : null;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("getLogicalAccess: LunMap not found for Lun: {} and igroup: {} ({}).", lunName, igroupName, e.getMessage());
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public void disableLogicalAccess(Map<String, String> values) {}
|
||||
@Override
|
||||
public String ensureLunMapped(String svmName, String lunName, String accessGroupName) {
|
||||
logger.info("ensureLunMapped: Ensuring LUN [{}] is mapped to igroup [{}] on SVM [{}]", lunName, accessGroupName, svmName);
|
||||
|
||||
public Map<String, String> getLogicalAccess(Map<String, String> values) {
|
||||
return null;
|
||||
// Check existing map first
|
||||
Map<String, String> getMap = Map.of(
|
||||
OntapStorageConstants.LUN_DOT_NAME, lunName,
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.IGROUP_DOT_NAME, accessGroupName
|
||||
);
|
||||
Map<String, String> mapResp = getLogicalAccess(getMap);
|
||||
if (mapResp != null && mapResp.containsKey(OntapStorageConstants.LOGICAL_UNIT_NUMBER)) {
|
||||
String lunNumber = mapResp.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER);
|
||||
logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber);
|
||||
return lunNumber;
|
||||
}
|
||||
|
||||
// Create if not exists
|
||||
Map<String, String> enableMap = Map.of(
|
||||
OntapStorageConstants.LUN_DOT_NAME, lunName,
|
||||
OntapStorageConstants.SVM_DOT_NAME, svmName,
|
||||
OntapStorageConstants.IGROUP_DOT_NAME, accessGroupName
|
||||
);
|
||||
Map<String, String> response = enableLogicalAccess(enableMap);
|
||||
if (response == null || !response.containsKey(OntapStorageConstants.LOGICAL_UNIT_NUMBER)) {
|
||||
throw new CloudRuntimeException("Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]");
|
||||
}
|
||||
logger.info("ensureLunMapped: Successfully mapped LUN [{}] to igroup [{}] with LUN number [{}]", lunName, accessGroupName, response.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER));
|
||||
return response.get(OntapStorageConstants.LOGICAL_UNIT_NUMBER);
|
||||
}
|
||||
/**
|
||||
* Reverts a LUN to a snapshot using the ONTAP CLI-based snapshot file restore API.
|
||||
*
|
||||
* <p>ONTAP REST API (CLI passthrough):
|
||||
* {@code POST /api/private/cli/volume/snapshot/restore-file}</p>
|
||||
*
|
||||
* <p>This method uses the CLI native API which is more reliable and works
|
||||
* consistently for both NFS files and iSCSI LUNs.</p>
|
||||
*
|
||||
* @param snapshotName The ONTAP FlexVolume snapshot name
|
||||
* @param flexVolUuid The FlexVolume UUID (not used in CLI API, kept for interface consistency)
|
||||
* @param snapshotUuid The ONTAP snapshot UUID (not used in CLI API, kept for interface consistency)
|
||||
* @param volumePath The LUN name (used to construct the path)
|
||||
* @param lunUuid The LUN UUID (not used in CLI API, kept for interface consistency)
|
||||
* @param flexVolName The FlexVolume name (required for CLI API)
|
||||
* @return JobResponse for the async restore operation
|
||||
*/
|
||||
@Override
|
||||
public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid,
|
||||
String snapshotUuid, String volumePath,
|
||||
String lunUuid, String flexVolName) {
|
||||
logger.info("revertSnapshotForCloudStackVolume [iSCSI]: Restoring LUN [{}] from snapshot [{}] on FlexVol [{}]",
|
||||
volumePath, snapshotName, flexVolName);
|
||||
|
||||
if (snapshotName == null || snapshotName.isEmpty()) {
|
||||
throw new CloudRuntimeException("Snapshot name is required for iSCSI snapshot revert");
|
||||
}
|
||||
if (flexVolName == null || flexVolName.isEmpty()) {
|
||||
throw new CloudRuntimeException("FlexVolume name is required for iSCSI snapshot revert");
|
||||
}
|
||||
if (volumePath == null || volumePath.isEmpty()) {
|
||||
throw new CloudRuntimeException("LUN path is required for iSCSI snapshot revert");
|
||||
}
|
||||
|
||||
String authHeader = getAuthHeader();
|
||||
String svmName = storage.getSvmName();
|
||||
|
||||
// Prepare the LUN path for ONTAP CLI API (ensure it starts with "/")
|
||||
String ontapLunPath = volumePath.startsWith("/") ? volumePath : "/" + volumePath;
|
||||
|
||||
// Create CLI snapshot restore request
|
||||
CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest(
|
||||
svmName, flexVolName, snapshotName, ontapLunPath);
|
||||
|
||||
logger.info("revertSnapshotForCloudStackVolume: Calling CLI file restore API with vserver={}, volume={}, snapshot={}, path={}",
|
||||
svmName, flexVolName, snapshotName, ontapLunPath);
|
||||
|
||||
return getSnapshotFeignClient().restoreFileFromSnapshotCli(authHeader, restoreRequest);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@
|
|||
package org.apache.cloudstack.storage.service.model;
|
||||
|
||||
import com.cloud.host.HostVO;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
|
||||
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
|
||||
import org.apache.cloudstack.storage.feign.model.Igroup;
|
||||
|
|
@ -33,7 +32,7 @@ public class AccessGroup {
|
|||
private ExportPolicy exportPolicy;
|
||||
|
||||
private List<HostVO> hostsToConnect;
|
||||
private PrimaryDataStoreInfo primaryDataStoreInfo;
|
||||
private Long storagePoolId;
|
||||
private Scope scope;
|
||||
|
||||
|
||||
|
|
@ -55,18 +54,23 @@ public class AccessGroup {
|
|||
public List<HostVO> getHostsToConnect() {
|
||||
return hostsToConnect;
|
||||
}
|
||||
|
||||
public void setHostsToConnect(List<HostVO> hostsToConnect) {
|
||||
this.hostsToConnect = hostsToConnect;
|
||||
}
|
||||
public PrimaryDataStoreInfo getPrimaryDataStoreInfo() {
|
||||
return primaryDataStoreInfo;
|
||||
|
||||
public Long getStoragePoolId() {
|
||||
return storagePoolId;
|
||||
}
|
||||
public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) {
|
||||
this.primaryDataStoreInfo = primaryDataStoreInfo;
|
||||
|
||||
public void setStoragePoolId(Long storagePoolId) {
|
||||
this.storagePoolId = storagePoolId;
|
||||
}
|
||||
|
||||
public Scope getScope() {
|
||||
return scope;
|
||||
}
|
||||
|
||||
public void setScope(Scope scope) {
|
||||
this.scope = scope;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,9 +25,28 @@ import org.apache.cloudstack.storage.feign.model.Lun;
|
|||
|
||||
public class CloudStackVolume {
|
||||
|
||||
/**
|
||||
* Filed used for request:
|
||||
* a. snapshot workflows will get source file details from it.
|
||||
*/
|
||||
private FileInfo file;
|
||||
|
||||
/**
|
||||
* Filed used for request:
|
||||
* a. snapshot workflows will get source LUN details from it.
|
||||
*/
|
||||
private Lun lun;
|
||||
private String datastoreId;
|
||||
/**
|
||||
* FlexVolume UUID on which this cloudstack volume is created.
|
||||
* a. Field is eligible for unified storage only.
|
||||
* b. It will be null for the disaggregated storage.
|
||||
*/
|
||||
private String flexVolumeUuid;
|
||||
/**
|
||||
* Field serves for snapshot workflows
|
||||
*/
|
||||
private String destinationPath;
|
||||
private DataObject volumeInfo; // This is needed as we need DataObject to be passed to agent to create volume
|
||||
public FileInfo getFile() {
|
||||
return file;
|
||||
|
|
@ -44,16 +63,36 @@ public class CloudStackVolume {
|
|||
public void setLun(Lun lun) {
|
||||
this.lun = lun;
|
||||
}
|
||||
|
||||
public String getDatastoreId() {
|
||||
return datastoreId;
|
||||
}
|
||||
|
||||
public void setDatastoreId(String datastoreId) {
|
||||
this.datastoreId = datastoreId;
|
||||
}
|
||||
|
||||
public DataObject getVolumeInfo() {
|
||||
return volumeInfo;
|
||||
}
|
||||
|
||||
public void setVolumeInfo(DataObject volumeInfo) {
|
||||
this.volumeInfo = volumeInfo;
|
||||
}
|
||||
public String getFlexVolumeUuid() {
|
||||
return flexVolumeUuid;
|
||||
}
|
||||
|
||||
public void setFlexVolumeUuid(String flexVolumeUuid) {
|
||||
this.flexVolumeUuid = flexVolumeUuid;
|
||||
}
|
||||
|
||||
public String getDestinationPath() {
|
||||
return this.destinationPath;
|
||||
}
|
||||
|
||||
public void setDestinationPath(String destinationPath) {
|
||||
this.destinationPath = destinationPath;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ package org.apache.cloudstack.storage.utils;
|
|||
|
||||
public class OntapStorageConstants {
|
||||
|
||||
public static final String ONTAP_PLUGIN_NAME = "ONTAP";
|
||||
public static final String ONTAP_PLUGIN_NAME = "NetApp ONTAP";
|
||||
public static final int NFS3_PORT = 2049;
|
||||
public static final int ISCSI_PORT = 3260;
|
||||
|
||||
|
|
@ -34,7 +34,7 @@ public class OntapStorageConstants {
|
|||
public static final String USERNAME = "username";
|
||||
public static final String PASSWORD = "password";
|
||||
public static final String DATA_LIF = "dataLIF";
|
||||
public static final String MANAGEMENT_LIF = "managementLIF";
|
||||
public static final String STORAGE_IP = "storageIP";
|
||||
public static final String VOLUME_NAME = "volumeName";
|
||||
public static final String VOLUME_UUID = "volumeUUID";
|
||||
public static final String EXPORT_POLICY_ID = "exportPolicyId";
|
||||
|
|
@ -42,6 +42,7 @@ public class OntapStorageConstants {
|
|||
public static final String IS_DISAGGREGATED = "isDisaggregated";
|
||||
public static final String RUNNING = "running";
|
||||
public static final String EXPORT = "export";
|
||||
public static final String NFS3_MOUNT_OPTIONS_VER_3 = "vers=3";
|
||||
|
||||
public static final int ONTAP_PORT = 443;
|
||||
|
||||
|
|
@ -90,4 +91,16 @@ public class OntapStorageConstants {
|
|||
public static final String IGROUP_DOT_UUID = "igroup.uuid";
|
||||
public static final String UNDERSCORE = "_";
|
||||
public static final String CS = "cs";
|
||||
public static final String SRC_CS_VOLUME_ID = "src_cs_volume_id";
|
||||
public static final String BASE_ONTAP_FV_ID = "base_ontap_fv_id";
|
||||
public static final String ONTAP_SNAP_ID = "ontap_snap_id";
|
||||
public static final String ONTAP_SNAP_NAME = "ontap_snap_name";
|
||||
public static final String VOLUME_PATH = "volume_path";
|
||||
public static final String PRIMARY_POOL_ID = "primary_pool_id";
|
||||
public static final String ONTAP_SNAP_SIZE = "ontap_snap_size";
|
||||
public static final String FILE_PATH = "file_path";
|
||||
public static final int MAX_SNAPSHOT_NAME_LENGTH = 64;
|
||||
|
||||
/** vm_snapshot_details key for ONTAP FlexVolume-level VM snapshots. */
|
||||
public static final String ONTAP_FLEXVOL_SNAPSHOT = "ontapFlexVolSnapshot";
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,12 +19,18 @@
|
|||
|
||||
package org.apache.cloudstack.storage.utils;
|
||||
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.utils.StringUtils;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.feign.model.Lun;
|
||||
import org.apache.cloudstack.storage.feign.model.LunSpace;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.cloudstack.storage.feign.model.Svm;
|
||||
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
|
||||
import org.apache.cloudstack.storage.service.StorageStrategy;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
|
@ -36,25 +42,92 @@ import java.util.Map;
|
|||
public class OntapStorageUtils {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(OntapStorageUtils.class);
|
||||
|
||||
private static final String BASIC = "Basic";
|
||||
private static final String AUTH_HEADER_COLON = ":";
|
||||
|
||||
/**
|
||||
* Method generates authentication headers using storage backend credentials passed as normal string
|
||||
*
|
||||
* @param username -->> username of the storage backend
|
||||
* @param password -->> normal decoded password of the storage backend
|
||||
* @return
|
||||
*/
|
||||
public static String generateAuthHeader (String username, String password) {
|
||||
byte[] encodedBytes = Base64Utils.encode((username + AUTH_HEADER_COLON + password).getBytes(StandardCharsets.UTF_8));
|
||||
return BASIC + StringUtils.SPACE + new String(encodedBytes);
|
||||
}
|
||||
|
||||
public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map<String, String> details, DataObject volumeObject) {
|
||||
CloudStackVolume cloudStackVolumeRequest = null;
|
||||
|
||||
String protocol = details.get(OntapStorageConstants.PROTOCOL);
|
||||
ProtocolType protocolType = ProtocolType.valueOf(protocol);
|
||||
switch (protocolType) {
|
||||
case NFS3:
|
||||
cloudStackVolumeRequest = new CloudStackVolume();
|
||||
cloudStackVolumeRequest.setDatastoreId(String.valueOf(storagePool.getId()));
|
||||
cloudStackVolumeRequest.setVolumeInfo(volumeObject);
|
||||
break;
|
||||
case ISCSI:
|
||||
Svm svm = new Svm();
|
||||
svm.setName(details.get(OntapStorageConstants.SVM_NAME));
|
||||
cloudStackVolumeRequest = new CloudStackVolume();
|
||||
Lun lunRequest = new Lun();
|
||||
lunRequest.setSvm(svm);
|
||||
|
||||
LunSpace lunSpace = new LunSpace();
|
||||
lunSpace.setSize(volumeObject.getSize());
|
||||
lunRequest.setSpace(lunSpace);
|
||||
//Lun name is full path like in unified "/vol/VolumeName/LunName"
|
||||
String lunName = volumeObject.getName().replace(OntapStorageConstants.HYPHEN, OntapStorageConstants.UNDERSCORE);
|
||||
if(!isValidName(lunName)) {
|
||||
String errMsg = "createAsync: Invalid dataObject name [" + lunName + "]. It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long.";
|
||||
throw new InvalidParameterValueException(errMsg);
|
||||
}
|
||||
String lunFullName = getLunName(storagePool.getName(), lunName);
|
||||
lunRequest.setName(lunFullName);
|
||||
|
||||
String osType = getOSTypeFromHypervisor(storagePool.getHypervisor().name());
|
||||
lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType));
|
||||
|
||||
cloudStackVolumeRequest.setLun(lunRequest);
|
||||
break;
|
||||
default:
|
||||
throw new CloudRuntimeException("Unsupported protocol " + protocol);
|
||||
|
||||
}
|
||||
return cloudStackVolumeRequest;
|
||||
}
|
||||
|
||||
public static boolean isValidName(String name) {
|
||||
// Check for null and length constraint first
|
||||
if (name == null || name.length() > 200) {
|
||||
return false;
|
||||
}
|
||||
// Regex: Starts with a letter, followed by letters, digits, or underscores
|
||||
return name.matches(OntapStorageConstants.ONTAP_NAME_REGEX);
|
||||
}
|
||||
|
||||
public static String getOSTypeFromHypervisor(String hypervisorType) {
|
||||
switch (hypervisorType) {
|
||||
case OntapStorageConstants.KVM:
|
||||
return Lun.OsTypeEnum.LINUX.name();
|
||||
default:
|
||||
String errMsg = "getOSTypeFromHypervisor : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
|
||||
logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
|
||||
public static StorageStrategy getStrategyByStoragePoolDetails(Map<String, String> details) {
|
||||
if (details == null || details.isEmpty()) {
|
||||
logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
|
||||
throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
|
||||
throw new CloudRuntimeException("Storage pool details are null or empty");
|
||||
}
|
||||
String protocol = details.get(OntapStorageConstants.PROTOCOL);
|
||||
OntapStorage ontapStorage = new OntapStorage(details.get(OntapStorageConstants.USERNAME), details.get(OntapStorageConstants.PASSWORD),
|
||||
details.get(OntapStorageConstants.MANAGEMENT_LIF), details.get(OntapStorageConstants.SVM_NAME), Long.parseLong(details.get(OntapStorageConstants.SIZE)),
|
||||
ProtocolType.valueOf(protocol),
|
||||
Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED)));
|
||||
details.get(OntapStorageConstants.STORAGE_IP), details.get(OntapStorageConstants.SVM_NAME), Long.parseLong(details.get(OntapStorageConstants.SIZE)),
|
||||
ProtocolType.valueOf(protocol));
|
||||
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
|
||||
boolean isValid = storageStrategy.connect();
|
||||
if (isValid) {
|
||||
|
|
@ -62,15 +135,23 @@ public class OntapStorageUtils {
|
|||
return storageStrategy;
|
||||
} else {
|
||||
logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(OntapStorageConstants.SVM_NAME) + "] failed");
|
||||
throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(OntapStorageConstants.SVM_NAME) + "] failed");
|
||||
throw new CloudRuntimeException("Connection to Ontap SVM [" + details.get(OntapStorageConstants.SVM_NAME) + "] failed");
|
||||
}
|
||||
}
|
||||
|
||||
public static String getIgroupName(String svmName, ScopeType scopeType, Long scopeId) {
|
||||
return OntapStorageConstants.CS + OntapStorageConstants.UNDERSCORE + svmName + OntapStorageConstants.UNDERSCORE + scopeType.toString().toLowerCase() + OntapStorageConstants.UNDERSCORE + scopeId;
|
||||
public static String getIgroupName(String svmName, String hostName) {
|
||||
//Igroup name format: cs_svmName_hostName
|
||||
String sanitizedHostName = hostName.split("\\.")[0].replaceAll("[^a-zA-Z0-9_-]", "_");
|
||||
return OntapStorageConstants.CS + OntapStorageConstants.UNDERSCORE + svmName + OntapStorageConstants.UNDERSCORE + sanitizedHostName;
|
||||
}
|
||||
|
||||
public static String generateExportPolicyName(String svmName, String volumeName){
|
||||
return OntapStorageConstants.EXPORT + OntapStorageConstants.HYPHEN + svmName + OntapStorageConstants.HYPHEN + volumeName;
|
||||
return OntapStorageConstants.CS + OntapStorageConstants.HYPHEN + svmName + OntapStorageConstants.HYPHEN + volumeName;
|
||||
}
|
||||
|
||||
public static String getLunName(String volName, String lunName) {
|
||||
//LUN name in ONTAP unified format: "/vol/VolumeName/LunName"
|
||||
return OntapStorageConstants.VOLUME_PATH_PREFIX + volName + OntapStorageConstants.SLASH + lunName;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,929 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.vmsnapshot;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.feign.client.SnapshotFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.CliSnapshotRestoreRequest;
|
||||
import org.apache.cloudstack.storage.feign.model.FlexVolSnapshot;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.service.StorageStrategy;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import com.cloud.agent.api.CreateVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.CreateVMSnapshotCommand;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.DeleteVMSnapshotCommand;
|
||||
import com.cloud.agent.api.FreezeThawVMAnswer;
|
||||
import com.cloud.agent.api.FreezeThawVMCommand;
|
||||
import com.cloud.agent.api.RevertToVMSnapshotAnswer;
|
||||
import com.cloud.agent.api.RevertToVMSnapshotCommand;
|
||||
import com.cloud.agent.api.VMSnapshotTO;
|
||||
import com.cloud.event.EventTypes;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.GuestOSVO;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.uservm.UserVm;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.snapshot.VMSnapshot;
|
||||
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
|
||||
import com.cloud.vm.snapshot.VMSnapshotVO;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
|
||||
/**
|
||||
* VM Snapshot strategy for NetApp ONTAP managed storage using FlexVolume-level snapshots.
|
||||
*
|
||||
* <p>This strategy handles VM-level (instance) snapshots for VMs whose volumes
|
||||
* reside on ONTAP managed primary storage. Instead of creating per-file clones
|
||||
* (the old approach), it takes <b>ONTAP FlexVolume-level snapshots</b> via the
|
||||
* ONTAP REST API ({@code POST /api/storage/volumes/{uuid}/snapshots}).</p>
|
||||
*
|
||||
* <h3>Key Advantage:</h3>
|
||||
* <p>When multiple CloudStack disks (ROOT + DATA) reside on the same ONTAP
|
||||
* FlexVolume, a single FlexVolume snapshot atomically captures all of them.
|
||||
* This is both faster and more storage-efficient than per-file clones.</p>
|
||||
*
|
||||
* <h3>Flow:</h3>
|
||||
* <ol>
|
||||
* <li>Group all VM volumes by their parent FlexVolume UUID</li>
|
||||
* <li>Freeze the VM via QEMU guest agent ({@code fsfreeze}) — if quiesce requested</li>
|
||||
* <li>For each unique FlexVolume, create one ONTAP snapshot</li>
|
||||
* <li>Thaw the VM</li>
|
||||
* <li>Record FlexVolume → snapshot UUID mappings in {@code vm_snapshot_details}</li>
|
||||
* </ol>
|
||||
*
|
||||
* <h3>Metadata in vm_snapshot_details:</h3>
|
||||
* <p>Each FlexVolume snapshot is stored as a detail row with:
|
||||
* <ul>
|
||||
* <li>name = {@value OntapStorageConstants#ONTAP_FLEXVOL_SNAPSHOT}</li>
|
||||
* <li>value = {@code "<flexVolUuid>::<snapshotUuid>::<snapshotName>::<volumePath>::<poolId>::<protocol>"}</li>
|
||||
* </ul>
|
||||
* One row is persisted per CloudStack volume (not per FlexVolume) so that the
|
||||
* revert operation can restore individual files/LUNs using the ONTAP Snapshot
|
||||
* File Restore API ({@code POST /api/storage/volumes/{vol}/snapshots/{snap}/files/{path}/restore}).</p>
|
||||
*
|
||||
* <h3>Strategy Selection:</h3>
|
||||
* <p>Returns {@code StrategyPriority.HIGHEST} when:</p>
|
||||
* <ul>
|
||||
* <li>Hypervisor is KVM</li>
|
||||
* <li>Snapshot type is Disk-only (no memory)</li>
|
||||
* <li>All VM volumes are on ONTAP managed primary storage</li>
|
||||
* </ul>
|
||||
*/
|
||||
public class OntapVMSnapshotStrategy extends StorageVMSnapshotStrategy {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(OntapVMSnapshotStrategy.class);
|
||||
|
||||
/** Separator used in the vm_snapshot_details value to delimit FlexVol UUID, snapshot UUID, snapshot name, and pool ID. */
|
||||
static final String DETAIL_SEPARATOR = "::";
|
||||
|
||||
@Inject
|
||||
private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
|
||||
@Inject
|
||||
private VolumeDetailsDao volumeDetailsDao;
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
return super.configure(name, params);
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Strategy Selection
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
@Override
|
||||
public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
|
||||
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
|
||||
|
||||
// For existing (non-Allocated) snapshots, check if we created them
|
||||
if (!VMSnapshot.State.Allocated.equals(vmSnapshotVO.getState())) {
|
||||
// Check for our FlexVolume snapshot details first
|
||||
List<VMSnapshotDetailsVO> flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT);
|
||||
if (CollectionUtils.isNotEmpty(flexVolDetails)) {
|
||||
// Verify the volumes are still on ONTAP storage
|
||||
if (allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
// Also check legacy STORAGE_SNAPSHOT details for backward compatibility
|
||||
List<VMSnapshotDetailsVO> legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT);
|
||||
if (CollectionUtils.isNotEmpty(legacyDetails) && allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
// For new snapshots (Allocated state), check if we can handle this VM
|
||||
// ONTAP only supports disk-only snapshots, not memory snapshots
|
||||
if (allVolumesOnOntapManagedStorage(vmSnapshot.getVmId())) {
|
||||
if (vmSnapshotVO.getType() == VMSnapshot.Type.DiskAndMemory) {
|
||||
logger.debug("canHandle: Memory snapshots (DiskAndMemory) are not supported for VMs on ONTAP storage. VMSnapshot [{}]", vmSnapshot.getId());
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
|
||||
// ONTAP FlexVolume snapshots only support disk-only (crash-consistent) snapshots.
|
||||
// Memory snapshots (snapshotMemory=true) are not supported because:
|
||||
// 1. ONTAP snapshots capture disk state only, not VM memory
|
||||
// 2. Allowing memory snapshots would require falling back to libvirt snapshots,
|
||||
// creating mixed snapshot chains that would cause issues during revert
|
||||
// Return CANT_HANDLE so VMSnapshotManagerImpl can provide a clear error message.
|
||||
if (snapshotMemory) {
|
||||
logger.debug("canHandle: Memory snapshots (snapshotMemory=true) are not supported for VMs on ONTAP storage. VM [{}]", vmId);
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
if (allVolumesOnOntapManagedStorage(vmId)) {
|
||||
return StrategyPriority.HIGHEST;
|
||||
}
|
||||
|
||||
return StrategyPriority.CANT_HANDLE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether all volumes of a VM reside on ONTAP managed primary storage.
|
||||
*/
|
||||
boolean allVolumesOnOntapManagedStorage(long vmId) {
|
||||
UserVm userVm = userVmDao.findById(vmId);
|
||||
if (userVm == null) {
|
||||
logger.debug("allVolumesOnOntapManagedStorage: VM with id [{}] not found", vmId);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!Hypervisor.HypervisorType.KVM.equals(userVm.getHypervisorType())) {
|
||||
logger.debug("allVolumesOnOntapManagedStorage: ONTAP VM snapshot strategy only supports KVM hypervisor, VM [{}] uses [{}]",
|
||||
vmId, userVm.getHypervisorType());
|
||||
return false;
|
||||
}
|
||||
|
||||
// ONTAP VM snapshots work for both Running and Stopped VMs.
|
||||
// Running VMs may be frozen/thawed (if quiesce is requested).
|
||||
// Stopped VMs don't need freeze/thaw - just take the FlexVol snapshot directly.
|
||||
VirtualMachine.State vmState = userVm.getState();
|
||||
if (!VirtualMachine.State.Running.equals(vmState) && !VirtualMachine.State.Stopped.equals(vmState)) {
|
||||
logger.info("allVolumesOnOntapManagedStorage: ONTAP VM snapshot strategy requires VM to be Running or Stopped, VM [{}] is in state [{}], returning false",
|
||||
vmId, vmState);
|
||||
return false;
|
||||
}
|
||||
|
||||
List<VolumeVO> volumes = volumeDao.findByInstance(vmId);
|
||||
if (volumes == null || volumes.isEmpty()) {
|
||||
logger.debug("allVolumesOnOntapManagedStorage: No volumes found for VM [{}]", vmId);
|
||||
return false;
|
||||
}
|
||||
|
||||
for (VolumeVO volume : volumes) {
|
||||
if (volume.getPoolId() == null) {
|
||||
return false;
|
||||
}
|
||||
StoragePoolVO pool = storagePool.findById(volume.getPoolId());
|
||||
if (pool == null) {
|
||||
return false;
|
||||
}
|
||||
if (!pool.isManaged()) {
|
||||
logger.debug("allVolumesOnOntapManagedStorage: Volume [{}] is on non-managed storage pool [{}], not ONTAP",
|
||||
volume.getId(), pool.getName());
|
||||
return false;
|
||||
}
|
||||
if (!OntapStorageConstants.ONTAP_PLUGIN_NAME.equals(pool.getStorageProviderName())) {
|
||||
logger.debug("allVolumesOnOntapManagedStorage: Volume [{}] is on managed pool [{}] with provider [{}], not ONTAP",
|
||||
volume.getId(), pool.getName(), pool.getStorageProviderName());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug("allVolumesOnOntapManagedStorage: All volumes of VM [{}] are on ONTAP managed storage, this strategy can handle", vmId);
|
||||
return true;
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Take VM Snapshot (FlexVolume-level)
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Takes a VM-level snapshot by freezing the VM, creating ONTAP FlexVolume-level
|
||||
* snapshots (one per unique FlexVolume), and then thawing the VM.
|
||||
*
|
||||
* <p>Volumes are grouped by their parent FlexVolume UUID (from storage pool details).
|
||||
* For each unique FlexVolume, exactly one ONTAP snapshot is created via
|
||||
* {@code POST /api/storage/volumes/{uuid}/snapshots}. This means if a VM has
|
||||
* ROOT and DATA disks on the same FlexVolume, only one snapshot is created.</p>
|
||||
*
|
||||
* <p><b>Memory Snapshots Not Supported:</b> This strategy only supports disk-only
|
||||
* (crash-consistent) snapshots. Memory snapshots (snapshotmemory=true) are rejected
|
||||
* with a clear error message. This is because ONTAP FlexVolume snapshots capture disk
|
||||
* state only, and allowing mixed snapshot chains (ONTAP disk + libvirt memory) would
|
||||
* cause issues during revert operations.</p>
|
||||
*
|
||||
* @throws CloudRuntimeException if memory snapshot is requested
|
||||
*/
|
||||
@Override
|
||||
public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) {
|
||||
Long hostId = vmSnapshotHelper.pickRunningHost(vmSnapshot.getVmId());
|
||||
UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
|
||||
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
|
||||
|
||||
// Transition to Creating state FIRST - this is required so that the finally block
|
||||
// can properly transition to Error state via OperationFailed event if anything fails.
|
||||
// (OperationFailed can only transition FROM Creating state, not from Allocated)
|
||||
try {
|
||||
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested);
|
||||
} catch (NoTransitionException e) {
|
||||
throw new CloudRuntimeException(e.getMessage());
|
||||
}
|
||||
|
||||
FreezeThawVMAnswer freezeAnswer = null;
|
||||
FreezeThawVMCommand thawCmd = null;
|
||||
FreezeThawVMAnswer thawAnswer = null;
|
||||
long startFreeze = 0;
|
||||
|
||||
// Track which FlexVolume snapshots were created (for rollback)
|
||||
List<FlexVolSnapshotDetail> createdSnapshots = new ArrayList<>();
|
||||
|
||||
boolean result = false;
|
||||
try {
|
||||
GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId());
|
||||
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
|
||||
|
||||
long prevChainSize = 0;
|
||||
long virtualSize = 0;
|
||||
|
||||
// Build snapshot parent chain
|
||||
VMSnapshotTO current = null;
|
||||
VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId());
|
||||
if (currentSnapshot != null) {
|
||||
current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot);
|
||||
}
|
||||
|
||||
// Respect the user's quiesce option from the VM snapshot request
|
||||
boolean quiesceVm = true; // default to true for safety
|
||||
VMSnapshotOptions options = vmSnapshotVO.getOptions();
|
||||
if (options != null) {
|
||||
quiesceVm = options.needQuiesceVM();
|
||||
}
|
||||
|
||||
// Check if VM is actually running - freeze/thaw only makes sense for running VMs
|
||||
boolean vmIsRunning = VirtualMachine.State.Running.equals(userVm.getState());
|
||||
boolean shouldFreezeThaw = quiesceVm && vmIsRunning;
|
||||
|
||||
if (!vmIsRunning) {
|
||||
logger.info("takeVMSnapshot: VM [{}] is in state [{}] (not Running). Skipping freeze/thaw - " +
|
||||
"FlexVolume snapshot will be taken directly.", userVm.getInstanceName(), userVm.getState());
|
||||
} else if (quiesceVm) {
|
||||
logger.info("takeVMSnapshot: Quiesce option is enabled for ONTAP VM Snapshot of VM [{}]. " +
|
||||
"VM file systems will be frozen/thawed for application-consistent snapshots.", userVm.getInstanceName());
|
||||
} else {
|
||||
logger.info("takeVMSnapshot: Quiesce option is disabled for ONTAP VM Snapshot of VM [{}]. " +
|
||||
"Snapshots will be crash-consistent only.", userVm.getInstanceName());
|
||||
}
|
||||
|
||||
VMSnapshotTO target = new VMSnapshotTO(vmSnapshot.getId(), vmSnapshot.getName(),
|
||||
vmSnapshot.getType(), null, vmSnapshot.getDescription(), false, current, quiesceVm);
|
||||
|
||||
if (current == null) {
|
||||
vmSnapshotVO.setParent(null);
|
||||
} else {
|
||||
vmSnapshotVO.setParent(current.getId());
|
||||
}
|
||||
|
||||
CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(
|
||||
userVm.getInstanceName(), userVm.getUuid(), target, volumeTOs, guestOS.getDisplayName());
|
||||
|
||||
logger.info("takeVMSnapshot: Creating ONTAP FlexVolume VM Snapshot for VM [{}] with quiesce={}", userVm.getInstanceName(), quiesceVm);
|
||||
|
||||
// Prepare volume info list and calculate sizes
|
||||
for (VolumeObjectTO volumeObjectTO : volumeTOs) {
|
||||
virtualSize += volumeObjectTO.getSize();
|
||||
VolumeVO volumeVO = volumeDao.findById(volumeObjectTO.getId());
|
||||
prevChainSize += volumeVO.getVmSnapshotChainSize() == null ? 0 : volumeVO.getVmSnapshotChainSize();
|
||||
}
|
||||
|
||||
// ── Group volumes by FlexVolume UUID ──
|
||||
Map<String, FlexVolGroupInfo> flexVolGroups = groupVolumesByFlexVol(volumeTOs);
|
||||
|
||||
logger.info("takeVMSnapshot: VM [{}] has {} volumes across {} unique FlexVolume(s)",
|
||||
userVm.getInstanceName(), volumeTOs.size(), flexVolGroups.size());
|
||||
|
||||
// ── Step 1: Freeze the VM (only if quiescing is requested AND VM is running) ──
|
||||
if (shouldFreezeThaw) {
|
||||
FreezeThawVMCommand freezeCommand = new FreezeThawVMCommand(userVm.getInstanceName());
|
||||
freezeCommand.setOption(FreezeThawVMCommand.FREEZE);
|
||||
freezeAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, freezeCommand);
|
||||
startFreeze = System.nanoTime();
|
||||
|
||||
thawCmd = new FreezeThawVMCommand(userVm.getInstanceName());
|
||||
thawCmd.setOption(FreezeThawVMCommand.THAW);
|
||||
|
||||
if (freezeAnswer == null || !freezeAnswer.getResult()) {
|
||||
String detail = (freezeAnswer != null) ? freezeAnswer.getDetails() : "no response from agent";
|
||||
throw new CloudRuntimeException("Could not freeze VM [" + userVm.getInstanceName() +
|
||||
"] for ONTAP snapshot. Ensure qemu-guest-agent is installed and running. Details: " + detail);
|
||||
}
|
||||
|
||||
logger.info("takeVMSnapshot: VM [{}] frozen successfully via QEMU guest agent", userVm.getInstanceName());
|
||||
} else {
|
||||
logger.info("takeVMSnapshot: Skipping VM freeze for VM [{}] (quiesce={}, vmIsRunning={})",
|
||||
userVm.getInstanceName(), quiesceVm, vmIsRunning);
|
||||
}
|
||||
|
||||
// ── Step 2: Create FlexVolume-level snapshots ──
|
||||
try {
|
||||
String snapshotNameBase = buildSnapshotName(vmSnapshot);
|
||||
|
||||
for (Map.Entry<String, FlexVolGroupInfo> entry : flexVolGroups.entrySet()) {
|
||||
String flexVolUuid = entry.getKey();
|
||||
FlexVolGroupInfo groupInfo = entry.getValue();
|
||||
long startSnapshot = System.nanoTime();
|
||||
|
||||
// Build storage strategy from pool details to get the feign client
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(groupInfo.poolDetails);
|
||||
SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient();
|
||||
String authHeader = storageStrategy.getAuthHeader();
|
||||
|
||||
// Use the same snapshot name for all FlexVolumes in this VM snapshot
|
||||
// (each FlexVolume gets its own independent snapshot with this name)
|
||||
FlexVolSnapshot snapshotRequest = new FlexVolSnapshot(snapshotNameBase,
|
||||
"CloudStack VM snapshot " + vmSnapshot.getName() + " for VM " + userVm.getInstanceName());
|
||||
|
||||
logger.info("takeVMSnapshot: Creating ONTAP FlexVolume snapshot [{}] on FlexVol UUID [{}] covering {} volume(s)",
|
||||
snapshotNameBase, flexVolUuid, groupInfo.volumeIds.size());
|
||||
|
||||
JobResponse jobResponse = snapshotClient.createSnapshot(authHeader, flexVolUuid, snapshotRequest);
|
||||
if (jobResponse == null || jobResponse.getJob() == null) {
|
||||
throw new CloudRuntimeException("Failed to initiate FlexVolume snapshot on FlexVol UUID [" + flexVolUuid + "]");
|
||||
}
|
||||
|
||||
// Poll for job completion
|
||||
Boolean jobSucceeded = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2000);
|
||||
if (!jobSucceeded) {
|
||||
throw new CloudRuntimeException("FlexVolume snapshot job failed on FlexVol UUID [" + flexVolUuid + "]");
|
||||
}
|
||||
|
||||
// Retrieve the created snapshot UUID by name
|
||||
String snapshotUuid = resolveSnapshotUuid(snapshotClient, authHeader, flexVolUuid, snapshotNameBase);
|
||||
|
||||
String protocol = groupInfo.poolDetails.get(OntapStorageConstants.PROTOCOL);
|
||||
|
||||
// Create one detail per CloudStack volume in this FlexVol group (for single-file restore during revert)
|
||||
for (Long volumeId : groupInfo.volumeIds) {
|
||||
String volumePath = resolveVolumePathOnOntap(volumeId, protocol, groupInfo.poolDetails);
|
||||
FlexVolSnapshotDetail detail = new FlexVolSnapshotDetail(
|
||||
flexVolUuid, snapshotUuid, snapshotNameBase, volumePath, groupInfo.poolId, protocol);
|
||||
createdSnapshots.add(detail);
|
||||
}
|
||||
|
||||
logger.info("takeVMSnapshot: ONTAP FlexVolume snapshot [{}] (uuid={}) on FlexVol [{}] completed in {} ms. Covers volumes: {}",
|
||||
snapshotNameBase, snapshotUuid, flexVolUuid,
|
||||
TimeUnit.MILLISECONDS.convert(System.nanoTime() - startSnapshot, TimeUnit.NANOSECONDS),
|
||||
groupInfo.volumeIds);
|
||||
}
|
||||
} finally {
|
||||
// ── Step 3: Thaw the VM (only if it was frozen, always even on error) ──
|
||||
if (quiesceVm && freezeAnswer != null && freezeAnswer.getResult()) {
|
||||
try {
|
||||
thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd);
|
||||
if (thawAnswer != null && thawAnswer.getResult()) {
|
||||
logger.info("takeVMSnapshot: VM [{}] thawed successfully. Total freeze duration: {} ms",
|
||||
userVm.getInstanceName(),
|
||||
TimeUnit.MILLISECONDS.convert(System.nanoTime() - startFreeze, TimeUnit.NANOSECONDS));
|
||||
} else {
|
||||
logger.warn("takeVMSnapshot: Failed to thaw VM [{}]: {}", userVm.getInstanceName(),
|
||||
(thawAnswer != null) ? thawAnswer.getDetails() : "no response");
|
||||
}
|
||||
} catch (Exception thawEx) {
|
||||
logger.error("takeVMSnapshot: Exception while thawing VM [{}]: {}", userVm.getInstanceName(), thawEx.getMessage(), thawEx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Step 4: Persist FlexVolume snapshot details (one row per CloudStack volume) ──
|
||||
for (FlexVolSnapshotDetail detail : createdSnapshots) {
|
||||
vmSnapshotDetailsDao.persist(new VMSnapshotDetailsVO(
|
||||
vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT, detail.toString(), true));
|
||||
}
|
||||
|
||||
// ── Step 5: Finalize via parent processAnswer ──
|
||||
CreateVMSnapshotAnswer answer = new CreateVMSnapshotAnswer(ccmd, true, "");
|
||||
answer.setVolumeTOs(volumeTOs);
|
||||
|
||||
processAnswer(vmSnapshotVO, userVm, answer, null);
|
||||
logger.info("takeVMSnapshot: ONTAP FlexVolume VM Snapshot [{}] created successfully for VM [{}] ({} FlexVol snapshot(s))",
|
||||
vmSnapshot.getName(), userVm.getInstanceName(), createdSnapshots.size());
|
||||
|
||||
long newChainSize = 0;
|
||||
for (VolumeObjectTO volumeTo : answer.getVolumeTOs()) {
|
||||
publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo);
|
||||
newChainSize += volumeTo.getSize();
|
||||
}
|
||||
publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm,
|
||||
newChainSize - prevChainSize, virtualSize);
|
||||
|
||||
result = true;
|
||||
return vmSnapshot;
|
||||
|
||||
} catch (OperationTimedoutException e) {
|
||||
logger.error("takeVMSnapshot: ONTAP VM Snapshot [{}] timed out: {}", vmSnapshot.getName(), e.getMessage());
|
||||
throw new CloudRuntimeException("Creating Instance Snapshot: " + vmSnapshot.getName() + " timed out: " + e.getMessage());
|
||||
} catch (AgentUnavailableException e) {
|
||||
logger.error("takeVMSnapshot: ONTAP VM Snapshot [{}] failed, agent unavailable: {}", vmSnapshot.getName(), e.getMessage());
|
||||
throw new CloudRuntimeException("Creating Instance Snapshot: " + vmSnapshot.getName() + " failed: " + e.getMessage());
|
||||
} finally {
|
||||
if (!result) {
|
||||
// Rollback all FlexVolume snapshots created so far (deduplicate by FlexVol+Snapshot)
|
||||
Map<String, Boolean> rolledBack = new HashMap<>();
|
||||
for (FlexVolSnapshotDetail detail : createdSnapshots) {
|
||||
String dedupeKey = detail.flexVolUuid + "::" + detail.snapshotUuid;
|
||||
if (!rolledBack.containsKey(dedupeKey)) {
|
||||
try {
|
||||
rollbackFlexVolSnapshot(detail);
|
||||
rolledBack.put(dedupeKey, Boolean.TRUE);
|
||||
} catch (Exception rollbackEx) {
|
||||
logger.error("takeVMSnapshot: Failed to rollback FlexVol snapshot [{}] on FlexVol [{}]: {}",
|
||||
detail.snapshotUuid, detail.flexVolUuid, rollbackEx.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure VM is thawed if we haven't done so
|
||||
if (thawAnswer == null && freezeAnswer != null && freezeAnswer.getResult()) {
|
||||
try {
|
||||
logger.info("takeVMSnapshot: Thawing VM [{}] during error cleanup", userVm.getInstanceName());
|
||||
thawAnswer = (FreezeThawVMAnswer) agentMgr.send(hostId, thawCmd);
|
||||
} catch (Exception ex) {
|
||||
logger.error("takeVMSnapshot: Could not thaw VM during cleanup: {}", ex.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up VM snapshot details and transition state
|
||||
try {
|
||||
List<VMSnapshotDetailsVO> vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshot.getId());
|
||||
for (VMSnapshotDetailsVO detail : vmSnapshotDetails) {
|
||||
if (OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT.equals(detail.getName())) {
|
||||
vmSnapshotDetailsDao.remove(detail.getId());
|
||||
}
|
||||
}
|
||||
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
|
||||
} catch (NoTransitionException e1) {
|
||||
logger.error("takeVMSnapshot: Cannot set VM Snapshot state to OperationFailed: {}", e1.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Delete VM Snapshot
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
@Override
public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) {
    // Deletes an ONTAP-backed VM snapshot: transitions state to Expunging,
    // removes the FlexVolume-level snapshots on ONTAP (and any legacy per-disk
    // snapshots), then finalizes bookkeeping and publishes usage events.
    VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
    // NOTE(review): userVm is dereferenced below without a null check — if the VM
    // has been removed this would NPE; confirm callers guarantee the VM row exists.
    UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());

    // Move the snapshot into the expunging state before touching storage.
    try {
        vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.ExpungeRequested);
    } catch (NoTransitionException e) {
        throw new CloudRuntimeException(e.getMessage());
    }

    try {
        List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
        String vmInstanceName = userVm.getInstanceName();
        VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(vmSnapshotVO).getParent();

        // Build the command object only so a well-formed answer can be fed to
        // processAnswer() below; no agent command is actually sent here.
        VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshotVO.getId(), vmSnapshotVO.getName(), vmSnapshotVO.getType(),
                vmSnapshotVO.getCreated().getTime(), vmSnapshotVO.getDescription(), vmSnapshotVO.getCurrent(), parent, true);
        GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId());
        DeleteVMSnapshotCommand deleteSnapshotCommand = new DeleteVMSnapshotCommand(vmInstanceName, vmSnapshotTO,
                volumeTOs, guestOS.getDisplayName());

        // Check for FlexVolume snapshots (new approach)
        List<VMSnapshotDetailsVO> flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT);
        if (CollectionUtils.isNotEmpty(flexVolDetails)) {
            deleteFlexVolSnapshots(flexVolDetails);
        }

        // Also handle legacy STORAGE_SNAPSHOT details (backward compatibility)
        List<VMSnapshotDetailsVO> legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT);
        if (CollectionUtils.isNotEmpty(legacyDetails)) {
            deleteDiskSnapshot(vmSnapshot);
        }

        // Finalize the delete in the DB via the parent's answer processing.
        processAnswer(vmSnapshotVO, userVm, new DeleteVMSnapshotAnswer(deleteSnapshotCommand, volumeTOs), null);
        long fullChainSize = 0;
        for (VolumeObjectTO volumeTo : volumeTOs) {
            publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo);
            fullChainSize += volumeTo.getSize();
        }
        publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, fullChainSize, 0L);
        return true;
    } catch (CloudRuntimeException err) {
        // NOTE(review): on failure the snapshot is left in the Expunging state —
        // there is no transition to OperationFailed here; confirm this is intended.
        String errMsg = String.format("Delete of ONTAP VM Snapshot [%s] of VM [%s] failed: %s",
                vmSnapshot.getName(), userVm.getInstanceName(), err.getMessage());
        logger.error(errMsg, err);
        throw new CloudRuntimeException(errMsg, err);
    }
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Revert VM Snapshot
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
@Override
|
||||
public boolean revertVMSnapshot(VMSnapshot vmSnapshot) {
|
||||
VMSnapshotVO vmSnapshotVO = (VMSnapshotVO) vmSnapshot;
|
||||
UserVm userVm = userVmDao.findById(vmSnapshot.getVmId());
|
||||
|
||||
try {
|
||||
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested);
|
||||
} catch (NoTransitionException e) {
|
||||
throw new CloudRuntimeException(e.getMessage());
|
||||
}
|
||||
|
||||
boolean result = false;
|
||||
try {
|
||||
List<VolumeObjectTO> volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
|
||||
String vmInstanceName = userVm.getInstanceName();
|
||||
VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(vmSnapshotVO).getParent();
|
||||
|
||||
VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshotVO.getId(), vmSnapshotVO.getName(), vmSnapshotVO.getType(),
|
||||
vmSnapshotVO.getCreated().getTime(), vmSnapshotVO.getDescription(), vmSnapshotVO.getCurrent(), parent, true);
|
||||
GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId());
|
||||
RevertToVMSnapshotCommand revertToSnapshotCommand = new RevertToVMSnapshotCommand(vmInstanceName,
|
||||
userVm.getUuid(), vmSnapshotTO, volumeTOs, guestOS.getDisplayName());
|
||||
|
||||
// Check for FlexVolume snapshots (new approach)
|
||||
List<VMSnapshotDetailsVO> flexVolDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT);
|
||||
if (CollectionUtils.isNotEmpty(flexVolDetails)) {
|
||||
revertFlexVolSnapshots(flexVolDetails);
|
||||
}
|
||||
|
||||
// Also handle legacy STORAGE_SNAPSHOT details (backward compatibility)
|
||||
List<VMSnapshotDetailsVO> legacyDetails = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), STORAGE_SNAPSHOT);
|
||||
if (CollectionUtils.isNotEmpty(legacyDetails)) {
|
||||
revertDiskSnapshot(vmSnapshot);
|
||||
}
|
||||
|
||||
RevertToVMSnapshotAnswer answer = new RevertToVMSnapshotAnswer(revertToSnapshotCommand, true, "");
|
||||
answer.setVolumeTOs(volumeTOs);
|
||||
processAnswer(vmSnapshotVO, userVm, answer, null);
|
||||
result = true;
|
||||
} catch (CloudRuntimeException e) {
|
||||
logger.error("revertVMSnapshot: Revert ONTAP VM Snapshot [{}] failed: {}", vmSnapshot.getName(), e.getMessage(), e);
|
||||
throw new CloudRuntimeException("Revert ONTAP VM Snapshot ["+ vmSnapshot.getName() +"] failed.");
|
||||
} finally {
|
||||
if (!result) {
|
||||
try {
|
||||
vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed);
|
||||
} catch (NoTransitionException e1) {
|
||||
logger.error("Cannot set Instance Snapshot state due to: " + e1.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// FlexVolume Snapshot Helpers
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Groups volumes by their parent FlexVolume UUID using storage pool details.
|
||||
*
|
||||
* @param volumeTOs list of volume transfer objects
|
||||
* @return map of FlexVolume UUID → group info (pool details, pool ID, volume IDs)
|
||||
*/
|
||||
Map<String, FlexVolGroupInfo> groupVolumesByFlexVol(List<VolumeObjectTO> volumeTOs) {
|
||||
Map<String, FlexVolGroupInfo> groups = new HashMap<>();
|
||||
|
||||
for (VolumeObjectTO volumeTO : volumeTOs) {
|
||||
VolumeVO volumeVO = volumeDao.findById(volumeTO.getId());
|
||||
if (volumeVO == null || volumeVO.getPoolId() == null) {
|
||||
throw new CloudRuntimeException("Volume [" + volumeTO.getId() + "] not found or has no pool assigned");
|
||||
}
|
||||
|
||||
Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(volumeVO.getPoolId());
|
||||
String flexVolUuid = poolDetails.get(OntapStorageConstants.VOLUME_UUID);
|
||||
if (flexVolUuid == null || flexVolUuid.isEmpty()) {
|
||||
throw new CloudRuntimeException("FlexVolume UUID not found in pool details for pool [" + volumeVO.getPoolId() + "]");
|
||||
}
|
||||
|
||||
FlexVolGroupInfo group = groups.get(flexVolUuid);
|
||||
if (group == null) {
|
||||
group = new FlexVolGroupInfo(poolDetails, volumeVO.getPoolId());
|
||||
groups.put(flexVolUuid, group);
|
||||
}
|
||||
group.volumeIds.add(volumeVO.getId());
|
||||
}
|
||||
|
||||
return groups;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a deterministic, ONTAP-safe snapshot name for a VM snapshot.
|
||||
* Format: {@code vmsnap_<vmSnapshotId>_<timestamp>}
|
||||
*/
|
||||
String buildSnapshotName(VMSnapshot vmSnapshot) {
|
||||
String name = "vmsnap_" + vmSnapshot.getId() + "_" + System.currentTimeMillis();
|
||||
// ONTAP snapshot names: max 256 chars, must start with letter, only alphanumeric and underscores
|
||||
if (name.length() > OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH) {
|
||||
name = name.substring(0, OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH);
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the UUID of a newly created FlexVolume snapshot by name.
|
||||
*/
|
||||
String resolveSnapshotUuid(SnapshotFeignClient client, String authHeader,
|
||||
String flexVolUuid, String snapshotName) {
|
||||
Map<String, Object> queryParams = new HashMap<>();
|
||||
queryParams.put("name", snapshotName);
|
||||
OntapResponse<FlexVolSnapshot> response = client.getSnapshots(authHeader, flexVolUuid, queryParams);
|
||||
if (response == null || response.getRecords() == null || response.getRecords().isEmpty()) {
|
||||
throw new CloudRuntimeException("Could not find FlexVolume snapshot [" + snapshotName +
|
||||
"] on FlexVol [" + flexVolUuid + "] after creation");
|
||||
}
|
||||
return response.getRecords().get(0).getUuid();
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the ONTAP-side path of a CloudStack volume within its FlexVolume.
|
||||
*
|
||||
* <ul>
|
||||
* <li>For NFS volumes the path is the filename (e.g. {@code uuid.qcow2})
|
||||
* retrieved via {@link VolumeVO#getPath()}.</li>
|
||||
* <li>For iSCSI volumes the path is the LUN name within the FlexVolume
|
||||
* (e.g. {@code /vol/vol1/lun_name}) stored in volume_details.</li>
|
||||
* </ul>
|
||||
*
|
||||
* @param volumeId the CloudStack volume ID
|
||||
* @param protocol the storage protocol (e.g. "NFS3", "ISCSI")
|
||||
* @param poolDetails storage pool detail map (used for fall-back lookups)
|
||||
* @return the volume path relative to the FlexVolume root
|
||||
*/
|
||||
String resolveVolumePathOnOntap(Long volumeId, String protocol, Map<String, String> poolDetails) {
|
||||
if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
|
||||
// iSCSI – the LUN's ONTAP name is stored as a volume detail
|
||||
VolumeDetailVO lunDetail = volumeDetailsDao.findDetail(volumeId, OntapStorageConstants.LUN_DOT_NAME);
|
||||
if (lunDetail == null || lunDetail.getValue() == null || lunDetail.getValue().isEmpty()) {
|
||||
throw new CloudRuntimeException(
|
||||
"LUN name (volume detail '" + OntapStorageConstants.LUN_DOT_NAME + "') not found for iSCSI volume [" + volumeId + "]");
|
||||
}
|
||||
return lunDetail.getValue();
|
||||
} else {
|
||||
// NFS – volumeVO.getPath() holds the file path (e.g. "uuid.qcow2")
|
||||
VolumeVO vol = volumeDao.findById(volumeId);
|
||||
if (vol == null || vol.getPath() == null || vol.getPath().isEmpty()) {
|
||||
throw new CloudRuntimeException("Volume path not found for NFS volume [" + volumeId + "]");
|
||||
}
|
||||
return vol.getPath();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Rolls back (deletes) a FlexVolume snapshot that was created during a failed takeVMSnapshot.
|
||||
*/
|
||||
void rollbackFlexVolSnapshot(FlexVolSnapshotDetail detail) {
|
||||
try {
|
||||
Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId);
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
|
||||
SnapshotFeignClient client = storageStrategy.getSnapshotFeignClient();
|
||||
String authHeader = storageStrategy.getAuthHeader();
|
||||
|
||||
logger.info("rollbackFlexVolSnapshot: Rolling back FlexVol snapshot [{}] (uuid={}) on FlexVol [{}]",
|
||||
detail.snapshotName, detail.snapshotUuid, detail.flexVolUuid);
|
||||
|
||||
JobResponse jobResponse = client.deleteSnapshot(authHeader, detail.flexVolUuid, detail.snapshotUuid);
|
||||
if (jobResponse != null && jobResponse.getJob() != null) {
|
||||
storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 10, 2000);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("rollbackFlexVolSnapshot: Rollback of FlexVol snapshot failed: {}", e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes all FlexVolume snapshots associated with a VM snapshot.
|
||||
*
|
||||
* <p>Since there is one detail row per CloudStack volume, multiple rows may reference
|
||||
* the same FlexVol + snapshot combination. This method deduplicates to delete each
|
||||
* underlying ONTAP snapshot only once.</p>
|
||||
*/
|
||||
void deleteFlexVolSnapshots(List<VMSnapshotDetailsVO> flexVolDetails) {
|
||||
// Track which FlexVol+Snapshot pairs have already been deleted
|
||||
Map<String, Boolean> deletedSnapshots = new HashMap<>();
|
||||
|
||||
for (VMSnapshotDetailsVO detailVO : flexVolDetails) {
|
||||
FlexVolSnapshotDetail detail = FlexVolSnapshotDetail.parse(detailVO.getValue());
|
||||
String dedupeKey = detail.flexVolUuid + "::" + detail.snapshotUuid;
|
||||
|
||||
// Only delete the ONTAP snapshot once per FlexVol+Snapshot pair
|
||||
if (!deletedSnapshots.containsKey(dedupeKey)) {
|
||||
Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId);
|
||||
StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
|
||||
SnapshotFeignClient client = storageStrategy.getSnapshotFeignClient();
|
||||
String authHeader = storageStrategy.getAuthHeader();
|
||||
|
||||
logger.info("deleteFlexVolSnapshots: Deleting ONTAP FlexVol snapshot [{}] (uuid={}) on FlexVol [{}]",
|
||||
detail.snapshotName, detail.snapshotUuid, detail.flexVolUuid);
|
||||
|
||||
JobResponse jobResponse = client.deleteSnapshot(authHeader, detail.flexVolUuid, detail.snapshotUuid);
|
||||
if (jobResponse != null && jobResponse.getJob() != null) {
|
||||
storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 30, 2000);
|
||||
}
|
||||
|
||||
deletedSnapshots.put(dedupeKey, Boolean.TRUE);
|
||||
logger.info("deleteFlexVolSnapshots: Deleted ONTAP FlexVol snapshot [{}] on FlexVol [{}]", detail.snapshotName, detail.flexVolUuid);
|
||||
}
|
||||
|
||||
// Always remove the DB detail row
|
||||
vmSnapshotDetailsDao.remove(detailVO.getId());
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Reverts all volumes of a VM snapshot using ONTAP CLI-based Snapshot File Restore.
 *
 * <p>Instead of restoring the entire FlexVolume to a snapshot (which would affect
 * other VMs/files on the same FlexVol), this method restores <b>only the individual
 * files or LUNs</b> belonging to this VM using the dedicated ONTAP CLI snapshot file
 * restore API:</p>
 *
 * <p>{@code POST /api/private/cli/volume/snapshot/restore-file}</p>
 *
 * <p>For each persisted detail row (one per CloudStack volume):</p>
 * <ul>
 *   <li><b>NFS</b>: restores {@code <filename>} from the snapshot to the live volume</li>
 *   <li><b>iSCSI</b>: restores {@code <lunPath>} from the snapshot to the live volume</li>
 * </ul>
 *
 * @param flexVolDetails the persisted FlexVolume-snapshot detail rows
 * @throws CloudRuntimeException if pool metadata is incomplete or a restore job fails
 */
void revertFlexVolSnapshots(List<VMSnapshotDetailsVO> flexVolDetails) {
    for (VMSnapshotDetailsVO detailVO : flexVolDetails) {
        FlexVolSnapshotDetail detail = FlexVolSnapshotDetail.parse(detailVO.getValue());

        if (detail.volumePath == null || detail.volumePath.isEmpty()) {
            // Legacy detail row without volumePath – cannot do single-file restore
            logger.warn("revertFlexVolSnapshots: FlexVol snapshot detail for FlexVol [{}] has no volumePath (legacy format). " +
                    "Skipping single-file restore for this entry.", detail.flexVolUuid);
            continue;
        }

        Map<String, String> poolDetails = storagePoolDetailsDao.listDetailsKeyPairs(detail.poolId);
        StorageStrategy storageStrategy = OntapStorageUtils.getStrategyByStoragePoolDetails(poolDetails);
        SnapshotFeignClient snapshotClient = storageStrategy.getSnapshotFeignClient();
        String authHeader = storageStrategy.getAuthHeader();

        // Get SVM name and FlexVolume name from pool details; both are required
        // by the CLI restore endpoint.
        String svmName = poolDetails.get(OntapStorageConstants.SVM_NAME);
        String flexVolName = poolDetails.get(OntapStorageConstants.VOLUME_NAME);

        if (svmName == null || svmName.isEmpty()) {
            throw new CloudRuntimeException("SVM name not found in pool details for pool [" + detail.poolId + "]");
        }
        if (flexVolName == null || flexVolName.isEmpty()) {
            throw new CloudRuntimeException("FlexVolume name not found in pool details for pool [" + detail.poolId + "]");
        }

        // The path must start with "/" for the ONTAP CLI API
        String ontapFilePath = detail.volumePath.startsWith("/") ? detail.volumePath : "/" + detail.volumePath;

        logger.info("revertFlexVolSnapshots: Restoring volume [{}] from FlexVol snapshot [{}] on FlexVol [{}] (protocol={})",
                ontapFilePath, detail.snapshotName, flexVolName, detail.protocol);

        // Use CLI-based restore API: POST /api/private/cli/volume/snapshot/restore-file
        CliSnapshotRestoreRequest restoreRequest = new CliSnapshotRestoreRequest(
                svmName, flexVolName, detail.snapshotName, ontapFilePath);

        JobResponse jobResponse = snapshotClient.restoreFileFromSnapshotCli(authHeader, restoreRequest);

        // NOTE(review): a null jobResponse (or null job) is treated as success and
        // falls through to the success log below — confirm the CLI endpoint can
        // legitimately complete without returning a job.
        if (jobResponse != null && jobResponse.getJob() != null) {
            // NOTE(review): `!success` auto-unboxes; if jobPollForSuccess can
            // return null this would NPE — verify its contract.
            Boolean success = storageStrategy.jobPollForSuccess(jobResponse.getJob().getUuid(), 60, 2000);
            if (!success) {
                throw new CloudRuntimeException("Snapshot file restore failed for volume path [" +
                        ontapFilePath + "] from snapshot [" + detail.snapshotName +
                        "] on FlexVol [" + flexVolName + "]");
            }
        }

        logger.info("revertFlexVolSnapshots: Successfully restored volume [{}] from snapshot [{}] on FlexVol [{}]",
                ontapFilePath, detail.snapshotName, flexVolName);
    }
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Inner classes for grouping & detail tracking
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Groups information about volumes that share the same FlexVolume.
|
||||
*/
|
||||
static class FlexVolGroupInfo {
|
||||
final Map<String, String> poolDetails;
|
||||
final long poolId;
|
||||
final List<Long> volumeIds = new ArrayList<>();
|
||||
|
||||
FlexVolGroupInfo(Map<String, String> poolDetails, long poolId) {
|
||||
this.poolDetails = poolDetails;
|
||||
this.poolId = poolId;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Holds the metadata for a single volume's FlexVolume snapshot entry (used during create and for
 * serialization/deserialization to/from vm_snapshot_details).
 *
 * <p>One row is persisted per CloudStack volume. Multiple volumes may share the same
 * FlexVol snapshot (if they reside on the same FlexVolume).</p>
 *
 * <p>Serialized format: {@code "<flexVolUuid>::<snapshotUuid>::<snapshotName>::<volumePath>::<poolId>::<protocol>"}</p>
 */
static class FlexVolSnapshotDetail {
    // ONTAP UUID of the FlexVolume the snapshot was taken on.
    final String flexVolUuid;
    // ONTAP UUID of the snapshot itself.
    final String snapshotUuid;
    // Name the snapshot was created with on ONTAP.
    final String snapshotName;
    /** The ONTAP-side path of the file or LUN within the FlexVolume (e.g. "uuid.qcow2" for NFS, "/vol/vol1/lun1" for iSCSI). */
    final String volumePath;
    // CloudStack storage pool ID used to rebuild the storage strategy later.
    final long poolId;
    /** Storage protocol: NFS3, ISCSI, etc. */
    final String protocol;

    FlexVolSnapshotDetail(String flexVolUuid, String snapshotUuid, String snapshotName,
                          String volumePath, long poolId, String protocol) {
        this.flexVolUuid = flexVolUuid;
        this.snapshotUuid = snapshotUuid;
        this.snapshotName = snapshotName;
        this.volumePath = volumePath;
        this.poolId = poolId;
        this.protocol = protocol;
    }

    /**
     * Parses a vm_snapshot_details value string back into a FlexVolSnapshotDetail.
     *
     * NOTE(review): String.split treats DETAIL_SEPARATOR as a regex and drops
     * trailing empty tokens, so a value with an empty final field would parse to
     * 5 parts and be rejected — confirm all persisted fields are non-empty.
     */
    static FlexVolSnapshotDetail parse(String value) {
        String[] parts = value.split(DETAIL_SEPARATOR);
        if (parts.length == 4) {
            // Legacy format without volumePath and protocol: flexVolUuid::snapshotUuid::snapshotName::poolId
            return new FlexVolSnapshotDetail(parts[0], parts[1], parts[2], null, Long.parseLong(parts[3]), null);
        }
        if (parts.length != 6) {
            throw new CloudRuntimeException("Invalid ONTAP FlexVol snapshot detail format: " + value);
        }
        return new FlexVolSnapshotDetail(parts[0], parts[1], parts[2], parts[3], Long.parseLong(parts[4]), parts[5]);
    }

    // Serializes in the 6-field format documented on the class.
    // NOTE(review): re-serializing a legacy (4-field) parsed instance writes the
    // literal string "null" for volumePath/protocol; harmless today because
    // toString() is only invoked on fully-populated instances during create —
    // confirm before reusing it elsewhere.
    @Override
    public String toString() {
        return flexVolUuid + DETAIL_SEPARATOR + snapshotUuid + DETAIL_SEPARATOR + snapshotName +
                DETAIL_SEPARATOR + volumePath + DETAIL_SEPARATOR + poolId + DETAIL_SEPARATOR + protocol;
    }
}
|
||||
}
|
||||
|
|
@ -30,4 +30,7 @@
|
|||
<bean id="ontapPrimaryDataStoreProvider"
|
||||
class="org.apache.cloudstack.storage.provider.OntapPrimaryDatastoreProvider" />
|
||||
|
||||
<bean id="ontapVMSnapshotStrategy"
|
||||
class="org.apache.cloudstack.storage.vmsnapshot.OntapVMSnapshotStrategy" />
|
||||
|
||||
</beans>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,567 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.driver;
|
||||
|
||||
import com.cloud.exception.InvalidParameterValueException;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.ScopeType;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.feign.model.Igroup;
|
||||
import org.apache.cloudstack.storage.feign.model.Lun;
|
||||
import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockedStatic;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static com.cloud.agent.api.to.DataObjectType.VOLUME;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||
import static org.junit.jupiter.api.Assertions.assertNull;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.argThat;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.mockStatic;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
class OntapPrimaryDatastoreDriverTest {
|
||||
|
||||
@Mock
|
||||
private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
|
||||
@Mock
|
||||
private PrimaryDataStoreDao storagePoolDao;
|
||||
|
||||
@Mock
|
||||
private VolumeDao volumeDao;
|
||||
|
||||
@Mock
|
||||
private VolumeDetailsDao volumeDetailsDao;
|
||||
|
||||
@Mock
|
||||
private DataStore dataStore;
|
||||
|
||||
@Mock
|
||||
private VolumeInfo volumeInfo;
|
||||
|
||||
@Mock
|
||||
private StoragePoolVO storagePool;
|
||||
|
||||
@Mock
|
||||
private VolumeVO volumeVO;
|
||||
|
||||
@Mock
|
||||
private Host host;
|
||||
|
||||
@Mock
|
||||
private UnifiedSANStrategy sanStrategy;
|
||||
|
||||
@Mock
|
||||
private AsyncCompletionCallback<CreateCmdResult> createCallback;
|
||||
|
||||
@Mock
|
||||
private AsyncCompletionCallback<CommandResult> commandCallback;
|
||||
|
||||
@InjectMocks
|
||||
private OntapPrimaryDatastoreDriver driver;
|
||||
|
||||
private Map<String, String> storagePoolDetails;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
storagePoolDetails = new HashMap<>();
|
||||
storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.ISCSI.name());
|
||||
storagePoolDetails.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGetCapabilities() {
|
||||
Map<String, String> capabilities = driver.getCapabilities();
|
||||
|
||||
assertNotNull(capabilities);
|
||||
// With SIS clone approach, driver advertises storage system snapshot capability
|
||||
// so StorageSystemSnapshotStrategy handles snapshot backup to secondary storage
|
||||
assertEquals(Boolean.TRUE.toString(), capabilities.get("STORAGE_SYSTEM_SNAPSHOT"));
|
||||
assertEquals(Boolean.TRUE.toString(), capabilities.get("CAN_CREATE_VOLUME_FROM_SNAPSHOT"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCreateAsync_NullDataObject_ThrowsException() {
|
||||
assertThrows(InvalidParameterValueException.class,
|
||||
() -> driver.createAsync(dataStore, null, createCallback));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCreateAsync_NullDataStore_ThrowsException() {
|
||||
assertThrows(InvalidParameterValueException.class,
|
||||
() -> driver.createAsync(null, volumeInfo, createCallback));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCreateAsync_NullCallback_ThrowsException() {
|
||||
assertThrows(InvalidParameterValueException.class,
|
||||
() -> driver.createAsync(dataStore, volumeInfo, null));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCreateAsync_VolumeWithISCSI_Success() {
|
||||
// Setup
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(dataStore.getName()).thenReturn("ontap-pool");
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
when(volumeInfo.getName()).thenReturn("test-volume");
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
|
||||
Lun mockLun = new Lun();
|
||||
mockLun.setName("/vol/vol1/lun1");
|
||||
mockLun.setUuid("lun-uuid-123");
|
||||
// Create request volume (returned by Utility.createCloudStackVolumeRequestByProtocol)
|
||||
CloudStackVolume requestVolume = new CloudStackVolume();
|
||||
requestVolume.setLun(mockLun);
|
||||
// Create response volume (returned by sanStrategy.createCloudStackVolume)
|
||||
CloudStackVolume responseVolume = new CloudStackVolume();
|
||||
responseVolume.setLun(mockLun);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(sanStrategy);
|
||||
utilityMock.when(() -> OntapStorageUtils.createCloudStackVolumeRequestByProtocol(
|
||||
any(), any(), any())).thenReturn(requestVolume);
|
||||
when(sanStrategy.createCloudStackVolume(any())).thenReturn(responseVolume);
|
||||
|
||||
// Execute
|
||||
driver.createAsync(dataStore, volumeInfo, createCallback);
|
||||
|
||||
// Verify
|
||||
ArgumentCaptor<CreateCmdResult> resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class);
|
||||
verify(createCallback).complete(resultCaptor.capture());
|
||||
|
||||
CreateCmdResult result = resultCaptor.getValue();
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isSuccess());
|
||||
|
||||
verify(volumeDetailsDao).addDetail(eq(100L), eq(OntapStorageConstants.LUN_DOT_UUID), eq("lun-uuid-123"), eq(false));
|
||||
verify(volumeDetailsDao).addDetail(eq(100L), eq(OntapStorageConstants.LUN_DOT_NAME), eq("/vol/vol1/lun1"), eq(false));
|
||||
verify(volumeDao).update(eq(100L), any(VolumeVO.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCreateAsync_VolumeWithNFS_Success() {
|
||||
// Setup
|
||||
storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.NFS3.name());
|
||||
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(dataStore.getName()).thenReturn("ontap-pool");
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
when(volumeInfo.getName()).thenReturn("test-volume");
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
|
||||
CloudStackVolume mockCloudStackVolume = new CloudStackVolume();
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
utilityMock.when(() -> OntapStorageUtils.createCloudStackVolumeRequestByProtocol(
|
||||
any(), any(), any())).thenReturn(mockCloudStackVolume);
|
||||
|
||||
when(sanStrategy.createCloudStackVolume(any())).thenReturn(mockCloudStackVolume);
|
||||
|
||||
// Execute
|
||||
driver.createAsync(dataStore, volumeInfo, createCallback);
|
||||
|
||||
// Verify
|
||||
ArgumentCaptor<CreateCmdResult> resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class);
|
||||
verify(createCallback).complete(resultCaptor.capture());
|
||||
|
||||
CreateCmdResult result = resultCaptor.getValue();
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isSuccess());
|
||||
verify(volumeDao).update(eq(100L), any(VolumeVO.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testDeleteAsync_NullStore_ThrowsException() {
|
||||
ArgumentCaptor<CommandResult> resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
|
||||
|
||||
driver.deleteAsync(null, volumeInfo, commandCallback);
|
||||
|
||||
verify(commandCallback).complete(resultCaptor.capture());
|
||||
CommandResult result = resultCaptor.getValue();
|
||||
assertFalse(result.isSuccess());
|
||||
assertTrue(result.getResult().contains("store or data is null"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testDeleteAsync_ISCSIVolume_Success() {
|
||||
// Setup
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
|
||||
VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
|
||||
VolumeDetailVO lunUuidDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_UUID, "lun-uuid-123", false);
|
||||
|
||||
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
|
||||
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_UUID)).thenReturn(lunUuidDetail);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
|
||||
doNothing().when(sanStrategy).deleteCloudStackVolume(any());
|
||||
|
||||
// Execute
|
||||
driver.deleteAsync(dataStore, volumeInfo, commandCallback);
|
||||
|
||||
// Verify
|
||||
ArgumentCaptor<CommandResult> resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
|
||||
verify(commandCallback).complete(resultCaptor.capture());
|
||||
|
||||
CommandResult result = resultCaptor.getValue();
|
||||
assertNotNull(result);
|
||||
assertTrue(result.isSuccess());
|
||||
verify(sanStrategy).deleteCloudStackVolume(any(CloudStackVolume.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testDeleteAsync_NFSVolume_Success() {
|
||||
// Setup
|
||||
storagePoolDetails.put(OntapStorageConstants.PROTOCOL, ProtocolType.NFS3.name());
|
||||
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
|
||||
// Execute
|
||||
driver.deleteAsync(dataStore, volumeInfo, commandCallback);
|
||||
|
||||
// Verify
|
||||
ArgumentCaptor<CommandResult> resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
|
||||
verify(commandCallback).complete(resultCaptor.capture());
|
||||
|
||||
CommandResult result = resultCaptor.getValue();
|
||||
assertNotNull(result);
|
||||
// NFS deletion doesn't fail, handled by hypervisor
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGrantAccess_NullParameters_ThrowsException() {
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> driver.grantAccess(null, host, dataStore));
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> driver.grantAccess(volumeInfo, null, dataStore));
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> driver.grantAccess(volumeInfo, host, null));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGrantAccess_ClusterScope_Success() {
|
||||
// Setup
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
|
||||
when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456");
|
||||
when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
|
||||
when(host.getName()).thenReturn("host1");
|
||||
|
||||
VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
|
||||
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
|
||||
|
||||
// Mock AccessGroup with existing igroup
|
||||
AccessGroup existingAccessGroup = new AccessGroup();
|
||||
Igroup existingIgroup = new Igroup();
|
||||
existingIgroup.setName("igroup1");
|
||||
existingAccessGroup.setIgroup(existingIgroup);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString()))
|
||||
.thenReturn("igroup1");
|
||||
|
||||
when(sanStrategy.getAccessGroup(any())).thenReturn(existingAccessGroup);
|
||||
when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0");
|
||||
|
||||
// Execute
|
||||
boolean result = driver.grantAccess(volumeInfo, host, dataStore);
|
||||
|
||||
// Verify
|
||||
assertTrue(result);
|
||||
verify(volumeDao).update(eq(100L), any(VolumeVO.class));
|
||||
verify(sanStrategy).getAccessGroup(any());
|
||||
verify(sanStrategy).ensureLunMapped(anyString(), anyString(), anyString());
|
||||
verify(sanStrategy, never()).validateInitiatorInAccessGroup(anyString(), anyString(), any(Igroup.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGrantAccess_IgroupNotFound_CreatesNewIgroup() {
|
||||
// Setup - use HostVO mock since production code casts Host to HostVO
|
||||
HostVO hostVO = mock(HostVO.class);
|
||||
when(hostVO.getName()).thenReturn("host1");
|
||||
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
|
||||
when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456");
|
||||
when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
|
||||
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
|
||||
VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
|
||||
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
|
||||
|
||||
// Mock getAccessGroup returning null (igroup doesn't exist)
|
||||
AccessGroup createdAccessGroup = new AccessGroup();
|
||||
Igroup createdIgroup = new Igroup();
|
||||
createdIgroup.setName("igroup1");
|
||||
createdAccessGroup.setIgroup(createdIgroup);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString()))
|
||||
.thenReturn("igroup1");
|
||||
|
||||
when(sanStrategy.getAccessGroup(any())).thenReturn(null);
|
||||
when(sanStrategy.createAccessGroup(any())).thenReturn(createdAccessGroup);
|
||||
when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0");
|
||||
|
||||
// Execute
|
||||
boolean result = driver.grantAccess(volumeInfo, hostVO, dataStore);
|
||||
|
||||
// Verify
|
||||
assertTrue(result);
|
||||
verify(sanStrategy).getAccessGroup(any());
|
||||
verify(sanStrategy).createAccessGroup(any());
|
||||
verify(sanStrategy).ensureLunMapped(anyString(), anyString(), anyString());
|
||||
verify(volumeDao).update(eq(100L), any(VolumeVO.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testRevokeAccess_NFSVolume_SkipsRevoke() {
|
||||
// Setup - NFS volumes have no LUN mapping, so revokeAccess is a no-op
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
when(volumeVO.getName()).thenReturn("test-volume");
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
when(host.getName()).thenReturn("host1");
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
|
||||
// Execute - NFS has no iSCSI protocol, so revokeAccessForVolume does nothing
|
||||
driver.revokeAccess(volumeInfo, host, dataStore);
|
||||
|
||||
// Verify - no LUN unmap operations for NFS
|
||||
verify(sanStrategy, never()).disableLogicalAccess(any());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testRevokeAccess_ISCSIVolume_Success() {
|
||||
// Setup
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(volumeInfo.getType()).thenReturn(VOLUME);
|
||||
when(volumeInfo.getId()).thenReturn(100L);
|
||||
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeVO.getId()).thenReturn(100L);
|
||||
when(volumeVO.getName()).thenReturn("test-volume");
|
||||
|
||||
when(storagePoolDao.findById(1L)).thenReturn(storagePool);
|
||||
when(storagePool.getId()).thenReturn(1L);
|
||||
when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
|
||||
|
||||
when(host.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1");
|
||||
when(host.getName()).thenReturn("host1");
|
||||
|
||||
VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, OntapStorageConstants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
|
||||
when(volumeDetailsDao.findDetail(100L, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
|
||||
|
||||
Lun mockLun = new Lun();
|
||||
mockLun.setName("/vol/vol1/lun1");
|
||||
mockLun.setUuid("lun-uuid-123");
|
||||
CloudStackVolume mockCloudStackVolume = new CloudStackVolume();
|
||||
mockCloudStackVolume.setLun(mockLun);
|
||||
|
||||
org.apache.cloudstack.storage.feign.model.Igroup mockIgroup = mock(org.apache.cloudstack.storage.feign.model.Igroup.class);
|
||||
when(mockIgroup.getName()).thenReturn("igroup1");
|
||||
when(mockIgroup.getUuid()).thenReturn("igroup-uuid-123");
|
||||
AccessGroup mockAccessGroup = new AccessGroup();
|
||||
mockAccessGroup.setIgroup(mockIgroup);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(storagePoolDetails))
|
||||
.thenReturn(sanStrategy);
|
||||
utilityMock.when(() -> OntapStorageUtils.getIgroupName(anyString(), anyString()))
|
||||
.thenReturn("igroup1");
|
||||
|
||||
// Mock the methods called by getCloudStackVolumeByName and getAccessGroupByName
|
||||
when(sanStrategy.getCloudStackVolume(argThat(map ->
|
||||
map != null &&
|
||||
"/vol/vol1/lun1".equals(map.get("name")) &&
|
||||
"svm1".equals(map.get("svm.name"))
|
||||
))).thenReturn(mockCloudStackVolume);
|
||||
|
||||
when(sanStrategy.getAccessGroup(argThat(map ->
|
||||
map != null &&
|
||||
"igroup1".equals(map.get("name")) &&
|
||||
"svm1".equals(map.get("svm.name"))
|
||||
))).thenReturn(mockAccessGroup);
|
||||
|
||||
when(sanStrategy.validateInitiatorInAccessGroup(
|
||||
eq("iqn.1993-08.org.debian:01:host1"),
|
||||
eq("svm1"),
|
||||
any(Igroup.class)
|
||||
)).thenReturn(true);
|
||||
|
||||
doNothing().when(sanStrategy).disableLogicalAccess(argThat(map ->
|
||||
map != null &&
|
||||
"lun-uuid-123".equals(map.get("lun.uuid")) &&
|
||||
"igroup-uuid-123".equals(map.get("igroup.uuid"))
|
||||
));
|
||||
|
||||
// Execute
|
||||
driver.revokeAccess(volumeInfo, host, dataStore);
|
||||
|
||||
// Verify
|
||||
verify(sanStrategy).getCloudStackVolume(any());
|
||||
verify(sanStrategy).getAccessGroup(any());
|
||||
verify(sanStrategy).validateInitiatorInAccessGroup(anyString(), anyString(), any(Igroup.class));
|
||||
verify(sanStrategy).disableLogicalAccess(any());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHostAccessStoragePool_ReturnsTrue() {
|
||||
assertTrue(driver.canHostAccessStoragePool(host, storagePool));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testIsVmInfoNeeded_ReturnsTrue() {
|
||||
assertTrue(driver.isVmInfoNeeded());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testIsStorageSupportHA_ReturnsTrue() {
|
||||
assertTrue(driver.isStorageSupportHA(Storage.StoragePoolType.NetworkFilesystem));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGetChapInfo_ReturnsNull() {
|
||||
assertNull(driver.getChapInfo(volumeInfo));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanProvideStorageStats_ReturnsFalse() {
|
||||
assertFalse(driver.canProvideStorageStats());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanProvideVolumeStats_ReturnsFalse() {
|
||||
assertFalse(driver.canProvideVolumeStats());
|
||||
}
|
||||
}
|
||||
|
|
@ -18,6 +18,8 @@
|
|||
*/
|
||||
package org.apache.cloudstack.storage.lifecycle;
|
||||
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageUtils;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
|
|
@ -32,15 +34,35 @@ import org.apache.cloudstack.storage.feign.model.Volume;
|
|||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import java.util.Map;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.when;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.withSettings;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
import java.util.HashMap;
|
||||
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
|
||||
import org.apache.cloudstack.storage.service.StorageStrategy;
|
||||
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
|
|
@ -58,8 +80,36 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
@Mock
|
||||
private PrimaryDataStoreHelper _dataStoreHelper;
|
||||
|
||||
@Mock
|
||||
private ResourceManager _resourceMgr;
|
||||
|
||||
@Mock
|
||||
private StorageManager _storageMgr;
|
||||
|
||||
@Mock
|
||||
private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
|
||||
@Mock
|
||||
private PrimaryDataStoreDao storagePoolDao;
|
||||
|
||||
// Mock object that implements both DataStore and PrimaryDataStoreInfo
|
||||
// This is needed because attachCluster(DataStore) casts DataStore to PrimaryDataStoreInfo internally
|
||||
private DataStore dataStore;
|
||||
|
||||
@Mock
|
||||
private ClusterScope clusterScope;
|
||||
|
||||
@Mock
|
||||
private ZoneScope zoneScope;
|
||||
|
||||
private List<HostVO> mockHosts;
|
||||
private Map<String, String> poolDetails;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
// Create a mock that implements both DataStore and PrimaryDataStoreInfo interfaces
|
||||
dataStore = Mockito.mock(DataStore.class, withSettings()
|
||||
.extraInterfaces(PrimaryDataStoreInfo.class));
|
||||
|
||||
ClusterVO clusterVO = new ClusterVO(1L, 1L, "clusterName");
|
||||
clusterVO.setHypervisorType("KVM");
|
||||
|
|
@ -73,15 +123,49 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
volume.setName("testVolume");
|
||||
when(storageStrategy.createStorageVolume(any(), any())).thenReturn(volume);
|
||||
|
||||
// Setup for attachCluster tests
|
||||
// Configure dataStore mock with necessary methods (works for both DataStore and PrimaryDataStoreInfo)
|
||||
when(dataStore.getId()).thenReturn(1L);
|
||||
when(((PrimaryDataStoreInfo) dataStore).getClusterId()).thenReturn(1L);
|
||||
|
||||
// Mock the setDetails method to prevent NullPointerException
|
||||
Mockito.doNothing().when(((PrimaryDataStoreInfo) dataStore)).setDetails(any());
|
||||
|
||||
// Mock storagePoolDao to return a valid StoragePoolVO
|
||||
StoragePoolVO mockStoragePoolVO = new StoragePoolVO();
|
||||
mockStoragePoolVO.setId(1L);
|
||||
when(storagePoolDao.findById(1L)).thenReturn(mockStoragePoolVO);
|
||||
|
||||
mockHosts = new ArrayList<>();
|
||||
HostVO host1 = new HostVO("host1-guid");
|
||||
host1.setPrivateIpAddress("192.168.1.10");
|
||||
host1.setStorageIpAddress("192.168.1.10");
|
||||
host1.setClusterId(1L);
|
||||
HostVO host2 = new HostVO("host2-guid");
|
||||
host2.setPrivateIpAddress("192.168.1.11");
|
||||
host2.setStorageIpAddress("192.168.1.11");
|
||||
host2.setClusterId(1L);
|
||||
mockHosts.add(host1);
|
||||
mockHosts.add(host2);
|
||||
poolDetails = new HashMap<>();
|
||||
poolDetails.put("username", "admin");
|
||||
poolDetails.put("password", "password");
|
||||
poolDetails.put("svmName", "svm1");
|
||||
poolDetails.put("protocol", "NFS3");
|
||||
poolDetails.put("storageIP", "192.168.1.100");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInitialize_positive() {
|
||||
|
||||
HashMap<String, String> detailsMap = new HashMap<String, String>();
|
||||
detailsMap.put(OntapStorageConstants.USERNAME, "testUser");
|
||||
detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword");
|
||||
detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10");
|
||||
detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0");
|
||||
detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3");
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("username", "testUser");
|
||||
dsInfos.put("password", "testPassword");
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -91,31 +175,7 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
dsInfos.put("managed",true);
|
||||
dsInfos.put("tags", "testTag");
|
||||
dsInfos.put("isTagARule", false);
|
||||
dsInfos.put("details", new HashMap<String, String>());
|
||||
|
||||
try(MockedStatic<StorageProviderFactory> storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
|
||||
storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
|
||||
ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInitialize_positiveWithIsDisaggregated() {
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("username", "testUser");
|
||||
dsInfos.put("password", "testPassword");
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
dsInfos.put("name", "testStoragePool");
|
||||
dsInfos.put("providerName", "testProvider");
|
||||
dsInfos.put("capacityBytes",200000L);
|
||||
dsInfos.put("managed",true);
|
||||
dsInfos.put("tags", "testTag");
|
||||
dsInfos.put("isTagARule", false);
|
||||
dsInfos.put("details", new HashMap<String, String>());
|
||||
dsInfos.put("details", detailsMap);
|
||||
|
||||
try(MockedStatic<StorageProviderFactory> storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
|
||||
storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
|
||||
|
|
@ -132,8 +192,14 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
|
||||
@Test
|
||||
public void testInitialize_missingRequiredDetailKey() {
|
||||
|
||||
HashMap<String, String> detailsMap = new HashMap<String, String>();
|
||||
detailsMap.put(OntapStorageConstants.USERNAME, "testUser");
|
||||
detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword");
|
||||
detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10");
|
||||
detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0");
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -143,7 +209,7 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
dsInfos.put("managed",true);
|
||||
dsInfos.put("tags", "testTag");
|
||||
dsInfos.put("isTagARule", false);
|
||||
dsInfos.put("details", new HashMap<String, String>());
|
||||
dsInfos.put("details", detailsMap);
|
||||
|
||||
try (MockedStatic<StorageProviderFactory> storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
|
||||
storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
|
||||
|
|
@ -154,8 +220,15 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
|
||||
@Test
|
||||
public void testInitialize_invalidCapacityBytes() {
|
||||
|
||||
HashMap<String, String> detailsMap = new HashMap<String, String>();
|
||||
detailsMap.put(OntapStorageConstants.USERNAME, "testUser");
|
||||
detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword");
|
||||
detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10");
|
||||
detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0");
|
||||
detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3");
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -165,7 +238,7 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
dsInfos.put("managed",true);
|
||||
dsInfos.put("tags", "testTag");
|
||||
dsInfos.put("isTagARule", false);
|
||||
dsInfos.put("details", new HashMap<String, String>());
|
||||
dsInfos.put("details", detailsMap);
|
||||
|
||||
try (MockedStatic<StorageProviderFactory> storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
|
||||
storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
|
||||
|
|
@ -176,7 +249,6 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
@Test
|
||||
public void testInitialize_unmanagedStorage() {
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -200,7 +272,6 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
@Test
|
||||
public void testInitialize_nullStoragePoolName() {
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -224,7 +295,6 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
@Test
|
||||
public void testInitialize_nullProviderName() {
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -248,7 +318,6 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
@Test
|
||||
public void testInitialize_nullPodAndClusterAndZone() {
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",null);
|
||||
dsInfos.put("podId",null);
|
||||
dsInfos.put("clusterId", null);
|
||||
|
|
@ -276,7 +345,6 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
when(_clusterDao.findById(2L)).thenReturn(clusterVO);
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 2L);
|
||||
|
|
@ -299,8 +367,16 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
|
||||
@Test
|
||||
public void testInitialize_unexpectedDetailKey() {
|
||||
|
||||
HashMap<String, String> detailsMap = new HashMap<String, String>();
|
||||
detailsMap.put(OntapStorageConstants.USERNAME, "testUser");
|
||||
detailsMap.put(OntapStorageConstants.PASSWORD, "testPassword");
|
||||
detailsMap.put(OntapStorageConstants.STORAGE_IP, "10.10.10.10");
|
||||
detailsMap.put(OntapStorageConstants.SVM_NAME, "vs0");
|
||||
detailsMap.put(OntapStorageConstants.PROTOCOL, "NFS3");
|
||||
detailsMap.put("unexpectedKey", "unexpectedValue");
|
||||
|
||||
Map<String, Object> dsInfos = new HashMap<>();
|
||||
dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;unexpectedKey=unexpectedValue");
|
||||
dsInfos.put("zoneId",1L);
|
||||
dsInfos.put("podId",1L);
|
||||
dsInfos.put("clusterId", 1L);
|
||||
|
|
@ -310,7 +386,7 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
dsInfos.put("managed",true);
|
||||
dsInfos.put("tags", "testTag");
|
||||
dsInfos.put("isTagARule", false);
|
||||
dsInfos.put("details", new HashMap<String, String>());
|
||||
dsInfos.put("details", detailsMap);
|
||||
|
||||
Exception ex = assertThrows(CloudRuntimeException.class, () -> {
|
||||
try (MockedStatic<StorageProviderFactory> storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
|
||||
|
|
@ -321,4 +397,409 @@ public class OntapPrimaryDatastoreLifecycleTest {
|
|||
assertTrue(ex.getMessage().contains("Unexpected ONTAP detail key in URL"));
|
||||
}
|
||||
|
||||
// ========== attachCluster Tests ==========
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_positive() throws Exception {
|
||||
// Setup
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock successful host connections
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachCluster should return true on success");
|
||||
verify(_resourceMgr, times(1))
|
||||
.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any());
|
||||
verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L);
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_withSingleHost() throws Exception {
|
||||
// Setup - only one host in cluster
|
||||
List<HostVO> singleHost = new ArrayList<>();
|
||||
singleHost.add(mockHosts.get(0));
|
||||
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(singleHost);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachCluster should return true with single host");
|
||||
verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_withMultipleHosts() throws Exception {
|
||||
// Setup - add more hosts
|
||||
HostVO host3 = new HostVO("host3-guid");
|
||||
host3.setPrivateIpAddress("192.168.1.12");
|
||||
host3.setStorageIpAddress("192.168.1.12");
|
||||
host3.setClusterId(1L);
|
||||
mockHosts.add(host3);
|
||||
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachCluster should return true with multiple hosts");
|
||||
verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_hostConnectionFailure() throws Exception {
|
||||
// Setup
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock host connection failure for first host
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
|
||||
.thenThrow(new CloudRuntimeException("Connection failed"));
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertFalse(result, "attachCluster should return false on host connection failure");
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
// _dataStoreHelper.attachCluster should NOT be called due to early return
|
||||
verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_emptyHostList() throws Exception {
|
||||
// Setup - no hosts in cluster
|
||||
List<HostVO> emptyHosts = new ArrayList<>();
|
||||
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(emptyHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachCluster should return true even with no hosts");
|
||||
verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong());
|
||||
verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_secondHostConnectionFails() throws Exception {
|
||||
// Setup
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock: first host succeeds, second host fails
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
|
||||
.thenReturn(true)
|
||||
.thenThrow(new CloudRuntimeException("Connection failed"));
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify
|
||||
assertFalse(result, "attachCluster should return false when any host connection fails");
|
||||
verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachCluster_createAccessGroupCalled() throws Exception {
|
||||
// Setup
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
|
||||
dataStore, clusterScope);
|
||||
|
||||
// Verify - createAccessGroup is called with correct AccessGroup structure
|
||||
assertTrue(result);
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
}
|
||||
}
|
||||
|
||||
// ========== attachZone Tests ==========
|
||||
|
||||
@Test
|
||||
public void testAttachZone_positive() throws Exception {
|
||||
// Setup
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock successful host connections
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachZone should return true on success");
|
||||
verify(_resourceMgr, times(1))
|
||||
.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM));
|
||||
verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L);
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_withSingleHost() throws Exception {
|
||||
// Setup - only one host in zone
|
||||
List<HostVO> singleHost = new ArrayList<>();
|
||||
singleHost.add(mockHosts.get(0));
|
||||
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(singleHost);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachZone should return true with single host");
|
||||
verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_withMultipleHosts() throws Exception {
|
||||
// Setup - add more hosts
|
||||
HostVO host3 = new HostVO("host3-guid");
|
||||
host3.setPrivateIpAddress("192.168.1.12");
|
||||
host3.setStorageIpAddress("192.168.1.12");
|
||||
host3.setClusterId(1L);
|
||||
mockHosts.add(host3);
|
||||
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachZone should return true with multiple hosts");
|
||||
verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_hostConnectionFailure() throws Exception {
|
||||
// Setup
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock host connection failure for first host
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
|
||||
.thenThrow(new CloudRuntimeException("Connection failed"));
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertFalse(result, "attachZone should return false on host connection failure");
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
// _dataStoreHelper.attachZone should NOT be called due to early return
|
||||
verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_emptyHostList() throws Exception {
|
||||
// Setup - no hosts in zone
|
||||
List<HostVO> emptyHosts = new ArrayList<>();
|
||||
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(emptyHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertTrue(result, "attachZone should return true even with no hosts");
|
||||
verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong());
|
||||
verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_secondHostConnectionFails() throws Exception {
|
||||
// Setup
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
|
||||
// Mock: first host succeeds, second host fails
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
|
||||
.thenReturn(true)
|
||||
.thenThrow(new CloudRuntimeException("Connection failed"));
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify
|
||||
assertFalse(result, "attachZone should return false when any host connection fails");
|
||||
verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
|
||||
verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAttachZone_createAccessGroupCalled() throws Exception {
|
||||
// Setup
|
||||
when(zoneScope.getScopeId()).thenReturn(1L);
|
||||
when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
|
||||
.thenReturn(mockHosts);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
|
||||
when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
|
||||
|
||||
try (MockedStatic<OntapStorageUtils> utilityMock = Mockito.mockStatic(OntapStorageUtils.class)) {
|
||||
utilityMock.when(() -> OntapStorageUtils.getStrategyByStoragePoolDetails(any()))
|
||||
.thenReturn(storageStrategy);
|
||||
when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
|
||||
when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
|
||||
|
||||
// Execute
|
||||
boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
|
||||
dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
|
||||
|
||||
// Verify - createAccessGroup is called with correct AccessGroup structure
|
||||
assertTrue(result);
|
||||
verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,835 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import feign.FeignException;
|
||||
import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SANFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.Aggregate;
|
||||
import org.apache.cloudstack.storage.feign.model.IpInterface;
|
||||
import org.apache.cloudstack.storage.feign.model.IscsiService;
|
||||
import org.apache.cloudstack.storage.feign.model.Job;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.cloudstack.storage.feign.model.Svm;
|
||||
import org.apache.cloudstack.storage.feign.model.Volume;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.mockito.junit.jupiter.MockitoSettings;
|
||||
import org.mockito.quality.Strictness;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertFalse;
|
||||
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.junit.jupiter.api.Assertions.assertTrue;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyMap;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.atLeastOnce;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
@MockitoSettings(strictness = Strictness.LENIENT)
|
||||
public class StorageStrategyTest {
|
||||
|
||||
@Mock
|
||||
private AggregateFeignClient aggregateFeignClient;
|
||||
|
||||
@Mock
|
||||
private VolumeFeignClient volumeFeignClient;
|
||||
|
||||
@Mock
|
||||
private SvmFeignClient svmFeignClient;
|
||||
|
||||
@Mock
|
||||
private JobFeignClient jobFeignClient;
|
||||
|
||||
@Mock
|
||||
private NetworkFeignClient networkFeignClient;
|
||||
|
||||
@Mock
|
||||
private SANFeignClient sanFeignClient;
|
||||
|
||||
private TestableStorageStrategy storageStrategy;
|
||||
|
||||
// Concrete implementation for testing abstract class
|
||||
private static class TestableStorageStrategy extends StorageStrategy {
|
||||
public TestableStorageStrategy(OntapStorage ontapStorage,
|
||||
AggregateFeignClient aggregateFeignClient,
|
||||
VolumeFeignClient volumeFeignClient,
|
||||
SvmFeignClient svmFeignClient,
|
||||
JobFeignClient jobFeignClient,
|
||||
NetworkFeignClient networkFeignClient,
|
||||
SANFeignClient sanFeignClient) {
|
||||
super(ontapStorage);
|
||||
// Use reflection to replace the private Feign client fields with mocked ones
|
||||
injectMockedClient("aggregateFeignClient", aggregateFeignClient);
|
||||
injectMockedClient("volumeFeignClient", volumeFeignClient);
|
||||
injectMockedClient("svmFeignClient", svmFeignClient);
|
||||
injectMockedClient("jobFeignClient", jobFeignClient);
|
||||
injectMockedClient("networkFeignClient", networkFeignClient);
|
||||
injectMockedClient("sanFeignClient", sanFeignClient);
|
||||
}
|
||||
|
||||
private void injectMockedClient(String fieldName, Object mockedClient) {
|
||||
try {
|
||||
Field field = StorageStrategy.class.getDeclaredField(fieldName);
|
||||
field.setAccessible(true);
|
||||
field.set(this, mockedClient);
|
||||
} catch (NoSuchFieldException | IllegalAccessException e) {
|
||||
throw new RuntimeException("Failed to inject mocked client: " + fieldName, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public CloudStackVolume getCloudStackVolume(Map<String, String> cloudStackVolumeMap) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JobResponse revertSnapshotForCloudStackVolume(String snapshotName, String flexVolUuid, String snapshotUuid, String volumePath, String lunUuid, String flexVolName) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AccessGroup createAccessGroup(AccessGroup accessGroup) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Stub: no-op; access-group deletion is not under test here.
@Override
public void deleteAccessGroup(AccessGroup accessGroup) {
}
|
||||
|
||||
// Stub: access-group update is not exercised by these tests.
@Override
AccessGroup updateAccessGroup(AccessGroup accessGroup) {
    return null;
}
|
||||
|
||||
// Stub: access-group lookup is not exercised by these tests.
@Override
public AccessGroup getAccessGroup(Map<String, String> values) {
    return null;
}
|
||||
|
||||
// Stub: logical-access enablement is not exercised by these tests.
@Override
public Map<String, String> enableLogicalAccess(Map<String, String> values) {
    return null;
}
|
||||
|
||||
// Stub: no-op; logical-access disabling is not under test here.
@Override
public void disableLogicalAccess(Map<String, String> values) {
}
|
||||
|
||||
// Stub: logical-access lookup is not exercised by these tests.
@Override
public Map<String, String> getLogicalAccess(Map<String, String> values) {
    return null;
}
|
||||
}
|
||||
|
||||
/**
 * Builds a fresh strategy before every test, wired with the mocked Feign
 * clients so no real ONTAP REST calls are made. Defaults to NFS3 protocol;
 * iSCSI-specific tests rebuild the strategy themselves.
 */
@BeforeEach
void setUp() {
    // Create OntapStorage using constructor (immutable object)
    OntapStorage ontapStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", 5000000000L, ProtocolType.NFS3);

    // Note: In real implementation, StorageStrategy constructor creates Feign clients
    // For testing, we'll need to mock the FeignClientFactory behavior
    storageStrategy = new TestableStorageStrategy(ontapStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);
}
|
||||
|
||||
// ========== connect() Tests ==========
|
||||
|
||||
/**
 * connect() happy path: SVM is running, NFS is enabled, and its single
 * aggregate is online with sufficient free space — expects true.
 */
@Test
public void testConnect_positive() {
    // Setup: a running SVM with NFS enabled and one assigned aggregate
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState(OntapStorageConstants.RUNNING);
    svm.setNfsEnabled(true);

    Aggregate aggregate = new Aggregate();
    aggregate.setName("aggr1");
    aggregate.setUuid("aggr-uuid-1");
    svm.setAggregates(List.of(aggregate));

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);
    // Aggregate detail lookup reports ONLINE with ~10GB available
    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE);
    when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class));
    when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0);
    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))).thenReturn(aggregateDetail);

    // Execute
    boolean result = storageStrategy.connect();

    // Verify
    assertTrue(result, "connect() should return true on success");
    verify(svmFeignClient, times(1)).getSvmResponse(anyMap(), anyString());
}
|
||||
|
||||
/** connect() must return false when the SVM query returns no records. */
@Test
public void testConnect_svmNotFound() {
    // Setup: empty SVM record list
    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(new ArrayList<>());

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    // Execute
    boolean result = storageStrategy.connect();

    // Verify
    assertFalse(result, "connect() should return false when SVM is not found");
}
|
||||
|
||||
/** connect() must return false when the SVM exists but is not in the running state. */
@Test
public void testConnect_svmNotRunning() {
    // Setup: SVM reported as "stopped"
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState("stopped");
    svm.setNfsEnabled(true);

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    // Execute
    boolean result = storageStrategy.connect();

    // Verify
    assertFalse(result, "connect() should return false when SVM is not running");
}
|
||||
|
||||
/** connect() must fail when the strategy uses NFS but the SVM has NFS disabled. */
@Test
public void testConnect_nfsNotEnabled() {
    // Setup: running SVM with an aggregate, but NFS disabled
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState(OntapStorageConstants.RUNNING);
    svm.setNfsEnabled(false);

    Aggregate aggregate = new Aggregate();
    aggregate.setName("aggr1");
    aggregate.setUuid("aggr-uuid-1");
    svm.setAggregates(List.of(aggregate));

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    // Execute & Verify
    boolean result = storageStrategy.connect();
    assertFalse(result, "connect() should fail when NFS is disabled");
}
|
||||
|
||||
/**
 * connect() must fail when the strategy is configured for iSCSI but the
 * SVM has iSCSI disabled. Rebuilds the strategy with ProtocolType.ISCSI.
 */
@Test
public void testConnect_iscsiNotEnabled() {
    // Setup - recreate with iSCSI protocol
    OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", 5000000000L, ProtocolType.ISCSI);
    storageStrategy = new TestableStorageStrategy(iscsiStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);

    // Running SVM with an aggregate, but iSCSI disabled
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState(OntapStorageConstants.RUNNING);
    svm.setIscsiEnabled(false);

    Aggregate aggregate = new Aggregate();
    aggregate.setName("aggr1");
    aggregate.setUuid("aggr-uuid-1");
    svm.setAggregates(List.of(aggregate));

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    // Execute & Verify
    boolean result = storageStrategy.connect();
    assertFalse(result, "connect() should fail when iSCSI is disabled");
}
|
||||
|
||||
/** connect() must return false when the SVM has no aggregates assigned. */
@Test
public void testConnect_noAggregates() {
    // Setup: running SVM, NFS enabled, but empty aggregate list
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState(OntapStorageConstants.RUNNING);
    svm.setNfsEnabled(true);
    svm.setAggregates(new ArrayList<>());

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    // Execute
    boolean result = storageStrategy.connect();

    // Verify
    assertFalse(result, "connect() should return false when no aggregates are assigned");
}
|
||||
|
||||
/** connect() must return false (not throw) when the SVM REST call yields null. */
@Test
public void testConnect_nullSvmResponse() {
    // Setup
    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(null);

    // Execute
    boolean result = storageStrategy.connect();

    // Verify
    assertFalse(result, "connect() should return false when SVM response is null");
}
|
||||
|
||||
// ========== createStorageVolume() Tests ==========
|
||||
|
||||
/**
 * createStorageVolume() happy path: connect() discovers an online aggregate
 * with enough space, the async create job completes successfully, and the
 * new volume is retrievable afterwards.
 */
@Test
public void testCreateStorageVolume_positive() {
    // Setup - First connect to populate aggregates
    setupSuccessfulConnect();
    storageStrategy.connect();

    // Setup aggregate details
    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE);
    when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space
    when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0);

    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1")))
            .thenReturn(aggregateDetail);

    // Setup job response returned by the async create call
    Job job = new Job();
    job.setUuid("job-uuid-1");
    JobResponse jobResponse = new JobResponse();
    jobResponse.setJob(job);

    when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class)))
            .thenReturn(jobResponse);

    // Setup job polling: the job is reported as successful
    Job completedJob = new Job();
    completedJob.setUuid("job-uuid-1");
    completedJob.setState(OntapStorageConstants.JOB_SUCCESS);
    when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1")))
            .thenReturn(completedJob);

    // Setup volume retrieval after creation
    Volume createdVolume = new Volume();
    createdVolume.setName("test-volume");
    createdVolume.setUuid("vol-uuid-1");
    OntapResponse<Volume> volumeResponse = new OntapResponse<>();
    volumeResponse.setRecords(List.of(createdVolume));

    when(volumeFeignClient.getAllVolumes(anyString(), anyMap()))
            .thenReturn(volumeResponse);
    when(volumeFeignClient.getVolume(anyString(), anyMap()))
            .thenReturn(volumeResponse);

    // Execute
    Volume result = storageStrategy.createStorageVolume("test-volume", 5000000000L);

    // Verify
    assertNotNull(result);
    assertEquals("test-volume", result.getName());
    assertEquals("vol-uuid-1", result.getUuid());
    verify(volumeFeignClient, times(1)).createVolumeWithJob(anyString(), any(Volume.class));
    verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1"));
}
|
||||
|
||||
/** createStorageVolume() must reject a negative size with CloudRuntimeException. */
@Test
public void testCreateStorageVolume_invalidSize() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", -1L));
    assertTrue(ex.getMessage().contains("Invalid volume size"));
}
|
||||
|
||||
/** createStorageVolume() must reject a null size with CloudRuntimeException. */
@Test
public void testCreateStorageVolume_nullSize() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", null));
    assertTrue(ex.getMessage().contains("Invalid volume size"));
}
|
||||
|
||||
/** createStorageVolume() must fail when connect() was never called (no aggregates cached). */
@Test
public void testCreateStorageVolume_noAggregates() {
    // Execute & Verify - without calling connect first
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", 5000000000L));
    assertTrue(ex.getMessage().contains("No aggregates available"));
}
|
||||
|
||||
/** createStorageVolume() must fail when no aggregate is in the ONLINE state. */
@Test
public void testCreateStorageVolume_aggregateNotOnline() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();

    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(null); // null state to simulate offline

    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1")))
            .thenReturn(aggregateDetail);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", 5000000000L));
    assertTrue(ex.getMessage().contains("No suitable aggregates found"));
}
|
||||
|
||||
/** createStorageVolume() must fail when no aggregate has enough free space. */
@Test
public void testCreateStorageVolume_insufficientSpace() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();

    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE);
    when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(1000000.0); // Only 1MB available

    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1")))
            .thenReturn(aggregateDetail);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); // Request 5GB
    assertTrue(ex.getMessage().contains("No suitable aggregates found"));
}
|
||||
|
||||
/** createStorageVolume() must surface a failure when the ONTAP create job fails. */
@Test
public void testCreateStorageVolume_jobFailed() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();

    setupAggregateForVolumeCreation();

    Job job = new Job();
    job.setUuid("job-uuid-1");
    JobResponse jobResponse = new JobResponse();
    jobResponse.setJob(job);

    when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class)))
            .thenReturn(jobResponse);

    // Setup failed job returned by polling
    Job failedJob = new Job();
    failedJob.setUuid("job-uuid-1");
    failedJob.setState(OntapStorageConstants.JOB_FAILURE);
    failedJob.setMessage("Volume creation failed");
    when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1")))
            .thenReturn(failedJob);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", 5000000000L));
    assertTrue(ex.getMessage().contains("failed") || ex.getMessage().contains("Job failed"));
}
|
||||
|
||||
/**
 * createStorageVolume() must fail when the create job succeeds but the new
 * volume cannot be found in a subsequent lookup.
 */
@Test
public void testCreateStorageVolume_volumeNotFoundAfterCreation() {
    // Setup
    setupSuccessfulConnect();
    storageStrategy.connect();
    setupAggregateForVolumeCreation();
    setupSuccessfulJobCreation();

    // Setup empty volume response to simulate a missing volume
    OntapResponse<Volume> emptyResponse = new OntapResponse<>();
    emptyResponse.setRecords(new ArrayList<>());

    when(volumeFeignClient.getAllVolumes(anyString(), anyMap()))
            .thenReturn(emptyResponse);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.createStorageVolume("test-volume", 5000000000L));
    assertTrue(ex.getMessage() != null && ex.getMessage().contains("not found after creation"));
}
|
||||
|
||||
// ========== deleteStorageVolume() Tests ==========
|
||||
|
||||
/** deleteStorageVolume() happy path: delete job is created and completes successfully. */
@Test
public void testDeleteStorageVolume_positive() {
    // Setup: the volume to delete
    Volume volume = new Volume();
    volume.setName("test-volume");
    volume.setUuid("vol-uuid-1");

    Job job = new Job();
    job.setUuid("job-uuid-1");
    JobResponse jobResponse = new JobResponse();
    jobResponse.setJob(job);

    when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1")))
            .thenReturn(jobResponse);

    // Polling reports the job as successful
    Job completedJob = new Job();
    completedJob.setUuid("job-uuid-1");
    completedJob.setState(OntapStorageConstants.JOB_SUCCESS);
    when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1")))
            .thenReturn(completedJob);

    // Execute
    storageStrategy.deleteStorageVolume(volume);

    // Verify
    verify(volumeFeignClient, times(1)).deleteVolume(anyString(), eq("vol-uuid-1"));
    verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1"));
}
|
||||
|
||||
/** deleteStorageVolume() must throw when the ONTAP delete job reports failure. */
@Test
public void testDeleteStorageVolume_jobFailed() {
    // Setup
    Volume volume = new Volume();
    volume.setName("test-volume");
    volume.setUuid("vol-uuid-1");

    Job job = new Job();
    job.setUuid("job-uuid-1");
    JobResponse jobResponse = new JobResponse();
    jobResponse.setJob(job);

    when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1")))
            .thenReturn(jobResponse);

    // Polling reports the job as failed
    Job failedJob = new Job();
    failedJob.setUuid("job-uuid-1");
    failedJob.setState(OntapStorageConstants.JOB_FAILURE);
    failedJob.setMessage("Deletion failed");
    when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1")))
            .thenReturn(failedJob);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.deleteStorageVolume(volume));
    assertTrue(ex.getMessage().contains("Job failed"));
}
|
||||
|
||||
/** deleteStorageVolume() must wrap a Feign client error in CloudRuntimeException. */
@Test
public void testDeleteStorageVolume_feignException() {
    // Setup
    Volume volume = new Volume();
    volume.setName("test-volume");
    volume.setUuid("vol-uuid-1");

    when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1")))
            .thenThrow(mock(FeignException.FeignClientException.class));

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.deleteStorageVolume(volume));
    assertTrue(ex.getMessage().contains("Failed to delete volume"));
}
|
||||
|
||||
// ========== getStoragePath() Tests ==========
|
||||
|
||||
/** getStoragePath() for iSCSI should return the target IQN of the SVM's iSCSI service. */
@Test
public void testGetStoragePath_iscsi() {
    // Setup - recreate with iSCSI protocol
    OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", null, ProtocolType.ISCSI);
    storageStrategy = new TestableStorageStrategy(iscsiStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);

    IscsiService.IscsiServiceTarget target = new IscsiService.IscsiServiceTarget();
    target.setName("iqn.1992-08.com.netapp:sn.123456:vs.1");

    IscsiService iscsiService = new IscsiService();
    iscsiService.setTarget(target);

    OntapResponse<IscsiService> iscsiResponse = new OntapResponse<>();
    iscsiResponse.setRecords(List.of(iscsiService));

    when(sanFeignClient.getIscsiServices(anyString(), anyMap()))
            .thenReturn(iscsiResponse);

    // Execute
    String result = storageStrategy.getStoragePath();

    // Verify
    assertNotNull(result);
    assertEquals("iqn.1992-08.com.netapp:sn.123456:vs.1", result);
    verify(sanFeignClient, times(1)).getIscsiServices(anyString(), anyMap());
}
|
||||
|
||||
/** getStoragePath() for iSCSI must throw when no iSCSI service exists on the SVM. */
@Test
public void testGetStoragePath_iscsi_noService() {
    // Setup - recreate with iSCSI protocol
    OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", null, ProtocolType.ISCSI);
    storageStrategy = new TestableStorageStrategy(iscsiStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);

    OntapResponse<IscsiService> emptyResponse = new OntapResponse<>();
    emptyResponse.setRecords(new ArrayList<>());

    when(sanFeignClient.getIscsiServices(anyString(), anyMap()))
            .thenReturn(emptyResponse);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.getStoragePath());
    assertTrue(ex.getMessage().contains("No iSCSI service found"));
}
|
||||
|
||||
/** getStoragePath() for iSCSI must throw when the iSCSI service has no target IQN. */
@Test
public void testGetStoragePath_iscsi_noTargetIqn() {
    // Setup - recreate with iSCSI protocol
    OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", null, ProtocolType.ISCSI);
    storageStrategy = new TestableStorageStrategy(iscsiStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);

    // iSCSI service exists but has a null target
    IscsiService iscsiService = new IscsiService();
    iscsiService.setTarget(null);

    OntapResponse<IscsiService> iscsiResponse = new OntapResponse<>();
    iscsiResponse.setRecords(List.of(iscsiService));

    when(sanFeignClient.getIscsiServices(anyString(), anyMap()))
            .thenReturn(iscsiResponse);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.getStoragePath());
    assertTrue(ex.getMessage().contains("iSCSI target IQN not found"));
}
|
||||
|
||||
// ========== getNetworkInterface() Tests ==========
|
||||
|
||||
/** getNetworkInterface() for NFS should return the address of the first data LIF. */
@Test
public void testGetNetworkInterface_nfs() {
    // Setup: one IP interface with a known address
    IpInterface.IpInfo ipInfo = new IpInterface.IpInfo();
    ipInfo.setAddress("192.168.1.50");

    IpInterface ipInterface = new IpInterface();
    ipInterface.setIp(ipInfo);

    OntapResponse<IpInterface> interfaceResponse = new OntapResponse<>();
    interfaceResponse.setRecords(List.of(ipInterface));

    when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap()))
            .thenReturn(interfaceResponse);

    // Execute
    String result = storageStrategy.getNetworkInterface();

    // Verify
    assertNotNull(result);
    assertEquals("192.168.1.50", result);
    verify(networkFeignClient, times(1)).getNetworkIpInterfaces(anyString(), anyMap());
}
|
||||
|
||||
/** getNetworkInterface() for iSCSI should also resolve to the data LIF address. */
@Test
public void testGetNetworkInterface_iscsi() {
    // Setup - recreate with iSCSI protocol
    OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100",
            "svm1", null, ProtocolType.ISCSI);
    storageStrategy = new TestableStorageStrategy(iscsiStorage,
            aggregateFeignClient, volumeFeignClient, svmFeignClient,
            jobFeignClient, networkFeignClient, sanFeignClient);

    IpInterface.IpInfo ipInfo = new IpInterface.IpInfo();
    ipInfo.setAddress("192.168.1.51");

    IpInterface ipInterface = new IpInterface();
    ipInterface.setIp(ipInfo);

    OntapResponse<IpInterface> interfaceResponse = new OntapResponse<>();
    interfaceResponse.setRecords(List.of(ipInterface));

    when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap()))
            .thenReturn(interfaceResponse);

    // Execute
    String result = storageStrategy.getNetworkInterface();

    // Verify
    assertNotNull(result);
    assertEquals("192.168.1.51", result);
}
|
||||
|
||||
/** getNetworkInterface() must throw when the SVM exposes no IP interfaces. */
@Test
public void testGetNetworkInterface_noInterfaces() {
    // Setup: empty interface list
    OntapResponse<IpInterface> emptyResponse = new OntapResponse<>();
    emptyResponse.setRecords(new ArrayList<>());

    when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap()))
            .thenReturn(emptyResponse);

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.getNetworkInterface());
    assertTrue(ex.getMessage().contains("No network interfaces found"));
}
|
||||
|
||||
/** getNetworkInterface() must wrap a Feign client error in CloudRuntimeException. */
@Test
public void testGetNetworkInterface_feignException() {
    // Setup
    when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap()))
            .thenThrow(mock(FeignException.FeignClientException.class));

    // Execute & Verify
    Exception ex = assertThrows(CloudRuntimeException.class,
            () -> storageStrategy.getNetworkInterface());
    assertTrue(ex.getMessage().contains("Failed to retrieve network interfaces"));
}
|
||||
|
||||
// ========== Helper Methods ==========
|
||||
|
||||
/**
 * Stubs the SVM and aggregate Feign calls so that connect() succeeds:
 * running SVM, NFS enabled, one ONLINE aggregate with ~10GB free.
 */
private void setupSuccessfulConnect() {
    Svm svm = new Svm();
    svm.setName("svm1");
    svm.setState(OntapStorageConstants.RUNNING);
    svm.setNfsEnabled(true);

    Aggregate aggregate = new Aggregate();
    aggregate.setName("aggr1");
    aggregate.setUuid("aggr-uuid-1");
    svm.setAggregates(List.of(aggregate));

    OntapResponse<Svm> svmResponse = new OntapResponse<>();
    svmResponse.setRecords(List.of(svm));

    when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse);

    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE);
    when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class));
    when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0);
    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))).thenReturn(aggregateDetail);
}
|
||||
|
||||
/**
 * Stubs the aggregate-detail lookup so volume creation finds a suitable
 * ONLINE aggregate with ~10GB of available block storage.
 */
private void setupAggregateForVolumeCreation() {
    Aggregate aggregateDetail = mock(Aggregate.class);
    when(aggregateDetail.getName()).thenReturn("aggr1");
    when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1");
    when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE);
    when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space
    when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0);

    when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1")))
            .thenReturn(aggregateDetail);
}
|
||||
|
||||
/**
 * Stubs a successful async volume-creation flow: create call returns a job,
 * polling reports JOB_SUCCESS, and the created volume is retrievable.
 */
private void setupSuccessfulJobCreation() {
    Job job = new Job();
    job.setUuid("job-uuid-1");
    JobResponse jobResponse = new JobResponse();
    jobResponse.setJob(job);

    when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class)))
            .thenReturn(jobResponse);

    Job completedJob = new Job();
    completedJob.setUuid("job-uuid-1");
    completedJob.setState(OntapStorageConstants.JOB_SUCCESS);
    when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1")))
            .thenReturn(completedJob);

    Volume createdVolume = new Volume();
    createdVolume.setName("test-volume");
    createdVolume.setUuid("vol-uuid-1");
    OntapResponse<Volume> volumeResponse = new OntapResponse<>();
    volumeResponse.setRecords(List.of(createdVolume));

    when(volumeFeignClient.getAllVolumes(anyString(), anyMap()))
            .thenReturn(volumeResponse);
    when(volumeFeignClient.getVolume(anyString(), anyMap()))
            .thenReturn(volumeResponse);
}
|
||||
}
|
||||
|
|
@ -0,0 +1,585 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.apache.cloudstack.storage.service;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectCommand;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NASFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.client.SANFeignClient;
|
||||
import org.apache.cloudstack.storage.feign.model.ExportPolicy;
|
||||
import org.apache.cloudstack.storage.feign.model.Job;
|
||||
import org.apache.cloudstack.storage.feign.model.OntapStorage;
|
||||
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
|
||||
import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
|
||||
import org.apache.cloudstack.storage.service.model.AccessGroup;
|
||||
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
|
||||
import org.apache.cloudstack.storage.service.model.ProtocolType;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import org.apache.cloudstack.storage.volume.VolumeObject;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.mockito.junit.jupiter.MockitoSettings;
|
||||
import org.mockito.quality.Strictness;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertNotNull;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.anyLong;
|
||||
import static org.mockito.ArgumentMatchers.anyMap;
|
||||
import static org.mockito.ArgumentMatchers.anyString;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doNothing;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
@MockitoSettings(strictness = Strictness.LENIENT)
|
||||
public class UnifiedNASStrategyTest {
|
||||
|
||||
// Mocked ONTAP REST Feign clients; injected into the strategy so no real calls are made.
@Mock
private NASFeignClient nasFeignClient;

@Mock
private VolumeFeignClient volumeFeignClient;

@Mock
private JobFeignClient jobFeignClient;

@Mock
private AggregateFeignClient aggregateFeignClient;

@Mock
private SvmFeignClient svmFeignClient;

@Mock
private NetworkFeignClient networkFeignClient;

@Mock
private SANFeignClient sanFeignClient;

// Mocked CloudStack collaborators, injected by reflection in setUp().
@Mock
private VolumeDao volumeDao;

@Mock
private EndPointSelector epSelector;

@Mock
private StoragePoolDetailsDao storagePoolDetailsDao;

// Strategy under test; rebuilt before every test.
private TestableUnifiedNASStrategy strategy;

private OntapStorage ontapStorage;
|
||||
|
||||
/**
 * Builds the strategy under test with mocked Feign clients and injects the
 * mocked CloudStack DAOs/selector into its private fields via reflection.
 */
@BeforeEach
public void setUp() throws Exception {
    ontapStorage = new OntapStorage(
            "admin",
            "password",
            "192.168.1.100",
            "svm1",
            100L,
            ProtocolType.NFS3
    );
    strategy = new TestableUnifiedNASStrategy(ontapStorage, nasFeignClient, volumeFeignClient, jobFeignClient, aggregateFeignClient, svmFeignClient, networkFeignClient, sanFeignClient);
    injectField("volumeDao", volumeDao);
    injectField("epSelector", epSelector);
    injectField("storagePoolDetailsDao", storagePoolDetailsDao);
}
|
||||
|
||||
private void injectField(String fieldName, Object mockedField) throws Exception {
|
||||
Field field = UnifiedNASStrategy.class.getDeclaredField(fieldName);
|
||||
field.setAccessible(true);
|
||||
field.set(strategy, mockedField);
|
||||
}
|
||||
|
||||
private class TestableUnifiedNASStrategy extends UnifiedNASStrategy {
|
||||
public TestableUnifiedNASStrategy(OntapStorage ontapStorage,
|
||||
NASFeignClient nasFeignClient,
|
||||
VolumeFeignClient volumeFeignClient,
|
||||
JobFeignClient jobFeignClient,
|
||||
AggregateFeignClient aggregateFeignClient,
|
||||
SvmFeignClient svmFeignClient,
|
||||
NetworkFeignClient networkFeignClient,
|
||||
SANFeignClient sanFeignClient) {
|
||||
super(ontapStorage);
|
||||
// All Feign clients are in StorageStrategy parent class
|
||||
injectParentMockedClient("nasFeignClient", nasFeignClient);
|
||||
injectParentMockedClient("volumeFeignClient", volumeFeignClient);
|
||||
injectParentMockedClient("jobFeignClient", jobFeignClient);
|
||||
injectParentMockedClient("aggregateFeignClient", aggregateFeignClient);
|
||||
injectParentMockedClient("svmFeignClient", svmFeignClient);
|
||||
injectParentMockedClient("networkFeignClient", networkFeignClient);
|
||||
injectParentMockedClient("sanFeignClient", sanFeignClient);
|
||||
}
|
||||
|
||||
private void injectParentMockedClient(String fieldName, Object mockedClient) {
|
||||
try {
|
||||
Field field = StorageStrategy.class.getDeclaredField(fieldName);
|
||||
field.setAccessible(true);
|
||||
field.set(this, mockedClient);
|
||||
} catch (NoSuchFieldException | IllegalAccessException e) {
|
||||
throw new RuntimeException("Failed to inject parent mocked client: " + fieldName, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test createCloudStackVolume - Success
|
||||
@Test
|
||||
public void testCreateCloudStackVolume_Success() throws Exception {
|
||||
// Setup CloudStackVolume
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeObject volumeObject = mock(VolumeObject.class);
|
||||
VolumeVO volumeVO = mock(VolumeVO.class);
|
||||
EndPoint endPoint = mock(EndPoint.class);
|
||||
Answer answer = new Answer(null, true, "Success");
|
||||
|
||||
when(cloudStackVolume.getDatastoreId()).thenReturn("1");
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject);
|
||||
when(volumeObject.getId()).thenReturn(100L);
|
||||
when(volumeObject.getUuid()).thenReturn("volume-uuid-123");
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true);
|
||||
when(epSelector.select(volumeObject)).thenReturn(endPoint);
|
||||
when(endPoint.sendMessage(any(CreateObjectCommand.class))).thenReturn(answer);
|
||||
|
||||
// Execute
|
||||
CloudStackVolume result = strategy.createCloudStackVolume(cloudStackVolume);
|
||||
|
||||
// Verify
|
||||
assertNotNull(result);
|
||||
verify(volumeDao).update(anyLong(), any(VolumeVO.class));
|
||||
verify(epSelector).select(volumeObject);
|
||||
verify(endPoint).sendMessage(any(CreateObjectCommand.class));
|
||||
}
|
||||
|
||||
// Test createCloudStackVolume - Volume Not Found
|
||||
@Test
|
||||
public void testCreateCloudStackVolume_VolumeNotFound() {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeObject volumeObject = mock(VolumeObject.class);
|
||||
|
||||
when(cloudStackVolume.getDatastoreId()).thenReturn("1");
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject);
|
||||
when(volumeObject.getId()).thenReturn(100L);
|
||||
when(volumeDao.findById(100L)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createCloudStackVolume - KVM Host Creation Failed
|
||||
@Test
|
||||
public void testCreateCloudStackVolume_KVMHostFailed() {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeObject volumeObject = mock(VolumeObject.class);
|
||||
VolumeVO volumeVO = mock(VolumeVO.class);
|
||||
EndPoint endPoint = mock(EndPoint.class);
|
||||
Answer answer = new Answer(null, false, "Failed to create volume");
|
||||
|
||||
when(cloudStackVolume.getDatastoreId()).thenReturn("1");
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject);
|
||||
when(volumeObject.getId()).thenReturn(100L);
|
||||
when(volumeObject.getUuid()).thenReturn("volume-uuid-123");
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true);
|
||||
when(epSelector.select(volumeObject)).thenReturn(endPoint);
|
||||
when(endPoint.sendMessage(any(CreateObjectCommand.class))).thenReturn(answer);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createCloudStackVolume - No Endpoint
|
||||
@Test
|
||||
public void testCreateCloudStackVolume_NoEndpoint() {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeObject volumeObject = mock(VolumeObject.class);
|
||||
VolumeVO volumeVO = mock(VolumeVO.class);
|
||||
|
||||
when(cloudStackVolume.getDatastoreId()).thenReturn("1");
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeObject);
|
||||
when(volumeObject.getId()).thenReturn(100L);
|
||||
when(volumeObject.getUuid()).thenReturn("volume-uuid-123");
|
||||
when(volumeDao.findById(100L)).thenReturn(volumeVO);
|
||||
when(volumeDao.update(anyLong(), any(VolumeVO.class))).thenReturn(true);
|
||||
when(epSelector.select(volumeObject)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createAccessGroup - Success
|
||||
@Test
|
||||
public void testCreateAccessGroup_Success() throws Exception {
|
||||
// Setup
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123");
|
||||
details.put(OntapStorageConstants.VOLUME_NAME, "vol1");
|
||||
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getStorageIpAddress()).thenReturn("10.0.0.1");
|
||||
hosts.add(host1);
|
||||
|
||||
ExportPolicy createdPolicy = mock(ExportPolicy.class);
|
||||
when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE);
|
||||
when(createdPolicy.getName()).thenReturn("export-policy-1");
|
||||
|
||||
OntapResponse<ExportPolicy> policyResponse = new OntapResponse<>();
|
||||
List<ExportPolicy> policies = new ArrayList<>();
|
||||
policies.add(createdPolicy);
|
||||
policyResponse.setRecords(policies);
|
||||
|
||||
JobResponse jobResponse = new JobResponse();
|
||||
Job job = new Job();
|
||||
job.setUuid("job-uuid-123");
|
||||
job.setState(OntapStorageConstants.JOB_SUCCESS);
|
||||
jobResponse.setJob(job);
|
||||
|
||||
// Removed primaryDataStoreInfo mock - using storage pool ID directly
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(1L);
|
||||
when(accessGroup.getHostsToConnect()).thenReturn(hosts);
|
||||
doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse);
|
||||
when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse);
|
||||
when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job);
|
||||
doNothing().when(storagePoolDetailsDao).addDetail(anyLong(), anyString(), anyString(), eq(true));
|
||||
|
||||
// Execute
|
||||
AccessGroup result = strategy.createAccessGroup(accessGroup);
|
||||
|
||||
// Verify
|
||||
assertNotNull(result);
|
||||
verify(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
verify(nasFeignClient).getExportPolicyResponse(anyString(), anyMap());
|
||||
verify(volumeFeignClient).updateVolumeRebalancing(anyString(), eq("vol-uuid-123"), any());
|
||||
verify(storagePoolDetailsDao, times(2)).addDetail(anyLong(), anyString(), anyString(), eq(true));
|
||||
}
|
||||
|
||||
// Test createAccessGroup - Failed to Create Policy
|
||||
@Test
|
||||
public void testCreateAccessGroup_FailedToCreatePolicy() {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123");
|
||||
details.put(OntapStorageConstants.VOLUME_NAME, "vol1");
|
||||
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getStorageIpAddress()).thenReturn("10.0.0.1");
|
||||
hosts.add(host1);
|
||||
|
||||
// Removed primaryDataStoreInfo mock - using storage pool ID directly
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
when(accessGroup.getHostsToConnect()).thenReturn(hosts);
|
||||
doThrow(new RuntimeException("Failed to create policy")).when(nasFeignClient)
|
||||
.createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createAccessGroup(accessGroup);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createAccessGroup - Failed to Verify Policy
|
||||
@Test
|
||||
public void testCreateAccessGroup_FailedToVerifyPolicy() {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123");
|
||||
details.put(OntapStorageConstants.VOLUME_NAME, "vol1");
|
||||
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getStorageIpAddress()).thenReturn("10.0.0.1");
|
||||
hosts.add(host1);
|
||||
|
||||
OntapResponse<ExportPolicy> emptyResponse = new OntapResponse<>();
|
||||
emptyResponse.setRecords(new ArrayList<>());
|
||||
|
||||
// Removed primaryDataStoreInfo mock - using storage pool ID directly
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
when(accessGroup.getHostsToConnect()).thenReturn(hosts);
|
||||
doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(emptyResponse);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createAccessGroup(accessGroup);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createAccessGroup - Job Timeout
|
||||
// Note: This test is simplified to avoid 200 second wait time.
|
||||
// In reality, testing timeout would require mocking Thread.sleep() or refactoring the code.
|
||||
@Test
|
||||
public void testCreateAccessGroup_JobFailure() throws Exception {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123");
|
||||
details.put(OntapStorageConstants.VOLUME_NAME, "vol1");
|
||||
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getStorageIpAddress()).thenReturn("10.0.0.1");
|
||||
hosts.add(host1);
|
||||
|
||||
ExportPolicy createdPolicy = mock(ExportPolicy.class);
|
||||
when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE);
|
||||
when(createdPolicy.getName()).thenReturn("export-policy-1");
|
||||
|
||||
OntapResponse<ExportPolicy> policyResponse = new OntapResponse<>();
|
||||
List<ExportPolicy> policies = new ArrayList<>();
|
||||
policies.add(createdPolicy);
|
||||
policyResponse.setRecords(policies);
|
||||
|
||||
JobResponse jobResponse = new JobResponse();
|
||||
Job job = new Job();
|
||||
job.setUuid("job-uuid-123");
|
||||
job.setState(OntapStorageConstants.JOB_FAILURE); // Set to FAILURE instead of timeout
|
||||
job.setMessage("Job failed");
|
||||
jobResponse.setJob(job);
|
||||
|
||||
// Removed primaryDataStoreInfo mock - using storage pool ID directly
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(1L);
|
||||
when(accessGroup.getHostsToConnect()).thenReturn(hosts);
|
||||
doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse);
|
||||
when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse);
|
||||
when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.createAccessGroup(accessGroup);
|
||||
});
|
||||
}
|
||||
|
||||
// Test createAccessGroup - Host with Private IP
|
||||
@Test
|
||||
public void testCreateAccessGroup_HostWithPrivateIP() throws Exception {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
details.put(OntapStorageConstants.VOLUME_UUID, "vol-uuid-123");
|
||||
details.put(OntapStorageConstants.VOLUME_NAME, "vol1");
|
||||
|
||||
List<HostVO> hosts = new ArrayList<>();
|
||||
HostVO host1 = mock(HostVO.class);
|
||||
when(host1.getStorageIpAddress()).thenReturn(null);
|
||||
when(host1.getPrivateIpAddress()).thenReturn("192.168.1.10");
|
||||
hosts.add(host1);
|
||||
|
||||
ExportPolicy createdPolicy = mock(ExportPolicy.class);
|
||||
when(createdPolicy.getId()).thenReturn(java.math.BigInteger.ONE);
|
||||
when(createdPolicy.getName()).thenReturn("export-policy-1");
|
||||
|
||||
OntapResponse<ExportPolicy> policyResponse = new OntapResponse<>();
|
||||
List<ExportPolicy> policies = new ArrayList<>();
|
||||
policies.add(createdPolicy);
|
||||
policyResponse.setRecords(policies);
|
||||
|
||||
JobResponse jobResponse = new JobResponse();
|
||||
Job job = new Job();
|
||||
job.setUuid("job-uuid-123");
|
||||
job.setState(OntapStorageConstants.JOB_SUCCESS);
|
||||
jobResponse.setJob(job);
|
||||
|
||||
// Removed primaryDataStoreInfo mock - using storage pool ID directly
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(1L);
|
||||
when(accessGroup.getHostsToConnect()).thenReturn(hosts);
|
||||
doNothing().when(nasFeignClient).createExportPolicy(anyString(), any(ExportPolicy.class));
|
||||
when(nasFeignClient.getExportPolicyResponse(anyString(), anyMap())).thenReturn(policyResponse);
|
||||
when(volumeFeignClient.updateVolumeRebalancing(anyString(), anyString(), any())).thenReturn(jobResponse);
|
||||
when(jobFeignClient.getJobByUUID(anyString(), anyString())).thenReturn(job);
|
||||
doNothing().when(storagePoolDetailsDao).addDetail(anyLong(), anyString(), anyString(), eq(true));
|
||||
|
||||
// Execute
|
||||
AccessGroup result = strategy.createAccessGroup(accessGroup);
|
||||
|
||||
// Verify
|
||||
assertNotNull(result);
|
||||
ArgumentCaptor<ExportPolicy> policyCaptor = ArgumentCaptor.forClass(ExportPolicy.class);
|
||||
verify(nasFeignClient).createExportPolicy(anyString(), policyCaptor.capture());
|
||||
ExportPolicy capturedPolicy = policyCaptor.getValue();
|
||||
assertEquals("192.168.1.10/32", capturedPolicy.getRules().get(0).getClients().get(0).getMatch());
|
||||
}
|
||||
|
||||
// Test deleteAccessGroup - Success
|
||||
@Test
|
||||
public void testDeleteAccessGroup_Success() {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.EXPORT_POLICY_NAME, "export-policy-1");
|
||||
details.put(OntapStorageConstants.EXPORT_POLICY_ID, "1");
|
||||
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(1L);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
// Removed primaryDataStoreInfo.getName() - not used
|
||||
doNothing().when(nasFeignClient).deleteExportPolicyById(anyString(), anyString());
|
||||
|
||||
// Execute
|
||||
strategy.deleteAccessGroup(accessGroup);
|
||||
|
||||
// Verify
|
||||
verify(nasFeignClient).deleteExportPolicyById(anyString(), eq("1"));
|
||||
}
|
||||
|
||||
// Test deleteAccessGroup - Null AccessGroup
|
||||
@Test
|
||||
public void testDeleteAccessGroup_NullAccessGroup() {
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteAccessGroup(null);
|
||||
});
|
||||
}
|
||||
|
||||
// Test deleteAccessGroup - Null PrimaryDataStoreInfo
|
||||
@Test
|
||||
public void testDeleteAccessGroup_NullPrimaryDataStoreInfo() {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteAccessGroup(accessGroup);
|
||||
});
|
||||
}
|
||||
|
||||
// Test deleteAccessGroup - Failed to Delete
|
||||
@Test
|
||||
public void testDeleteAccessGroup_Failed() {
|
||||
AccessGroup accessGroup = mock(AccessGroup.class);
|
||||
Map<String, String> details = new HashMap<>();
|
||||
details.put(OntapStorageConstants.EXPORT_POLICY_NAME, "export-policy-1");
|
||||
details.put(OntapStorageConstants.EXPORT_POLICY_ID, "1");
|
||||
|
||||
when(accessGroup.getStoragePoolId()).thenReturn(1L);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(details);
|
||||
doThrow(new RuntimeException("Failed to delete")).when(nasFeignClient)
|
||||
.deleteExportPolicyById(anyString(), anyString());
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteAccessGroup(accessGroup);
|
||||
});
|
||||
}
|
||||
|
||||
// Test deleteCloudStackVolume - Success
|
||||
@Test
|
||||
public void testDeleteCloudStackVolume_Success() throws Exception {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeInfo volumeInfo = mock(VolumeInfo.class);
|
||||
EndPoint endpoint = mock(EndPoint.class);
|
||||
Answer answer = mock(Answer.class);
|
||||
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo);
|
||||
when(epSelector.select(volumeInfo)).thenReturn(endpoint);
|
||||
when(endpoint.sendMessage(any())).thenReturn(answer);
|
||||
when(answer.getResult()).thenReturn(true);
|
||||
|
||||
// Execute - should not throw exception
|
||||
strategy.deleteCloudStackVolume(cloudStackVolume);
|
||||
|
||||
// Verify endpoint was selected and message sent
|
||||
verify(epSelector).select(volumeInfo);
|
||||
verify(endpoint).sendMessage(any());
|
||||
}
|
||||
|
||||
// Test deleteCloudStackVolume - Endpoint Not Found
|
||||
@Test
|
||||
public void testDeleteCloudStackVolume_EndpointNotFound() {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeInfo volumeInfo = mock(VolumeInfo.class);
|
||||
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo);
|
||||
when(epSelector.select(volumeInfo)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
|
||||
// Test deleteCloudStackVolume - Answer Result False
|
||||
@Test
|
||||
public void testDeleteCloudStackVolume_AnswerResultFalse() throws Exception {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeInfo volumeInfo = mock(VolumeInfo.class);
|
||||
EndPoint endpoint = mock(EndPoint.class);
|
||||
Answer answer = mock(Answer.class);
|
||||
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo);
|
||||
when(epSelector.select(volumeInfo)).thenReturn(endpoint);
|
||||
when(endpoint.sendMessage(any())).thenReturn(answer);
|
||||
when(answer.getResult()).thenReturn(false);
|
||||
when(answer.getDetails()).thenReturn("Failed to delete volume file");
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
|
||||
// Test deleteCloudStackVolume - Answer is Null
|
||||
@Test
|
||||
public void testDeleteCloudStackVolume_AnswerNull() throws Exception {
|
||||
CloudStackVolume cloudStackVolume = mock(CloudStackVolume.class);
|
||||
VolumeInfo volumeInfo = mock(VolumeInfo.class);
|
||||
EndPoint endpoint = mock(EndPoint.class);
|
||||
|
||||
when(cloudStackVolume.getVolumeInfo()).thenReturn(volumeInfo);
|
||||
when(epSelector.select(volumeInfo)).thenReturn(endpoint);
|
||||
when(endpoint.sendMessage(any())).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> {
|
||||
strategy.deleteCloudStackVolume(cloudStackVolume);
|
||||
});
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,908 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.apache.cloudstack.storage.vmsnapshot;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
import static org.junit.jupiter.api.Assertions.assertThrows;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.ArgumentMatchers.eq;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.doThrow;
|
||||
import static org.mockito.Mockito.lenient;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotOptions;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.to.VolumeObjectTO;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Spy;
|
||||
import org.mockito.junit.jupiter.MockitoExtension;
|
||||
import org.mockito.junit.jupiter.MockitoSettings;
|
||||
import org.mockito.quality.Strictness;
|
||||
import org.apache.cloudstack.storage.utils.OntapStorageConstants;
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.FreezeThawVMAnswer;
|
||||
import com.cloud.agent.api.FreezeThawVMCommand;
|
||||
import com.cloud.agent.api.VMSnapshotTO;
|
||||
import com.cloud.exception.AgentUnavailableException;
|
||||
import com.cloud.exception.OperationTimedoutException;
|
||||
import com.cloud.hypervisor.Hypervisor;
|
||||
import com.cloud.storage.GuestOSVO;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.GuestOSDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.cloud.utils.fsm.NoTransitionException;
|
||||
import com.cloud.vm.UserVmVO;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.dao.UserVmDao;
|
||||
import com.cloud.vm.snapshot.VMSnapshot;
|
||||
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
|
||||
import com.cloud.vm.snapshot.VMSnapshotVO;
|
||||
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
|
||||
import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
|
||||
|
||||
/**
|
||||
* Unit tests for {@link OntapVMSnapshotStrategy}.
|
||||
*
|
||||
* <p>Tests cover:
|
||||
* <ul>
|
||||
* <li>canHandle(VMSnapshot) — various conditions for Allocated and non-Allocated states</li>
|
||||
* <li>canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) — allocation-phase checks</li>
|
||||
* <li>takeVMSnapshot — state transition failure scenarios</li>
|
||||
* <li>Freeze/thaw behavior (freeze success/failure, thaw success/failure, agent errors)</li>
|
||||
* <li>Quiesce behavior (honors user input; freeze/thaw only when quiesce=true)</li>
|
||||
* </ul>
|
||||
*/
|
||||
@ExtendWith(MockitoExtension.class)
|
||||
@MockitoSettings(strictness = Strictness.LENIENT)
|
||||
class OntapVMSnapshotStrategyTest {
|
||||
|
||||
private static final long VM_ID = 100L;
|
||||
private static final long HOST_ID = 10L;
|
||||
private static final long SNAPSHOT_ID = 200L;
|
||||
private static final long VOLUME_ID_1 = 301L;
|
||||
private static final long VOLUME_ID_2 = 302L;
|
||||
private static final long POOL_ID_1 = 401L;
|
||||
private static final long POOL_ID_2 = 402L;
|
||||
private static final long GUEST_OS_ID = 50L;
|
||||
private static final String VM_INSTANCE_NAME = "i-2-100-VM";
|
||||
private static final String VM_UUID = "vm-uuid-123";
|
||||
|
||||
@Spy
|
||||
@InjectMocks
|
||||
private OntapVMSnapshotStrategy strategy;
|
||||
|
||||
@Mock
|
||||
private UserVmDao userVmDao;
|
||||
@Mock
|
||||
private VolumeDao volumeDao;
|
||||
@Mock
|
||||
private PrimaryDataStoreDao storagePool;
|
||||
@Mock
|
||||
private StoragePoolDetailsDao storagePoolDetailsDao;
|
||||
@Mock
|
||||
private VMSnapshotDetailsDao vmSnapshotDetailsDao;
|
||||
@Mock
|
||||
private VMSnapshotHelper vmSnapshotHelper;
|
||||
@Mock
|
||||
private VMSnapshotDao vmSnapshotDao;
|
||||
@Mock
|
||||
private AgentManager agentMgr;
|
||||
@Mock
|
||||
private GuestOSDao guestOSDao;
|
||||
@Mock
|
||||
private VolumeDataFactory volumeDataFactory;
|
||||
@Mock
|
||||
private VolumeDetailsDao volumeDetailsDao;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
// @InjectMocks handles injection into inherited fields
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Helper: create common mocks
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
private UserVmVO createMockUserVm(Hypervisor.HypervisorType hypervisorType, VirtualMachine.State state) {
|
||||
UserVmVO userVm = mock(UserVmVO.class);
|
||||
when(userVm.getHypervisorType()).thenReturn(hypervisorType);
|
||||
when(userVm.getState()).thenReturn(state);
|
||||
return userVm;
|
||||
}
|
||||
|
||||
private VolumeVO createMockVolume(long volumeId, long poolId) {
|
||||
VolumeVO volume = mock(VolumeVO.class);
|
||||
when(volume.getId()).thenReturn(volumeId);
|
||||
when(volume.getPoolId()).thenReturn(poolId);
|
||||
return volume;
|
||||
}
|
||||
|
||||
private StoragePoolVO createOntapManagedPool(long poolId) {
|
||||
StoragePoolVO pool = mock(StoragePoolVO.class);
|
||||
when(pool.isManaged()).thenReturn(true);
|
||||
when(pool.getStorageProviderName()).thenReturn(OntapStorageConstants.ONTAP_PLUGIN_NAME);
|
||||
return pool;
|
||||
}
|
||||
|
||||
private VMSnapshotVO createMockVmSnapshot(VMSnapshot.State state, VMSnapshot.Type type) {
|
||||
VMSnapshotVO vmSnapshot = mock(VMSnapshotVO.class);
|
||||
when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID);
|
||||
when(vmSnapshot.getVmId()).thenReturn(VM_ID);
|
||||
when(vmSnapshot.getState()).thenReturn(state);
|
||||
lenient().when(vmSnapshot.getType()).thenReturn(type);
|
||||
return vmSnapshot;
|
||||
}
|
||||
|
||||
private void setupAllVolumesOnOntap() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol1 = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
VolumeVO vol2 = createMockVolume(VOLUME_ID_2, POOL_ID_2);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Arrays.asList(vol1, vol2));
|
||||
|
||||
StoragePoolVO pool1 = createOntapManagedPool(POOL_ID_1);
|
||||
StoragePoolVO pool2 = createOntapManagedPool(POOL_ID_2);
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(pool1);
|
||||
when(storagePool.findById(POOL_ID_2)).thenReturn(pool2);
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: canHandle(VMSnapshot)
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_AllVolumesOnOntap_ReturnsHighest() {
|
||||
setupAllVolumesOnOntap();
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.HIGHEST, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskAndMemoryType_ReturnsCantHandle() {
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.DiskAndMemory);
|
||||
when(vmSnapshot.getVmId()).thenReturn(VM_ID);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VmNotFound_ReturnsCantHandle() {
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(null);
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VmxenHypervisor_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.XenServer, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VmNotRunning_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Stopped);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_NoVolumes_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.emptyList());
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VolumeOnNonManagedPool_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol));
|
||||
|
||||
StoragePoolVO pool = mock(StoragePoolVO.class);
|
||||
when(pool.isManaged()).thenReturn(false);
|
||||
when(pool.getName()).thenReturn("non-managed-pool");
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(pool);
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VolumeOnNonOntapManagedPool_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol));
|
||||
|
||||
StoragePoolVO pool = mock(StoragePoolVO.class);
|
||||
when(pool.isManaged()).thenReturn(true);
|
||||
when(pool.getStorageProviderName()).thenReturn("SolidFire");
|
||||
when(pool.getName()).thenReturn("solidfire-pool");
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(pool);
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_VolumeWithNullPoolId_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol = mock(VolumeVO.class);
|
||||
when(vol.getPoolId()).thenReturn(null);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol));
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_AllocatedDiskType_PoolNotFound_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol));
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(null);
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_NonAllocated_HasFlexVolSnapshotDetails_AllOnOntap_ReturnsHighest() {
|
||||
setupAllVolumesOnOntap();
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk);
|
||||
|
||||
List<VMSnapshotDetailsVO> details = new ArrayList<>();
|
||||
details.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT,
|
||||
"flex-uuid::snap-uuid::vmsnap_200_123::401", true));
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(details);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.HIGHEST, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_NonAllocated_HasLegacyStorageSnapshotDetails_AllOnOntap_ReturnsHighest() {
|
||||
setupAllVolumesOnOntap();
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk);
|
||||
|
||||
// No FlexVol details
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(Collections.emptyList());
|
||||
// Has legacy details
|
||||
List<VMSnapshotDetailsVO> details = new ArrayList<>();
|
||||
details.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, "kvmStorageSnapshot", "123", true));
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, "kvmStorageSnapshot")).thenReturn(details);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.HIGHEST, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_NonAllocated_NoDetails_ReturnsCantHandle() {
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk);
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(Collections.emptyList());
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, "kvmStorageSnapshot")).thenReturn(Collections.emptyList());
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_NonAllocated_HasFlexVolDetails_NotOnOntap_ReturnsCantHandle() {
|
||||
// VM has FlexVol details but volumes are now on non-ONTAP storage
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Collections.singletonList(vol));
|
||||
|
||||
StoragePoolVO pool = mock(StoragePoolVO.class);
|
||||
when(pool.isManaged()).thenReturn(false);
|
||||
when(pool.getName()).thenReturn("other-pool");
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(pool);
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Ready, VMSnapshot.Type.Disk);
|
||||
List<VMSnapshotDetailsVO> flexVolDetails = new ArrayList<>();
|
||||
flexVolDetails.add(new VMSnapshotDetailsVO(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT,
|
||||
"flex-uuid::snap-uuid::vmsnap_200_123::401", true));
|
||||
when(vmSnapshotDetailsDao.findDetails(SNAPSHOT_ID, OntapStorageConstants.ONTAP_FLEXVOL_SNAPSHOT)).thenReturn(flexVolDetails);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandle_MixedPools_OneOntapOneNot_ReturnsCantHandle() {
|
||||
UserVmVO userVm = createMockUserVm(Hypervisor.HypervisorType.KVM, VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
VolumeVO vol1 = createMockVolume(VOLUME_ID_1, POOL_ID_1);
|
||||
VolumeVO vol2 = createMockVolume(VOLUME_ID_2, POOL_ID_2);
|
||||
when(volumeDao.findByInstance(VM_ID)).thenReturn(Arrays.asList(vol1, vol2));
|
||||
|
||||
StoragePoolVO ontapPool = createOntapManagedPool(POOL_ID_1);
|
||||
StoragePoolVO otherPool = mock(StoragePoolVO.class);
|
||||
when(otherPool.isManaged()).thenReturn(true);
|
||||
when(otherPool.getStorageProviderName()).thenReturn("SolidFire");
|
||||
when(otherPool.getName()).thenReturn("sf-pool");
|
||||
when(storagePool.findById(POOL_ID_1)).thenReturn(ontapPool);
|
||||
when(storagePool.findById(POOL_ID_2)).thenReturn(otherPool);
|
||||
|
||||
VMSnapshotVO vmSnapshot = createMockVmSnapshot(VMSnapshot.State.Allocated, VMSnapshot.Type.Disk);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(vmSnapshot);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory)
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testCanHandleByVmId_MemorySnapshot_ReturnsCantHandle() {
|
||||
StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, true);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandleByVmId_DiskOnly_AllOnOntap_ReturnsHighest() {
|
||||
setupAllVolumesOnOntap();
|
||||
|
||||
StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, false);
|
||||
|
||||
assertEquals(StrategyPriority.HIGHEST, result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testCanHandleByVmId_DiskOnly_NotOnOntap_ReturnsCantHandle() {
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(null);
|
||||
|
||||
StrategyPriority result = strategy.canHandle(VM_ID, POOL_ID_1, false);
|
||||
|
||||
assertEquals(StrategyPriority.CANT_HANDLE, result);
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: groupVolumesByFlexVol
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testGroupVolumesByFlexVol_SingleFlexVol_TwoVolumes() {
|
||||
VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO1.getId()).thenReturn(VOLUME_ID_1);
|
||||
VolumeObjectTO volumeTO2 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO2.getId()).thenReturn(VOLUME_ID_2);
|
||||
|
||||
VolumeVO vol1 = mock(VolumeVO.class);
|
||||
when(vol1.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(vol1.getPoolId()).thenReturn(POOL_ID_1);
|
||||
VolumeVO vol2 = mock(VolumeVO.class);
|
||||
when(vol2.getId()).thenReturn(VOLUME_ID_2);
|
||||
when(vol2.getPoolId()).thenReturn(POOL_ID_1); // same pool → same FlexVol
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1);
|
||||
when(volumeDao.findById(VOLUME_ID_2)).thenReturn(vol2);
|
||||
|
||||
Map<String, String> poolDetails = new HashMap<>();
|
||||
poolDetails.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1");
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails);
|
||||
|
||||
Map<String, OntapVMSnapshotStrategy.FlexVolGroupInfo> groups =
|
||||
strategy.groupVolumesByFlexVol(Arrays.asList(volumeTO1, volumeTO2));
|
||||
|
||||
assertEquals(1, groups.size());
|
||||
assertEquals(2, groups.get("flexvol-uuid-1").volumeIds.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGroupVolumesByFlexVol_TwoFlexVols() {
|
||||
VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO1.getId()).thenReturn(VOLUME_ID_1);
|
||||
VolumeObjectTO volumeTO2 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO2.getId()).thenReturn(VOLUME_ID_2);
|
||||
|
||||
VolumeVO vol1 = mock(VolumeVO.class);
|
||||
when(vol1.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(vol1.getPoolId()).thenReturn(POOL_ID_1);
|
||||
VolumeVO vol2 = mock(VolumeVO.class);
|
||||
when(vol2.getId()).thenReturn(VOLUME_ID_2);
|
||||
when(vol2.getPoolId()).thenReturn(POOL_ID_2); // different pool → different FlexVol
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1);
|
||||
when(volumeDao.findById(VOLUME_ID_2)).thenReturn(vol2);
|
||||
|
||||
Map<String, String> poolDetails1 = new HashMap<>();
|
||||
poolDetails1.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1");
|
||||
Map<String, String> poolDetails2 = new HashMap<>();
|
||||
poolDetails2.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-2");
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails1);
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_2)).thenReturn(poolDetails2);
|
||||
|
||||
Map<String, OntapVMSnapshotStrategy.FlexVolGroupInfo> groups =
|
||||
strategy.groupVolumesByFlexVol(Arrays.asList(volumeTO1, volumeTO2));
|
||||
|
||||
assertEquals(2, groups.size());
|
||||
assertEquals(1, groups.get("flexvol-uuid-1").volumeIds.size());
|
||||
assertEquals(1, groups.get("flexvol-uuid-2").volumeIds.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGroupVolumesByFlexVol_MissingFlexVolUuid_ThrowsException() {
|
||||
VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO1.getId()).thenReturn(VOLUME_ID_1);
|
||||
|
||||
VolumeVO vol1 = mock(VolumeVO.class);
|
||||
when(vol1.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(vol1.getPoolId()).thenReturn(POOL_ID_1);
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol1);
|
||||
|
||||
Map<String, String> poolDetails = new HashMap<>();
|
||||
// No VOLUME_UUID key
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails);
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.groupVolumesByFlexVol(Collections.singletonList(volumeTO1)));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGroupVolumesByFlexVol_VolumeNotFound_ThrowsException() {
|
||||
VolumeObjectTO volumeTO1 = mock(VolumeObjectTO.class);
|
||||
when(volumeTO1.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.groupVolumesByFlexVol(Collections.singletonList(volumeTO1)));
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: FlexVolSnapshotDetail parse/toString
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testFlexVolSnapshotDetail_ParseAndToString_NewFormat() {
|
||||
String value = "flexvol-uuid-1::snap-uuid-1::vmsnap_200_1234567890::root-disk.qcow2::401::NFS3";
|
||||
OntapVMSnapshotStrategy.FlexVolSnapshotDetail detail =
|
||||
OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse(value);
|
||||
|
||||
assertEquals("flexvol-uuid-1", detail.flexVolUuid);
|
||||
assertEquals("snap-uuid-1", detail.snapshotUuid);
|
||||
assertEquals("vmsnap_200_1234567890", detail.snapshotName);
|
||||
assertEquals("root-disk.qcow2", detail.volumePath);
|
||||
assertEquals(401L, detail.poolId);
|
||||
assertEquals("NFS3", detail.protocol);
|
||||
assertEquals(value, detail.toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testFlexVolSnapshotDetail_ParseLegacy4FieldFormat() {
|
||||
// Legacy format without volumePath and protocol
|
||||
String value = "flexvol-uuid-1::snap-uuid-1::vmsnap_200_1234567890::401";
|
||||
OntapVMSnapshotStrategy.FlexVolSnapshotDetail detail =
|
||||
OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse(value);
|
||||
|
||||
assertEquals("flexvol-uuid-1", detail.flexVolUuid);
|
||||
assertEquals("snap-uuid-1", detail.snapshotUuid);
|
||||
assertEquals("vmsnap_200_1234567890", detail.snapshotName);
|
||||
assertEquals(null, detail.volumePath);
|
||||
assertEquals(401L, detail.poolId);
|
||||
assertEquals(null, detail.protocol);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testFlexVolSnapshotDetail_ParseInvalidFormat_ThrowsException() {
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("invalid-format"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testFlexVolSnapshotDetail_ParseTooFewParts_ThrowsException() {
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("a::b::c"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testFlexVolSnapshotDetail_Parse5Parts_ThrowsException() {
|
||||
// 5 parts is neither legacy (4) nor current (6) format
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> OntapVMSnapshotStrategy.FlexVolSnapshotDetail.parse("a::b::c::d::e"));
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: buildSnapshotName
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testBuildSnapshotName_Format() {
|
||||
VMSnapshotVO vmSnapshot = mock(VMSnapshotVO.class);
|
||||
when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID);
|
||||
|
||||
String name = strategy.buildSnapshotName(vmSnapshot);
|
||||
|
||||
assertEquals(true, name.startsWith("vmsnap_200_"));
|
||||
assertEquals(true, name.length() <= OntapStorageConstants.MAX_SNAPSHOT_NAME_LENGTH);
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: resolveVolumePathOnOntap
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testResolveVolumePathOnOntap_NFS_ReturnsVolumePath() {
|
||||
VolumeVO vol = mock(VolumeVO.class);
|
||||
when(vol.getPath()).thenReturn("abc123-def456.qcow2");
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(vol);
|
||||
|
||||
String path = strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "NFS3", new HashMap<>());
|
||||
|
||||
assertEquals("abc123-def456.qcow2", path);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testResolveVolumePathOnOntap_ISCSI_ReturnsLunName() {
|
||||
VolumeDetailVO lunDetail = mock(VolumeDetailVO.class);
|
||||
when(lunDetail.getValue()).thenReturn("/vol/vol1/lun_301");
|
||||
when(volumeDetailsDao.findDetail(VOLUME_ID_1, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(lunDetail);
|
||||
|
||||
String path = strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "ISCSI", new HashMap<>());
|
||||
|
||||
assertEquals("/vol/vol1/lun_301", path);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testResolveVolumePathOnOntap_ISCSI_NoLunDetail_ThrowsException() {
|
||||
when(volumeDetailsDao.findDetail(VOLUME_ID_1, OntapStorageConstants.LUN_DOT_NAME)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "ISCSI", new HashMap<>()));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testResolveVolumePathOnOntap_NFS_VolumeNotFound_ThrowsException() {
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(null);
|
||||
|
||||
assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.resolveVolumePathOnOntap(VOLUME_ID_1, "NFS3", new HashMap<>()));
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: takeVMSnapshot — State transitions & Freeze/Thaw
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_StateTransitionFails_ThrowsCloudRuntimeException() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
when(vmSnapshotHelper.pickRunningHost(VM_ID)).thenReturn(HOST_ID);
|
||||
UserVmVO userVm = mock(UserVmVO.class);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
// State transition fails
|
||||
doThrow(new NoTransitionException("Cannot transition")).when(vmSnapshotHelper)
|
||||
.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.CreateRequested);
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> strategy.takeVMSnapshot(vmSnapshot));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_FreezeFailure_ThrowsException() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
// Freeze failure
|
||||
FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class);
|
||||
when(freezeAnswer.getResult()).thenReturn(false);
|
||||
when(freezeAnswer.getDetails()).thenReturn("qemu-guest-agent not responding");
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))).thenReturn(freezeAnswer);
|
||||
|
||||
// Cleanup mocks for finally block
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
CloudRuntimeException ex = assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.takeVMSnapshot(vmSnapshot));
|
||||
|
||||
assertEquals(true, ex.getMessage().contains("Could not freeze VM"));
|
||||
assertEquals(true, ex.getMessage().contains("qemu-guest-agent"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_FreezeReturnsNull_ThrowsException() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
// Freeze returns null
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class))).thenReturn(null);
|
||||
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
assertThrows(CloudRuntimeException.class, () -> strategy.takeVMSnapshot(vmSnapshot));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_AgentUnavailable_ThrowsCloudRuntimeException() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class)))
|
||||
.thenThrow(new AgentUnavailableException(HOST_ID));
|
||||
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
CloudRuntimeException ex = assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.takeVMSnapshot(vmSnapshot));
|
||||
assertEquals(true, ex.getMessage().contains("failed"));
|
||||
}
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_OperationTimeout_ThrowsCloudRuntimeException() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class)))
|
||||
.thenThrow(new OperationTimedoutException(null, 0, 0, 0, false));
|
||||
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
CloudRuntimeException ex = assertThrows(CloudRuntimeException.class,
|
||||
() -> strategy.takeVMSnapshot(vmSnapshot));
|
||||
assertEquals(true, ex.getMessage().contains("timed out"));
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: Quiesce Behavior
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_QuiesceFalse_SkipsFreezeThaw() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
// Explicitly set quiesce to false
|
||||
VMSnapshotOptions options = mock(VMSnapshotOptions.class);
|
||||
when(options.needQuiesceVM()).thenReturn(false);
|
||||
when(vmSnapshot.getOptions()).thenReturn(options);
|
||||
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
// The FlexVolume snapshot flow will try to call Utility.getStrategyByStoragePoolDetails
|
||||
// which is a static method that makes real connections. We expect this to fail in unit tests.
|
||||
// The important thing is that freeze/thaw was NOT called before the failure.
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
// Since Utility.getStrategyByStoragePoolDetails is static and creates real Feign clients,
|
||||
// this will fail. We just verify that freeze was never called.
|
||||
try {
|
||||
strategy.takeVMSnapshot(vmSnapshot);
|
||||
} catch (Exception e) {
|
||||
// Expected — static utility can't be mocked in unit test
|
||||
}
|
||||
|
||||
// No freeze/thaw commands should be sent when quiesce is false
|
||||
verify(agentMgr, never()).send(eq(HOST_ID), any(FreezeThawVMCommand.class));
|
||||
}
|
||||
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
// Tests: Parent snapshot chain
|
||||
// ══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_WithParentSnapshot_SetsParentId() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
// Has a current (parent) snapshot
|
||||
VMSnapshotVO currentSnapshot = mock(VMSnapshotVO.class);
|
||||
when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(currentSnapshot);
|
||||
VMSnapshotTO parentTO = mock(VMSnapshotTO.class);
|
||||
when(parentTO.getId()).thenReturn(199L);
|
||||
when(vmSnapshotHelper.getSnapshotWithParents(currentSnapshot)).thenReturn(parentTO);
|
||||
|
||||
// Freeze success (since quiesce=true by default)
|
||||
FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class);
|
||||
when(freezeAnswer.getResult()).thenReturn(true);
|
||||
FreezeThawVMAnswer thawAnswer = mock(FreezeThawVMAnswer.class);
|
||||
when(thawAnswer.getResult()).thenReturn(true);
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class)))
|
||||
.thenReturn(freezeAnswer)
|
||||
.thenReturn(thawAnswer);
|
||||
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
// FlexVol snapshot flow will fail on static method, but parent should already be set
|
||||
try {
|
||||
strategy.takeVMSnapshot(vmSnapshot);
|
||||
} catch (Exception e) {
|
||||
// Expected
|
||||
}
|
||||
|
||||
// Verify parent was set on the VM snapshot before the FlexVol snapshot attempt
|
||||
verify(vmSnapshot).setParent(199L);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testTakeVMSnapshot_WithNoParentSnapshot_SetsParentNull() throws Exception {
|
||||
VMSnapshotVO vmSnapshot = createTakeSnapshotVmSnapshot();
|
||||
setupTakeSnapshotCommon(vmSnapshot);
|
||||
setupSingleVolumeForTakeSnapshot();
|
||||
|
||||
when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(null);
|
||||
|
||||
FreezeThawVMAnswer freezeAnswer = mock(FreezeThawVMAnswer.class);
|
||||
when(freezeAnswer.getResult()).thenReturn(true);
|
||||
FreezeThawVMAnswer thawAnswer = mock(FreezeThawVMAnswer.class);
|
||||
when(thawAnswer.getResult()).thenReturn(true);
|
||||
when(agentMgr.send(eq(HOST_ID), any(FreezeThawVMCommand.class)))
|
||||
.thenReturn(freezeAnswer)
|
||||
.thenReturn(thawAnswer);
|
||||
|
||||
when(vmSnapshotDetailsDao.listDetails(SNAPSHOT_ID)).thenReturn(Collections.emptyList());
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(any(), eq(VMSnapshot.Event.OperationFailed));
|
||||
|
||||
try {
|
||||
strategy.takeVMSnapshot(vmSnapshot);
|
||||
} catch (Exception e) {
|
||||
// Expected
|
||||
}
|
||||
|
||||
verify(vmSnapshot).setParent(null);
|
||||
}
|
||||
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
// Helper: Set up common mocks for takeVMSnapshot tests
|
||||
// ──────────────────────────────────────────────────────────────────────────
|
||||
|
||||
private VMSnapshotVO createTakeSnapshotVmSnapshot() {
|
||||
VMSnapshotVO vmSnapshot = mock(VMSnapshotVO.class);
|
||||
when(vmSnapshot.getId()).thenReturn(SNAPSHOT_ID);
|
||||
when(vmSnapshot.getVmId()).thenReturn(VM_ID);
|
||||
lenient().when(vmSnapshot.getName()).thenReturn("vm-snap-1");
|
||||
lenient().when(vmSnapshot.getType()).thenReturn(VMSnapshot.Type.Disk);
|
||||
lenient().when(vmSnapshot.getDescription()).thenReturn("Test ONTAP VM Snapshot");
|
||||
lenient().when(vmSnapshot.getOptions()).thenReturn(new VMSnapshotOptions(true));
|
||||
return vmSnapshot;
|
||||
}
|
||||
|
||||
private UserVmVO setupTakeSnapshotCommon(VMSnapshotVO vmSnapshot) throws Exception {
|
||||
when(vmSnapshotHelper.pickRunningHost(VM_ID)).thenReturn(HOST_ID);
|
||||
|
||||
UserVmVO userVm = mock(UserVmVO.class);
|
||||
when(userVm.getId()).thenReturn(VM_ID);
|
||||
when(userVm.getGuestOSId()).thenReturn(GUEST_OS_ID);
|
||||
when(userVm.getInstanceName()).thenReturn(VM_INSTANCE_NAME);
|
||||
when(userVm.getUuid()).thenReturn(VM_UUID);
|
||||
when(userVm.getState()).thenReturn(VirtualMachine.State.Running);
|
||||
when(userVmDao.findById(VM_ID)).thenReturn(userVm);
|
||||
|
||||
GuestOSVO guestOS = mock(GuestOSVO.class);
|
||||
when(guestOS.getDisplayName()).thenReturn("CentOS 8");
|
||||
when(guestOSDao.findById(GUEST_OS_ID)).thenReturn(guestOS);
|
||||
|
||||
when(vmSnapshotDao.findCurrentSnapshotByVmId(VM_ID)).thenReturn(null);
|
||||
|
||||
doReturn(true).when(vmSnapshotHelper).vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.CreateRequested);
|
||||
|
||||
return userVm;
|
||||
}
|
||||
|
||||
private void setupSingleVolumeForTakeSnapshot() {
|
||||
VolumeObjectTO volumeTO = mock(VolumeObjectTO.class);
|
||||
when(volumeTO.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(volumeTO.getSize()).thenReturn(10737418240L);
|
||||
List<VolumeObjectTO> volumeTOs = Collections.singletonList(volumeTO);
|
||||
when(vmSnapshotHelper.getVolumeTOList(VM_ID)).thenReturn(volumeTOs);
|
||||
|
||||
VolumeVO volumeVO = mock(VolumeVO.class);
|
||||
when(volumeVO.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(volumeVO.getPoolId()).thenReturn(POOL_ID_1);
|
||||
when(volumeVO.getVmSnapshotChainSize()).thenReturn(null);
|
||||
when(volumeDao.findById(VOLUME_ID_1)).thenReturn(volumeVO);
|
||||
|
||||
// Pool details for FlexVol grouping
|
||||
Map<String, String> poolDetails = new HashMap<>();
|
||||
poolDetails.put(OntapStorageConstants.VOLUME_UUID, "flexvol-uuid-1");
|
||||
poolDetails.put(OntapStorageConstants.USERNAME, "admin");
|
||||
poolDetails.put(OntapStorageConstants.PASSWORD, "pass");
|
||||
poolDetails.put(OntapStorageConstants.STORAGE_IP, "10.0.0.1");
|
||||
poolDetails.put(OntapStorageConstants.SVM_NAME, "svm1");
|
||||
poolDetails.put(OntapStorageConstants.SIZE, "107374182400");
|
||||
poolDetails.put(OntapStorageConstants.PROTOCOL, "NFS3");
|
||||
when(storagePoolDetailsDao.listDetailsKeyPairs(POOL_ID_1)).thenReturn(poolDetails);
|
||||
|
||||
VolumeInfo volumeInfo = mock(VolumeInfo.class);
|
||||
when(volumeInfo.getId()).thenReturn(VOLUME_ID_1);
|
||||
when(volumeInfo.getName()).thenReturn("vol-1");
|
||||
when(volumeDataFactory.getVolume(VOLUME_ID_1)).thenReturn(volumeInfo);
|
||||
}
|
||||
}
|
||||
|
|
@ -135,6 +135,7 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
|
|||
public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase implements VMSnapshotManager, VMSnapshotService, VmWorkJobHandler, Configurable {
|
||||
|
||||
public static final String VM_WORK_JOB_HANDLER = VMSnapshotManagerImpl.class.getSimpleName();
|
||||
public static final String ONTAP_PLUGIN_NAME = "NetApp ONTAP";
|
||||
|
||||
@Inject
|
||||
VMInstanceDao _vmInstanceDao;
|
||||
|
|
@ -390,6 +391,15 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme
|
|||
//Other Storage volume plugins could integrate this with their own functionality for group snapshots
|
||||
VMSnapshotStrategy snapshotStrategy = storageStrategyFactory.getVmSnapshotStrategy(userVmVo.getId(), rootVolumePool.getId(), snapshotMemory);
|
||||
if (snapshotStrategy == null) {
|
||||
// Check if this is ONTAP managed storage with memory snapshot request - provide specific error message
|
||||
if (snapshotMemory && rootVolumePool.isManaged() &&
|
||||
ONTAP_PLUGIN_NAME.equals(rootVolumePool.getStorageProviderName())) {
|
||||
String message = String.format("Memory snapshots (snapshotmemory=true) are not supported for VMs on ONTAP managed storage. " +
|
||||
"Instance [%s] uses ONTAP storage which only supports disk-only (crash-consistent) snapshots. " +
|
||||
"Please use snapshotmemory=false for disk-only snapshots.", userVmVo.getUuid());
|
||||
logger.error(message);
|
||||
throw new CloudRuntimeException(message);
|
||||
}
|
||||
String message = String.format("No strategy was able to handle requested snapshot for Instance [%s].", userVmVo.getUuid());
|
||||
logger.error(message);
|
||||
throw new CloudRuntimeException(message);
|
||||
|
|
|
|||
|
|
@ -2949,6 +2949,12 @@
|
|||
"label.leased": "Leased",
|
||||
"label.totalduration": "Total duration",
|
||||
"label.usestoragereplication": "Use primary storage replication",
|
||||
"label.ontap.username.tooltip": "The Username for the NetApp ONTAP storage array",
|
||||
"label.ontap.password.tooltip": "The Password for the NetApp ONTAP storage array",
|
||||
"label.ontap.ip.tooltip": "The IP for the NetApp ONTAP storage array",
|
||||
"label.ontap.svm.name.tooltip": "The SVM Name for the NetApp ONTAP storage array",
|
||||
"label.ontap.ip": "Storage Array IP",
|
||||
"label.ontap.svm.name": "SVM Name",
|
||||
"message.acquire.ip.failed": "Failed to acquire IP.",
|
||||
"message.action.acquire.ip": "Please confirm that you want to acquire new IP.",
|
||||
"message.action.cancel.maintenance": "Your host has been successfully canceled for maintenance. This process can take up to several minutes.",
|
||||
|
|
|
|||
|
|
@ -242,7 +242,7 @@
|
|||
</a-select>
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="form.provider !== 'DefaultPrimary' && form.provider !== 'PowerFlex' && form.provider !== 'Linstor' && form.protocol !== 'FiberChannel'">
|
||||
<div v-if="form.provider !== 'DefaultPrimary' && form.provider !== 'PowerFlex' && form.provider !== 'Linstor' && form.provider !== 'NetApp ONTAP' && form.protocol !== 'FiberChannel'">
|
||||
<a-form-item name="managed" ref="managed">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.ismanaged')" :tooltip="apiParams.managed.description"/>
|
||||
|
|
@ -270,6 +270,38 @@
|
|||
<a-input v-model:value="form.url" :placeholder="apiParams.url.description" />
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="form.provider === 'NetApp ONTAP'">
|
||||
<a-form-item name="ontapIP" ref="ontapIP">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.ontap.ip')" :tooltip="$t('label.ontap.ip.tooltip')"/>
|
||||
</template>
|
||||
<a-input v-model:value="form.ontapIP" :placeholder="$t('label.ontap.ip.tooltip')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="ontapUsername" ref="ontapUsername">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.username')" :tooltip="$t('label.ontap.username.tooltip')"/>
|
||||
</template>
|
||||
<a-input v-model:value="form.ontapUsername" :placeholder="$t('label.ontap.username.tooltip')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="ontapPassword" ref="ontapPassword">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.password')" :tooltip="$t('label.ontap.password.tooltip')"/>
|
||||
</template>
|
||||
<a-input-password v-model:value="form.ontapPassword" :placeholder="$t('label.ontap.password.tooltip')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="ontapSvmName" ref="ontapSvmName">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.ontap.svm.name')" :tooltip="$t('label.ontap.svm.name.tooltip')"/>
|
||||
</template>
|
||||
<a-input v-model:value="form.ontapSvmName" :placeholder="$t('label.ontap.svm.name.tooltip')"/>
|
||||
</a-form-item>
|
||||
<a-form-item name="capacityBytes" ref="capacityBytes">
|
||||
<template #label>
|
||||
<tooltip-label :title="$t('label.capacitybytes')" :tooltip="apiParams.capacitybytes.description"/>
|
||||
</template>
|
||||
<a-input v-model:value="form.capacityBytes" :placeholder="apiParams.capacitybytes.description" />
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="form.provider === 'PowerFlex'">
|
||||
<a-form-item name="powerflexGateway" ref="powerflexGateway">
|
||||
<template #label>
|
||||
|
|
@ -516,7 +548,11 @@ export default {
|
|||
primeraPassword: [{ required: true, message: this.$t('label.password') }],
|
||||
flashArrayURL: [{ required: true, message: this.$t('label.url') }],
|
||||
flashArrayUsername: [{ required: true, message: this.$t('label.username') }],
|
||||
flashArrayPassword: [{ required: true, message: this.$t('label.password') }]
|
||||
flashArrayPassword: [{ required: true, message: this.$t('label.password') }],
|
||||
ontapIP: [{ required: true, message: this.$t('label.required') }],
|
||||
ontapUsername: [{ required: true, message: this.$t('label.required') }],
|
||||
ontapPassword: [{ required: true, message: this.$t('label.required') }],
|
||||
ontapSvmName: [{ required: true, message: this.$t('label.required') }]
|
||||
})
|
||||
},
|
||||
fetchData () {
|
||||
|
|
@ -761,6 +797,12 @@ export default {
|
|||
gateway + '/' + encodeURIComponent(pool)
|
||||
return url
|
||||
},
|
||||
|
||||
ontapURL (ontapIp) {
|
||||
var url = 'https://' + ontapIp
|
||||
return url
|
||||
},
|
||||
|
||||
updateProviderAndProtocol (value) {
|
||||
if (value === 'PowerFlex') {
|
||||
this.protocols = ['custom']
|
||||
|
|
@ -768,6 +810,9 @@ export default {
|
|||
} else if (value === 'Flash Array' || value === 'Primera') {
|
||||
this.protocols = ['FiberChannel']
|
||||
this.form.protocol = 'FiberChannel'
|
||||
} else if (value === 'NetApp ONTAP') {
|
||||
this.protocols = ['NFS3', 'ISCSI']
|
||||
this.form.protocol = 'NFS3'
|
||||
} else {
|
||||
this.fetchHypervisor(value)
|
||||
}
|
||||
|
|
@ -890,6 +935,14 @@ export default {
|
|||
params['details[0].api_username'] = values.flashArrayUsername
|
||||
params['details[0].api_password'] = values.flashArrayPassword
|
||||
url = values.flashArrayURL
|
||||
} else if (values.provider === 'NetApp ONTAP') {
|
||||
params['details[0].storageIP'] = values.ontapIP
|
||||
params['details[0].username'] = values.ontapUsername
|
||||
params['details[0].password'] = btoa(values.ontapPassword)
|
||||
params['details[0].svmName'] = values.ontapSvmName
|
||||
params['details[0].protocol'] = values.protocol
|
||||
values.managed = true
|
||||
url = this.ontapURL(values.ontapIP)
|
||||
}
|
||||
|
||||
if (values.provider === 'Linstor' || values.protocol === 'Linstor') {
|
||||
|
|
|
|||
|
|
@ -575,7 +575,7 @@ export default {
|
|||
key: 'managed',
|
||||
checkbox: true,
|
||||
hidden: {
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor']
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor', 'NetApp ONTAP']
|
||||
}
|
||||
},
|
||||
{
|
||||
|
|
@ -589,14 +589,14 @@ export default {
|
|||
title: 'label.capacityiops',
|
||||
key: 'capacityIops',
|
||||
hidden: {
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor']
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor', 'NetApp ONTAP']
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.url',
|
||||
key: 'url',
|
||||
hidden: {
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor']
|
||||
provider: ['DefaultPrimary', 'PowerFlex', 'Linstor', 'NetApp ONTAP']
|
||||
}
|
||||
},
|
||||
{
|
||||
|
|
@ -636,6 +636,43 @@ export default {
|
|||
provider: 'PowerFlex'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.ontap.ip',
|
||||
key: 'ontapIP',
|
||||
required: true,
|
||||
placeHolder: 'message.error.input.value',
|
||||
display: {
|
||||
provider: 'NetApp ONTAP'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.username',
|
||||
key: 'ontapUsername',
|
||||
required: true,
|
||||
placeHolder: 'message.error.input.value',
|
||||
display: {
|
||||
provider: 'NetApp ONTAP'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.password',
|
||||
key: 'ontapPassword',
|
||||
required: true,
|
||||
placeHolder: 'message.error.input.value',
|
||||
password: true,
|
||||
display: {
|
||||
provider: 'NetApp ONTAP'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.ontap.svm.name',
|
||||
key: 'ontapSvmName',
|
||||
required: true,
|
||||
placeHolder: 'message.error.input.value',
|
||||
display: {
|
||||
provider: 'NetApp ONTAP'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.storage.tags',
|
||||
key: 'primaryStorageTags',
|
||||
|
|
@ -909,9 +946,9 @@ export default {
|
|||
},
|
||||
watch: {
|
||||
'prefillContent.provider' (newVal, oldVal) {
|
||||
if (['SolidFire', 'PowerFlex'].includes(newVal) && !['SolidFire', 'PowerFlex'].includes(oldVal)) {
|
||||
if (['SolidFire', 'PowerFlex', 'NetApp ONTAP'].includes(newVal) && !['SolidFire', 'PowerFlex', 'NetApp ONTAP'].includes(oldVal)) {
|
||||
this.$emit('fieldsChanged', { primaryStorageProtocol: undefined })
|
||||
} else if (!['SolidFire', 'PowerFlex'].includes(newVal) && ['SolidFire', 'PowerFlex'].includes(oldVal)) {
|
||||
} else if (!['SolidFire', 'PowerFlex', 'NetApp ONTAP'].includes(newVal) && ['SolidFire', 'PowerFlex', 'NetApp ONTAP'].includes(oldVal)) {
|
||||
this.$emit('fieldsChanged', { primaryStorageProtocol: undefined })
|
||||
}
|
||||
|
||||
|
|
@ -996,6 +1033,17 @@ export default {
|
|||
this.primaryStorageScopes = scope
|
||||
},
|
||||
fetchProtocol () {
|
||||
const provider = this.prefillContent?.provider || null
|
||||
if (provider === 'NetApp ONTAP') {
|
||||
this.primaryStorageProtocols = [
|
||||
{ id: 'NFS3', description: 'NFS3' },
|
||||
{ id: 'ISCSI', description: 'ISCSI' }
|
||||
]
|
||||
if (!['NFS3', 'ISCSI'].includes(this.prefillContent?.primaryStorageProtocol)) {
|
||||
this.$emit('fieldsChanged', { primaryStorageProtocol: 'NFS3' })
|
||||
}
|
||||
return
|
||||
}
|
||||
const hypervisor = this.prefillContent?.hypervisor || null
|
||||
const protocols = []
|
||||
if (hypervisor === 'KVM') {
|
||||
|
|
|
|||
|
|
@ -1584,7 +1584,7 @@ export default {
|
|||
}
|
||||
|
||||
params.url = url
|
||||
if (this.prefillContent.provider !== 'DefaultPrimary' && this.prefillContent.provider !== 'PowerFlex') {
|
||||
if (this.prefillContent.provider !== 'DefaultPrimary' && this.prefillContent.provider !== 'PowerFlex' && this.prefillContent.provider !== 'NetApp ONTAP') {
|
||||
if (this.prefillContent.managed) {
|
||||
params.managed = true
|
||||
} else {
|
||||
|
|
@ -1604,6 +1604,18 @@ export default {
|
|||
params.url = this.powerflexURL(this.prefillContent.powerflexGateway, this.prefillContent.powerflexGatewayUsername,
|
||||
this.prefillContent.powerflexGatewayPassword, this.prefillContent.powerflexStoragePool)
|
||||
}
|
||||
if (this.prefillContent.provider === 'NetApp ONTAP') {
|
||||
params['details[0].storageIP'] = this.prefillContent.ontapIP
|
||||
params['details[0].username'] = this.prefillContent.ontapUsername
|
||||
params['details[0].password'] = btoa(this.prefillContent.ontapPassword)
|
||||
params['details[0].svmName'] = this.prefillContent.ontapSvmName
|
||||
params['details[0].protocol'] = this.prefillContent.primaryStorageProtocol
|
||||
params.managed = true
|
||||
params.url = this.ontapURL(this.prefillContent.ontapIP)
|
||||
if (this.prefillContent.capacityBytes && this.prefillContent.capacityBytes.length > 0) {
|
||||
params.capacityBytes = this.prefillContent.capacityBytes.split(',').join('')
|
||||
}
|
||||
}
|
||||
|
||||
params.tags = this.prefillContent?.primaryStorageTags || ''
|
||||
|
||||
|
|
@ -2486,6 +2498,10 @@ export default {
|
|||
var url = 'powerflex://' + encodeURIComponent(username) + ':' + encodeURIComponent(password) + '@' +
|
||||
gateway + '/' + encodeURIComponent(pool)
|
||||
return url
|
||||
},
|
||||
ontapURL (ontapIp) {
|
||||
var url = 'https://' + ontapIp
|
||||
return url
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue