mirror of https://github.com/apache/cloudstack.git
storage: Linstor volume plugin (#4994)
This adds a volume (primary) storage plugin for the Linstor SDS. It can currently create, delete and migrate volumes; snapshots should be possible, but they do not yet work for RAW volume types in CloudStack.

* plugin-storage-volume-linstor: notify libvirt guests about the resize
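As a rough orientation for the diff below, here is a minimal, illustrative Java sketch of how the new KVM-side classes fit together. It is not part of the commit; the pool UUID, controller URL, resource-group name and volume name are made-up placeholders, and a reachable linstor-controller is assumed.

import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.LinstorStorageAdaptor;
import com.cloud.storage.Storage;
import org.apache.cloudstack.utils.qemu.QemuImg;

public class LinstorAdaptorSketch {
    public static void main(String[] args) {
        // The adaptor resolves the local node name via the 'hostname' command.
        LinstorStorageAdaptor adaptor = new LinstorStorageAdaptor();

        // 'host' is used as the linstor-controller REST base path, and the
        // 'userInfo' argument carries the LINSTOR resource group the pool
        // spawns its resources from.
        KVMStoragePool pool = adaptor.createStoragePool(
                "3e2f5c6a-1111-2222-3333-444455556666",    // pool uuid (placeholder)
                "http://linstor-controller.example:3370",  // controller URL (placeholder)
                3370, null, "cloudstack-rg",               // resource group (placeholder)
                Storage.StoragePoolType.Linstor);

        // Volumes are plain RAW DRBD block devices; LINSTOR spawns the resource
        // and the adaptor returns its device path (e.g. /dev/drbd1000).
        KVMPhysicalDisk disk = pool.createPhysicalDisk(
                "my-volume", QemuImg.PhysicalDiskFormat.RAW,
                Storage.ProvisioningType.THIN, 8L * 1024 * 1024 * 1024);

        System.out.println("Created DRBD device at " + disk.getPath());
    }
}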
This commit is contained in:
parent
1bf686269f
commit
66c39c1589
@@ -147,6 +147,7 @@ public class Storage {
         Gluster(true, false),
         PowerFlex(true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS)
         ManagedNFS(true, false),
+        Linstor(true, true),
         DatastoreCluster(true, true); // for VMware, to abstract pool of clusters

         private final boolean shared;
@@ -48,6 +48,7 @@ public class StorageTest {
         Assert.assertTrue(StoragePoolType.Gluster.isShared());
         Assert.assertTrue(StoragePoolType.ManagedNFS.isShared());
         Assert.assertTrue(StoragePoolType.DatastoreCluster.isShared());
+        Assert.assertTrue(StoragePoolType.Linstor.isShared());
     }

     @Test
@@ -71,5 +72,6 @@ public class StorageTest {
         Assert.assertFalse(StoragePoolType.Gluster.supportsOverProvisioning());
         Assert.assertFalse(StoragePoolType.ManagedNFS.supportsOverProvisioning());
         Assert.assertTrue(StoragePoolType.DatastoreCluster.supportsOverProvisioning());
+        Assert.assertTrue(StoragePoolType.Linstor.supportsOverProvisioning());
     }
 }
@@ -92,6 +92,11 @@
             <artifactId>cloud-plugin-storage-volume-scaleio</artifactId>
             <version>${project.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-storage-volume-linstor</artifactId>
+            <version>${project.version}</version>
+        </dependency>
         <dependency>
             <groupId>org.apache.cloudstack</groupId>
             <artifactId>cloud-server</artifactId>
@@ -99,6 +99,12 @@
             <version>${project.version}</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.cloudstack</groupId>
+            <artifactId>cloud-plugin-storage-volume-linstor</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.apache.cloudstack</groupId>
             <artifactId>cloud-secondary-storage</artifactId>
@@ -62,6 +62,11 @@
             <artifactId>rados</artifactId>
             <version>${cs.rados-java.version}</version>
         </dependency>
+        <dependency>
+            <groupId>com.linbit.linstor.api</groupId>
+            <artifactId>java-linstor</artifactId>
+            <version>${cs.java-linstor.version}</version>
+        </dependency>
         <dependency>
             <groupId>net.java.dev.jna</groupId>
             <artifactId>jna</artifactId>
@@ -279,6 +279,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
      */
     private static final String AARCH64 = "aarch64";

+    public static final String RESIZE_NOTIFY_ONLY = "NOTIFYONLY";
+
     private String _modifyVlanPath;
     private String _versionstringpath;
     private String _patchScriptPath;
@@ -1910,6 +1912,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
                 || poolType == StoragePoolType.Gluster)
                 && volFormat == PhysicalDiskFormat.QCOW2 ) {
             return "QCOW2";
+        } else if (poolType == StoragePoolType.Linstor) {
+            return RESIZE_NOTIFY_ONLY;
         }
         throw new CloudRuntimeException("Cannot determine resize type from pool type " + pool.getType());
     }
@@ -57,7 +57,7 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
         final String vmInstanceName = command.getInstanceName();
         final boolean shrinkOk = command.getShrinkOk();
         final StorageFilerTO spool = command.getPool();
-        final String notifyOnlyType = "NOTIFYONLY";
+        final String notifyOnlyType = LibvirtComputingResource.RESIZE_NOTIFY_ONLY;

         if ( currentSize == newSize) {
             // nothing to do
@@ -73,19 +73,20 @@ public final class LibvirtResizeVolumeCommandWrapper extends CommandWrapper<Resi
         final String path = vol.getPath();
         String type = notifyOnlyType;

-        if (pool.getType() != StoragePoolType.RBD) {
+        if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.Linstor) {
             type = libvirtComputingResource.getResizeScriptType(pool, vol);
             if (type.equals("QCOW2") && shrinkOk) {
                 return new ResizeVolumeAnswer(command, false, "Unable to shrink volumes of type " + type);
             }
         } else {
-            s_logger.debug("Volume " + path + " is on a RBD storage pool. No need to query for additional information.");
+            s_logger.debug("Volume " + path + " is on a RBD/Linstor storage pool. No need to query for additional information.");
         }

         s_logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk);

         /* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via Bash script */
-        if (pool.getType() != StoragePoolType.CLVM && vol.getFormat() != PhysicalDiskFormat.QCOW2) {
+        if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.Linstor &&
+            vol.getFormat() != PhysicalDiskFormat.QCOW2) {
             s_logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume.");
             try {
                 final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
@@ -378,7 +378,7 @@ public class KVMStoragePoolManager {
             return adaptor.createDiskFromTemplate(template, name,
                     PhysicalDiskFormat.DIR, provisioningType,
                     size, destPool, timeout);
-        } else if (destPool.getType() == StoragePoolType.PowerFlex) {
+        } else if (destPool.getType() == StoragePoolType.PowerFlex || destPool.getType() == StoragePoolType.Linstor) {
             return adaptor.createDiskFromTemplate(template, name,
                     PhysicalDiskFormat.RAW, provisioningType,
                     size, destPool, timeout);
@@ -282,7 +282,9 @@ public class KVMStorageProcessor implements StorageProcessor {
             final TemplateObjectTO newTemplate = new TemplateObjectTO();
             newTemplate.setPath(primaryVol.getName());
             newTemplate.setSize(primaryVol.getSize());
-            if (primaryPool.getType() == StoragePoolType.RBD || primaryPool.getType() == StoragePoolType.PowerFlex) {
+            if (primaryPool.getType() == StoragePoolType.RBD ||
+                primaryPool.getType() == StoragePoolType.PowerFlex ||
+                primaryPool.getType() == StoragePoolType.Linstor) {
                 newTemplate.setFormat(ImageFormat.RAW);
             } else {
                 newTemplate.setFormat(ImageFormat.QCOW2);
@ -0,0 +1,586 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.hypervisor.kvm.storage;
|
||||
|
||||
import com.linbit.linstor.api.ApiClient;
|
||||
import com.linbit.linstor.api.ApiException;
|
||||
import com.linbit.linstor.api.Configuration;
|
||||
import com.linbit.linstor.api.DevelopersApi;
|
||||
import com.linbit.linstor.api.model.ApiCallRc;
|
||||
import com.linbit.linstor.api.model.ApiCallRcList;
|
||||
import com.linbit.linstor.api.model.Properties;
|
||||
import com.linbit.linstor.api.model.ProviderKind;
|
||||
import com.linbit.linstor.api.model.ResourceDefinition;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionModify;
|
||||
import com.linbit.linstor.api.model.ResourceGroup;
|
||||
import com.linbit.linstor.api.model.ResourceGroupSpawn;
|
||||
import com.linbit.linstor.api.model.ResourceMakeAvailable;
|
||||
import com.linbit.linstor.api.model.ResourceWithVolumes;
|
||||
import com.linbit.linstor.api.model.StoragePool;
|
||||
import com.linbit.linstor.api.model.VolumeDefinition;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.StringJoiner;
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgException;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImgFile;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.libvirt.LibvirtException;
|
||||
|
||||
@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.Linstor)
|
||||
public class LinstorStorageAdaptor implements StorageAdaptor {
|
||||
private static final Logger s_logger = Logger.getLogger(LinstorStorageAdaptor.class);
|
||||
private static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>();
|
||||
private final String localNodeName;
|
||||
|
||||
private DevelopersApi getLinstorAPI(KVMStoragePool pool) {
|
||||
ApiClient client = Configuration.getDefaultApiClient();
|
||||
client.setBasePath(pool.getSourceHost());
|
||||
return new DevelopersApi(client);
|
||||
}
|
||||
|
||||
private String getLinstorRscName(String name) {
|
||||
return "cs-" + name;
|
||||
}
|
||||
|
||||
private String getHostname() {
|
||||
// TODO: there may already be a helper for this in the agent, or a better way to get the hostname.
|
||||
ProcessBuilder pb = new ProcessBuilder("hostname");
|
||||
try
|
||||
{
|
||||
String result;
|
||||
Process p = pb.start();
|
||||
final BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||
|
||||
StringJoiner sj = new StringJoiner(System.getProperty("line.separator"));
|
||||
reader.lines().iterator().forEachRemaining(sj::add);
|
||||
result = sj.toString();
|
||||
|
||||
p.waitFor();
|
||||
p.destroy();
|
||||
return result.trim();
|
||||
} catch (IOException | InterruptedException exc) {
|
||||
throw new CloudRuntimeException("Unable to run 'hostname' command.");
|
||||
}
|
||||
}
|
||||
|
||||
private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
|
||||
if (answer.isError()) {
|
||||
s_logger.error(answer.getMessage());
|
||||
} else if (answer.isWarning()) {
|
||||
s_logger.warn(answer.getMessage());
|
||||
} else if (answer.isInfo()) {
|
||||
s_logger.info(answer.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
|
||||
answers.forEach(this::logLinstorAnswer);
|
||||
if (answers.hasError())
|
||||
{
|
||||
String errMsg = answers.stream()
|
||||
.filter(ApiCallRc::isError)
|
||||
.findFirst()
|
||||
.map(ApiCallRc::getMessage).orElse("Unknown linstor error");
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleLinstorApiAnswers(ApiCallRcList answers, String excMessage) {
|
||||
answers.forEach(this::logLinstorAnswer);
|
||||
if (answers.hasError()) {
|
||||
throw new CloudRuntimeException(excMessage);
|
||||
}
|
||||
}
|
||||
|
||||
public LinstorStorageAdaptor() {
|
||||
localNodeName = getHostname();
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMStoragePool getStoragePool(String uuid) {
|
||||
return MapStorageUuidToStoragePool.get(uuid);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
|
||||
s_logger.debug("Linstor getStoragePool: " + uuid + " -> " + refreshInfo);
|
||||
return MapStorageUuidToStoragePool.get(uuid);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk getPhysicalDisk(String name, KVMStoragePool pool)
|
||||
{
|
||||
s_logger.debug("Linstor: getPhysicalDisk for " + name);
|
||||
if (name == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final DevelopersApi api = getLinstorAPI(pool);
|
||||
try {
|
||||
final String rscName = getLinstorRscName(name);
|
||||
|
||||
List<VolumeDefinition> volumeDefs = api.volumeDefinitionList(rscName, null, null);
|
||||
final long size = volumeDefs.isEmpty() ? 0 : volumeDefs.get(0).getSizeKib() * 1024;
|
||||
|
||||
List<ResourceWithVolumes> resources = api.viewResources(
|
||||
Collections.emptyList(),
|
||||
Collections.singletonList(rscName),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) {
|
||||
final String devPath = resources.get(0).getVolumes().get(0).getDevicePath();
|
||||
final KVMPhysicalDisk kvmDisk = new KVMPhysicalDisk(devPath, name, pool);
|
||||
kvmDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
|
||||
kvmDisk.setSize(size);
|
||||
kvmDisk.setVirtualSize(size);
|
||||
return kvmDisk;
|
||||
} else {
|
||||
s_logger.error("Linstor: viewResources didn't return resources or volumes for " + rscName);
|
||||
throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
|
||||
}
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx);
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo,
|
||||
Storage.StoragePoolType type)
|
||||
{
|
||||
s_logger.debug(String.format(
|
||||
"Linstor createStoragePool: name: '%s', host: '%s', path: %s, userinfo: %s", name, host, path, userInfo));
|
||||
LinstorStoragePool storagePool = new LinstorStoragePool(name, host, port, userInfo, type, this);
|
||||
|
||||
MapStorageUuidToStoragePool.put(name, storagePool);
|
||||
|
||||
return storagePool;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteStoragePool(String uuid) {
|
||||
return MapStorageUuidToStoragePool.remove(uuid) != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteStoragePool(KVMStoragePool pool) {
|
||||
return deleteStoragePool(pool.getUuid());
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format,
|
||||
Storage.ProvisioningType provisioningType, long size)
|
||||
{
|
||||
final String rscName = getLinstorRscName(name);
|
||||
LinstorStoragePool lpool = (LinstorStoragePool) pool;
|
||||
final DevelopersApi api = getLinstorAPI(pool);
|
||||
|
||||
try {
|
||||
List<ResourceDefinition> definitionList = api.resourceDefinitionList(
|
||||
Collections.singletonList(rscName), null, null, null);
|
||||
|
||||
if (definitionList.isEmpty()) {
|
||||
ResourceGroupSpawn rgSpawn = new ResourceGroupSpawn();
|
||||
rgSpawn.setResourceDefinitionName(rscName);
|
||||
rgSpawn.addVolumeSizesItem(size / 1024); // linstor uses KiB
|
||||
|
||||
s_logger.debug("Linstor: Spawn resource " + rscName);
|
||||
ApiCallRcList answers = api.resourceGroupSpawn(lpool.getResourceGroup(), rgSpawn);
|
||||
handleLinstorApiAnswers(answers, "Linstor: Unable to spawn resource.");
|
||||
}
|
||||
|
||||
// query linstor for the device path
|
||||
List<ResourceWithVolumes> resources = api.viewResources(
|
||||
Collections.emptyList(),
|
||||
Collections.singletonList(rscName),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
// TODO make available on node
|
||||
|
||||
if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty()) {
|
||||
final String devPath = resources.get(0).getVolumes().get(0).getDevicePath();
|
||||
s_logger.info("Linstor: Created drbd device: " + devPath);
|
||||
final KVMPhysicalDisk kvmDisk = new KVMPhysicalDisk(devPath, name, pool);
|
||||
kvmDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW);
|
||||
return kvmDisk;
|
||||
} else {
|
||||
s_logger.error("Linstor: viewResources didn't return resources or volumes.");
|
||||
throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
|
||||
}
|
||||
} catch (ApiException apiEx) {
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String, String> details)
|
||||
{
|
||||
s_logger.debug(String.format("Linstor: connectPhysicalDisk %s:%s -> %s", pool.getUuid(), volumePath, details));
|
||||
if (volumePath == null) {
|
||||
s_logger.warn("volumePath is null, ignoring");
|
||||
return false;
|
||||
}
|
||||
|
||||
final DevelopersApi api = getLinstorAPI(pool);
|
||||
try
|
||||
{
|
||||
final String rscName = getLinstorRscName(volumePath);
|
||||
|
||||
ResourceMakeAvailable rma = new ResourceMakeAvailable();
|
||||
rma.setDiskful(true);
|
||||
ApiCallRcList answers = api.resourceMakeAvailableOnNode(rscName, localNodeName, rma);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
// allow 2 primaries for live migration, should be removed by disconnect on the other end
|
||||
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
|
||||
Properties props = new Properties();
|
||||
props.put("DrbdOptions/Net/allow-two-primaries", "yes");
|
||||
rdm.setOverrideProps(props);
|
||||
answers = api.resourceDefinitionModify(rscName, rdm);
|
||||
if (answers.hasError()) {
|
||||
s_logger.error("Unable to set 'allow-two-primaries' on " + rscName);
|
||||
throw new CloudRuntimeException(answers.get(0).getMessage());
|
||||
}
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx);
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool)
|
||||
{
|
||||
s_logger.debug("Linstor: disconnectPhysicalDisk " + pool.getUuid() + ":" + volumePath);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect)
|
||||
{
|
||||
s_logger.debug("Linstor: disconnectPhysicalDisk map");
|
||||
return true;
|
||||
}
|
||||
|
||||
private Optional<ResourceWithVolumes> getResourceByPath(final List<ResourceWithVolumes> resources, String path) {
|
||||
return resources.stream()
|
||||
.filter(rsc -> rsc.getVolumes().stream()
|
||||
.anyMatch(v -> v.getDevicePath().equals(path)))
|
||||
.findFirst();
|
||||
}
|
||||
|
||||
/**
|
||||
 * disconnectPhysicalDiskByPath is called after e.g. a live migration.
 * The problem is that the path alone does not tell us which linstor-controller
 * this resource belongs to. But since it is highly unlikely that more than one
 * linstor-controller is used to manage resources on the same KVM host,
 * we simply take the first stored storage pool.
 */
|
||||
@Override
|
||||
public boolean disconnectPhysicalDiskByPath(String localPath)
|
||||
{
|
||||
s_logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath);
|
||||
// get first storage pool from the map, as we don't know any better:
|
||||
if (!MapStorageUuidToStoragePool.isEmpty())
|
||||
{
|
||||
String firstKey = MapStorageUuidToStoragePool.keySet().stream().findFirst().get();
|
||||
final KVMStoragePool pool = MapStorageUuidToStoragePool.get(firstKey);
|
||||
|
||||
s_logger.debug("Linstor: Using storpool: " + pool.getUuid());
|
||||
final DevelopersApi api = getLinstorAPI(pool);
|
||||
|
||||
try
|
||||
{
|
||||
List<ResourceWithVolumes> resources = api.viewResources(
|
||||
Collections.singletonList(localNodeName),
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
Optional<ResourceWithVolumes> rsc = getResourceByPath(resources, localPath);
|
||||
|
||||
if (rsc.isPresent())
|
||||
{
|
||||
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
|
||||
rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
|
||||
ApiCallRcList answers = api.resourceDefinitionModify(rsc.get().getName(), rdm);
|
||||
if (answers.hasError())
|
||||
{
|
||||
s_logger.error("Failed to remove 'allow-two-primaries' on " + rsc.get().getName());
|
||||
throw new CloudRuntimeException(answers.get(0).getMessage());
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
s_logger.warn("Linstor: Couldn't find resource for this path: " + localPath);
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx);
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deletePhysicalDisk(String name, KVMStoragePool pool, Storage.ImageFormat format)
|
||||
{
|
||||
s_logger.debug("Linstor: deletePhysicalDisk " + name);
|
||||
final DevelopersApi api = getLinstorAPI(pool);
|
||||
|
||||
try {
|
||||
final String rscName = getLinstorRscName(name);
|
||||
s_logger.debug("Linstor: delete resource definition " + rscName);
|
||||
ApiCallRcList answers = api.resourceDefinitionDelete(rscName);
|
||||
handleLinstorApiAnswers(answers, "Linstor: Unable to delete resource definition " + rscName);
|
||||
} catch (ApiException apiEx) {
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createDiskFromTemplate(
|
||||
KVMPhysicalDisk template,
|
||||
String name,
|
||||
QemuImg.PhysicalDiskFormat format,
|
||||
Storage.ProvisioningType provisioningType,
|
||||
long size,
|
||||
KVMStoragePool destPool,
|
||||
int timeout)
|
||||
{
|
||||
s_logger.info("Linstor: createDiskFromTemplate");
|
||||
return copyPhysicalDisk(template, name, destPool, timeout);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<KVMPhysicalDisk> listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool)
|
||||
{
|
||||
throw new UnsupportedOperationException("Listing disks is not supported for this configuration.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createTemplateFromDisk(
|
||||
KVMPhysicalDisk disk,
|
||||
String name,
|
||||
QemuImg.PhysicalDiskFormat format,
|
||||
long size,
|
||||
KVMStoragePool destPool)
|
||||
{
|
||||
throw new UnsupportedOperationException("Copying a template from disk is not supported in this configuration.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPools, int timeout)
|
||||
{
|
||||
s_logger.debug("Linstor: copyPhysicalDisk");
|
||||
final QemuImg.PhysicalDiskFormat sourceFormat = disk.getFormat();
|
||||
final String sourcePath = disk.getPath();
|
||||
final QemuImg qemu = new QemuImg(timeout);
|
||||
|
||||
final QemuImgFile srcFile = new QemuImgFile(sourcePath, sourceFormat);
|
||||
|
||||
final KVMPhysicalDisk dstDisk = destPools.createPhysicalDisk(
|
||||
name, QemuImg.PhysicalDiskFormat.RAW, Storage.ProvisioningType.FAT, disk.getVirtualSize());
|
||||
|
||||
final QemuImgFile destFile = new QemuImgFile(dstDisk.getPath());
|
||||
destFile.setFormat(dstDisk.getFormat());
|
||||
destFile.setSize(disk.getVirtualSize());
|
||||
|
||||
try {
|
||||
qemu.convert(srcFile, destFile);
|
||||
} catch (QemuImgException | LibvirtException e) {
|
||||
s_logger.error(e);
|
||||
destPools.deletePhysicalDisk(name, Storage.ImageFormat.RAW);
|
||||
throw new CloudRuntimeException("Failed to copy " + disk.getPath() + " to " + name);
|
||||
}
|
||||
|
||||
return dstDisk;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createDiskFromSnapshot(
|
||||
KVMPhysicalDisk snapshot,
|
||||
String snapshotName,
|
||||
String name,
|
||||
KVMStoragePool destPool,
|
||||
int timeout)
|
||||
{
|
||||
s_logger.debug("Linstor: createDiskFromSnapshot");
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean refresh(KVMStoragePool pool)
|
||||
{
|
||||
s_logger.debug("Linstor: refresh");
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean createFolder(String uuid, String path)
|
||||
{
|
||||
throw new UnsupportedOperationException("A folder cannot be created in this configuration.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createDiskFromTemplateBacking(
|
||||
KVMPhysicalDisk template,
|
||||
String name,
|
||||
QemuImg.PhysicalDiskFormat format,
|
||||
long size,
|
||||
KVMStoragePool destPool,
|
||||
int timeout)
|
||||
{
|
||||
s_logger.debug("Linstor: createDiskFromTemplateBacking");
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath,
|
||||
KVMStoragePool destPool, Storage.ImageFormat format,
|
||||
int timeout)
|
||||
{
|
||||
s_logger.debug("Linstor: createTemplateFromDirectDownloadFile");
|
||||
return null;
|
||||
}
|
||||
|
||||
public long getCapacity(LinstorStoragePool pool) {
|
||||
DevelopersApi linstorApi = getLinstorAPI(pool);
|
||||
final String rscGroupName = pool.getResourceGroup();
|
||||
try {
|
||||
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
|
||||
Collections.singletonList(rscGroupName),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
if (rscGrps.isEmpty()) {
|
||||
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
|
||||
s_logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
List<StoragePool> storagePools = linstorApi.viewStoragePools(
|
||||
Collections.emptyList(),
|
||||
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
|
||||
null,
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
final long capacity = storagePools.stream()
|
||||
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
|
||||
.mapToLong(sp -> sp.getTotalCapacity() != null ? sp.getTotalCapacity() : 0)
|
||||
.sum() * 1024; // linstor uses kiB
|
||||
s_logger.debug("Linstor: GetCapacity() -> " + capacity);
|
||||
return capacity;
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
public long getAvailable(LinstorStoragePool pool) {
|
||||
DevelopersApi linstorApi = getLinstorAPI(pool);
|
||||
final String rscGroupName = pool.getResourceGroup();
|
||||
try {
|
||||
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
|
||||
Collections.singletonList(rscGroupName),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
if (rscGrps.isEmpty()) {
|
||||
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
|
||||
s_logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
List<StoragePool> storagePools = linstorApi.viewStoragePools(
|
||||
Collections.emptyList(),
|
||||
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
|
||||
null,
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
final long free = storagePools.stream()
|
||||
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
|
||||
.mapToLong(StoragePool::getFreeCapacity).sum() * 1024; // linstor uses KiB
|
||||
|
||||
s_logger.debug("Linstor: getAvailable() -> " + free);
|
||||
return free;
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
public long getUsed(LinstorStoragePool pool) {
|
||||
DevelopersApi linstorApi = getLinstorAPI(pool);
|
||||
final String rscGroupName = pool.getResourceGroup();
|
||||
try {
|
||||
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
|
||||
Collections.singletonList(rscGroupName),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
if (rscGrps.isEmpty()) {
|
||||
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
|
||||
s_logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
List<StoragePool> storagePools = linstorApi.viewStoragePools(
|
||||
Collections.emptyList(),
|
||||
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
|
||||
null,
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
final long used = storagePools.stream()
|
||||
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
|
||||
.mapToLong(sp -> sp.getTotalCapacity() - sp.getFreeCapacity()).sum() * 1024; // linstor uses KiB
|
||||
s_logger.debug("Linstor: getUsed() -> " + used);
|
||||
return used;
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,191 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.hypervisor.kvm.storage;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.storage.Storage;
|
||||
import org.apache.cloudstack.utils.qemu.QemuImg;
|
||||
|
||||
public class LinstorStoragePool implements KVMStoragePool {
|
||||
private final String _uuid;
|
||||
private final String _sourceHost;
|
||||
private final int _sourcePort;
|
||||
private final Storage.StoragePoolType _storagePoolType;
|
||||
private final StorageAdaptor _storageAdaptor;
|
||||
private final String _resourceGroup;
|
||||
|
||||
public LinstorStoragePool(String uuid, String host, int port, String resourceGroup,
|
||||
Storage.StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) {
|
||||
_uuid = uuid;
|
||||
_sourceHost = host;
|
||||
_sourcePort = port;
|
||||
_storagePoolType = storagePoolType;
|
||||
_storageAdaptor = storageAdaptor;
|
||||
_resourceGroup = resourceGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createPhysicalDisk(String name, QemuImg.PhysicalDiskFormat format,
|
||||
Storage.ProvisioningType provisioningType, long size)
|
||||
{
|
||||
return _storageAdaptor.createPhysicalDisk(name, this, format, provisioningType, size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size)
|
||||
{
|
||||
return _storageAdaptor.createPhysicalDisk(volumeUuid,this, getDefaultFormat(), provisioningType, size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean connectPhysicalDisk(String volumeUuid, Map<String, String> details)
|
||||
{
|
||||
return _storageAdaptor.connectPhysicalDisk(volumeUuid, this, details);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid)
|
||||
{
|
||||
return _storageAdaptor.getPhysicalDisk(volumeUuid, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean disconnectPhysicalDisk(String volumeUuid)
|
||||
{
|
||||
return _storageAdaptor.disconnectPhysicalDisk(volumeUuid, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format)
|
||||
{
|
||||
return _storageAdaptor.deletePhysicalDisk(volumeUuid, this, format);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<KVMPhysicalDisk> listPhysicalDisks()
|
||||
{
|
||||
return _storageAdaptor.listPhysicalDisks(_uuid, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getUuid()
|
||||
{
|
||||
return _uuid;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getCapacity()
|
||||
{
|
||||
return ((LinstorStorageAdaptor)_storageAdaptor).getCapacity(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUsed()
|
||||
{
|
||||
return ((LinstorStorageAdaptor)_storageAdaptor).getUsed(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getAvailable()
|
||||
{
|
||||
return ((LinstorStorageAdaptor)_storageAdaptor).getAvailable(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean refresh()
|
||||
{
|
||||
return _storageAdaptor.refresh(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isExternalSnapshot()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getLocalPath()
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSourceHost()
|
||||
{
|
||||
return _sourceHost;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSourceDir()
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSourcePort()
|
||||
{
|
||||
return _sourcePort;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getAuthUserName()
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getAuthSecret()
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Storage.StoragePoolType getType()
|
||||
{
|
||||
return _storagePoolType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean delete()
|
||||
{
|
||||
return _storageAdaptor.deleteStoragePool(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public QemuImg.PhysicalDiskFormat getDefaultFormat()
|
||||
{
|
||||
return QemuImg.PhysicalDiskFormat.RAW;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean createFolder(String path)
|
||||
{
|
||||
return _storageAdaptor.createFolder(_uuid, path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsConfigDriveIso()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
public String getResourceGroup() {
|
||||
return _resourceGroup;
|
||||
}
|
||||
}
|
||||
|
|
@ -4880,6 +4880,46 @@ public class LibvirtComputingResourceTest {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResizeVolumeCommandLinstorNotifyOnly() {
|
||||
final String path = "/dev/drbd1000";
|
||||
final StorageFilerTO pool = Mockito.mock(StorageFilerTO.class);
|
||||
final Long currentSize = 100l;
|
||||
final Long newSize = 200l;
|
||||
final boolean shrinkOk = false;
|
||||
final String vmInstance = "Test";
|
||||
|
||||
final ResizeVolumeCommand command = new ResizeVolumeCommand(path, pool, currentSize, newSize, shrinkOk, vmInstance);
|
||||
|
||||
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
|
||||
final KVMStoragePool storagePool = Mockito.mock(KVMStoragePool.class);
|
||||
final KVMPhysicalDisk vol = Mockito.mock(KVMPhysicalDisk.class);
|
||||
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);
|
||||
|
||||
when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr);
|
||||
when(storagePoolMgr.getStoragePool(pool.getType(), pool.getUuid())).thenReturn(storagePool);
|
||||
when(storagePool.getPhysicalDisk(path)).thenReturn(vol);
|
||||
when(vol.getPath()).thenReturn(path);
|
||||
when(storagePool.getType()).thenReturn(StoragePoolType.Linstor);
|
||||
when(vol.getFormat()).thenReturn(PhysicalDiskFormat.RAW);
|
||||
|
||||
final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
|
||||
assertNotNull(wrapper);
|
||||
|
||||
final Answer answer = wrapper.execute(command, libvirtComputingResource);
|
||||
assertTrue(answer.getResult());
|
||||
|
||||
verify(libvirtComputingResource, times(1)).getStoragePoolMgr();
|
||||
verify(libvirtComputingResource, times(0)).getResizeScriptType(storagePool, vol);
|
||||
|
||||
verify(libvirtComputingResource, times(0)).getLibvirtUtilitiesHelper();
|
||||
try {
|
||||
verify(libvirtUtilitiesHelper, times(0)).getConnection();
|
||||
} catch (final LibvirtException e) {
|
||||
fail(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResizeVolumeCommandSameSize() {
|
||||
final String path = "nfs:/127.0.0.1/storage/secondary";
|
||||
|
|
|
|||
|
|
@@ -122,6 +122,7 @@
         <module>storage/volume/sample</module>
         <module>storage/volume/solidfire</module>
         <module>storage/volume/scaleio</module>
+        <module>storage/volume/linstor</module>

         <module>storage-allocators/random</module>
@@ -48,8 +48,9 @@
             <artifactId>gson</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-bundle</artifactId>
+            <groupId>org.glassfish.jersey.core</groupId>
+            <artifactId>jersey-client</artifactId>
+            <version>${cs.jersey-client.version}</version>
         </dependency>
     </dependencies>
     <build>
@ -20,20 +20,17 @@
|
|||
package org.apache.cloudstack.storage.datastore.util;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import com.google.gson.Gson;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
import com.sun.jersey.api.client.Client;
|
||||
import com.sun.jersey.api.client.ClientResponse;
|
||||
import com.sun.jersey.api.client.WebResource;
|
||||
import com.sun.jersey.api.client.config.ClientConfig;
|
||||
import com.sun.jersey.api.client.config.DefaultClientConfig;
|
||||
import com.sun.jersey.core.util.MultivaluedMapImpl;
|
||||
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
|
||||
import org.apache.cloudstack.utils.security.SSLUtils;
|
||||
import org.apache.cloudstack.utils.security.SecureSSLSocketFactory;
|
||||
import org.apache.http.auth.InvalidCredentialsException;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.glassfish.jersey.client.ClientConfig;
|
||||
import org.glassfish.jersey.client.ClientResponse;
|
||||
|
||||
import javax.naming.ServiceUnavailableException;
|
||||
import javax.net.ssl.HostnameVerifier;
|
||||
|
|
@ -43,14 +40,18 @@ import javax.net.ssl.SSLHandshakeException;
|
|||
import javax.net.ssl.SSLSession;
|
||||
import javax.net.ssl.TrustManager;
|
||||
import javax.net.ssl.X509TrustManager;
|
||||
import javax.ws.rs.client.Client;
|
||||
import javax.ws.rs.client.ClientBuilder;
|
||||
import javax.ws.rs.client.WebTarget;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import javax.ws.rs.core.MultivaluedMap;
|
||||
import javax.ws.rs.core.UriBuilder;
|
||||
import java.net.ConnectException;
|
||||
import java.security.InvalidParameterException;
|
||||
import java.security.SecureRandom;
|
||||
import java.security.cert.X509Certificate;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
||||
public class ElastistorUtil {
|
||||
|
||||
|
|
@ -960,7 +961,7 @@ public class ElastistorUtil {
|
|||
* Returns null if there are no query parameters associated with the
|
||||
* command
|
||||
*/
|
||||
public MultivaluedMap<String, String> getCommandParameters();
|
||||
public List<Pair<String, String>> getCommandParameters();
|
||||
|
||||
/*
|
||||
* Adds a new key-value pair to the query parameters list.
|
||||
|
|
@ -978,7 +979,7 @@ public class ElastistorUtil {
|
|||
private static class BaseCommand implements ElastiCenterCommand {
|
||||
|
||||
private String commandName = null;
|
||||
private MultivaluedMap<String, String> commandParameters = null;
|
||||
private List<Pair<String, String>> commandParameters = null;
|
||||
private Object responseObject = null;
|
||||
|
||||
/*
|
||||
|
|
@ -1003,16 +1004,16 @@ public class ElastistorUtil {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MultivaluedMap<String, String> getCommandParameters() {
|
||||
public List<Pair<String, String>> getCommandParameters() {
|
||||
return commandParameters;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void putCommandParameter(String key, String value) {
|
||||
if (null == commandParameters) {
|
||||
commandParameters = new MultivaluedMapImpl();
|
||||
commandParameters = new ArrayList<>();
|
||||
}
|
||||
commandParameters.add(key, value);
|
||||
commandParameters.add(new Pair<>(key, value));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -1134,7 +1135,7 @@ public class ElastistorUtil {
|
|||
return executeCommand(cmd.getCommandName(), cmd.getCommandParameters(), cmd.getResponseObject());
|
||||
}
|
||||
|
||||
public Object executeCommand(String command, MultivaluedMap<String, String> params, Object responeObj) throws Throwable {
|
||||
public Object executeCommand(String command, List<Pair<String, String>> params, Object responeObj) throws Throwable {
|
||||
|
||||
if (!initialized) {
|
||||
throw new IllegalStateException("Error : ElastiCenterClient is not initialized.");
|
||||
|
|
@ -1145,25 +1146,27 @@ public class ElastistorUtil {
|
|||
}
|
||||
|
||||
try {
|
||||
ClientConfig config = new DefaultClientConfig();
|
||||
Client client = Client.create(config);
|
||||
WebResource webResource = client.resource(UriBuilder.fromUri(restprotocol + elastiCenterAddress + restpath).build());
|
||||
|
||||
MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
|
||||
queryParams.add(queryparamapikey, apiKey);
|
||||
queryParams.add(queryparamresponse, responseType);
|
||||
|
||||
queryParams.add(queryparamcommand, command);
|
||||
ClientConfig config = new ClientConfig();
|
||||
Client client = ClientBuilder.newClient(config);
|
||||
WebTarget webResource = client.target(UriBuilder.fromUri(restprotocol + elastiCenterAddress + restpath).build())
|
||||
.queryParam(queryparamapikey, apiKey)
|
||||
.queryParam(queryparamresponse, responseType)
|
||||
.queryParam(queryparamcommand, command);
|
||||
|
||||
if (null != params) {
|
||||
for (String key : params.keySet()) {
|
||||
queryParams.add(key, params.getFirst(key));
|
||||
for (Pair<String, String> pair : params) {
|
||||
webResource = webResource.queryParam(pair.first(), pair.second());
|
||||
}
|
||||
}
|
||||
if (debug) {
|
||||
System.out.println("Command Sent " + command + " : " + queryParams);
|
||||
List<Pair<String, String>> qryParams = new ArrayList<>();
|
||||
qryParams.add(new Pair<>(queryparamapikey, apiKey));
|
||||
qryParams.add(new Pair<>(queryparamresponse, responseType));
|
||||
qryParams.add(new Pair<>(queryparamcommand, command));
|
||||
qryParams.addAll(params);
|
||||
System.out.println("Command Sent " + command + " : " + params);
|
||||
}
|
||||
ClientResponse response = webResource.queryParams(queryParams).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
|
||||
ClientResponse response = webResource.request(MediaType.APPLICATION_JSON).get(ClientResponse.class);
|
||||
|
||||
if (response.getStatus() >= 300) {
|
||||
if (debug)
|
||||
|
|
@ -1178,7 +1181,7 @@ public class ElastistorUtil {
|
|||
throw new ServiceUnavailableException("Internal Error. Please contact your ElastiCenter Administrator.");
|
||||
}
|
||||
} else if (null != responeObj) {
|
||||
String jsonResponse = response.getEntity(String.class);
|
||||
String jsonResponse = String.valueOf(response.readEntity(String.class));
|
||||
if (debug) {
|
||||
System.out.println("Command Response : " + jsonResponse);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,60 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>cloud-plugin-storage-volume-linstor</artifactId>
|
||||
<name>Apache CloudStack Plugin - Storage Volume Linstor provider</name>
|
||||
<parent>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloudstack-plugins</artifactId>
|
||||
<version>4.16.0.0-SNAPSHOT</version>
|
||||
<relativePath>../../../pom.xml</relativePath>
|
||||
</parent>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.cloudstack</groupId>
|
||||
<artifactId>cloud-engine-storage-volume</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.linbit.linstor.api</groupId>
|
||||
<artifactId>java-linstor</artifactId>
|
||||
<version>${cs.java-linstor.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<configuration>
|
||||
<skipTests>true</skipTests>
|
||||
</configuration>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>integration-test</phase>
|
||||
<goals>
|
||||
<goal>test</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
|
|
@ -0,0 +1,768 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.driver;
|
||||
|
||||
import com.linbit.linstor.api.ApiException;
|
||||
import com.linbit.linstor.api.CloneWaiter;
|
||||
import com.linbit.linstor.api.DevelopersApi;
|
||||
import com.linbit.linstor.api.model.ApiCallRc;
|
||||
import com.linbit.linstor.api.model.ApiCallRcList;
|
||||
import com.linbit.linstor.api.model.ResourceDefinition;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionCloneRequest;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionCloneStarted;
|
||||
import com.linbit.linstor.api.model.ResourceDefinitionCreate;
|
||||
import com.linbit.linstor.api.model.ResourceGroupSpawn;
|
||||
import com.linbit.linstor.api.model.ResourceWithVolumes;
|
||||
import com.linbit.linstor.api.model.Snapshot;
|
||||
import com.linbit.linstor.api.model.SnapshotRestore;
|
||||
import com.linbit.linstor.api.model.VolumeDefinitionModify;
|
||||
|
||||
import javax.annotation.Nonnull;
|
||||
import javax.inject.Inject;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeAnswer;
|
||||
import com.cloud.agent.api.storage.ResizeVolumeCommand;
|
||||
import com.cloud.agent.api.to.DataStoreTO;
|
||||
import com.cloud.agent.api.to.DataTO;
|
||||
import com.cloud.agent.api.to.DiskTO;
|
||||
import com.cloud.agent.api.to.StorageFilerTO;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.storage.ResizeVolumePayload;
|
||||
import com.cloud.storage.SnapshotVO;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
import com.cloud.storage.VolumeDetailVO;
|
||||
import com.cloud.storage.VolumeVO;
|
||||
import com.cloud.storage.dao.SnapshotDao;
|
||||
import com.cloud.storage.dao.SnapshotDetailsDao;
|
||||
import com.cloud.storage.dao.SnapshotDetailsVO;
|
||||
import com.cloud.storage.dao.VMTemplatePoolDao;
|
||||
import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.dao.VolumeDetailsDao;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
|
||||
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
|
||||
import org.apache.cloudstack.storage.command.CommandResult;
|
||||
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
|
||||
import org.apache.cloudstack.storage.to.SnapshotObjectTO;
|
||||
import org.apache.cloudstack.storage.volume.VolumeObject;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
public class LinstorPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver {
|
||||
private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreDriverImpl.class);
|
||||
@Inject private PrimaryDataStoreDao _storagePoolDao;
|
||||
@Inject private VolumeDao _volumeDao;
|
||||
@Inject private VolumeDetailsDao _volumeDetailsDao;
|
||||
@Inject private VMTemplatePoolDao _vmTemplatePoolDao;
|
||||
@Inject private SnapshotDao _snapshotDao;
|
||||
@Inject private SnapshotDetailsDao _snapshotDetailsDao;
|
||||
@Inject private StorageManager _storageMgr;
|
||||
|
||||
public LinstorPrimaryDataStoreDriverImpl()
|
||||
{
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, String> getCapabilities()
|
||||
{
|
||||
Map<String, String> mapCapabilities = new HashMap<>();
|
||||
|
||||
// Linstor will be restricted to only run on LVM-THIN and ZFS storage pools with ACS
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString());
|
||||
|
||||
// fetch if lvm-thin or ZFS
|
||||
mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
|
||||
// CAN_CREATE_VOLUME_FROM_SNAPSHOT see note from CAN_CREATE_VOLUME_FROM_VOLUME
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||
|
||||
return mapCapabilities;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataTO getTO(DataObject data)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStoreTO getStoreTO(DataStore store)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ChapInfo getChapInfo(DataObject dataObject)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
|
||||
{
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUsedBytes(StoragePool storagePool)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUsedIops(StoragePool storagePool)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool)
|
||||
{
|
||||
return dataObject.getSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
private String getSnapshotName(String snapshotUuid) {
|
||||
return LinstorUtil.RSC_PREFIX + snapshotUuid;
|
||||
}
|
||||
|
||||
private void deleteResourceDefinition(StoragePoolVO storagePoolVO, String rscDefName)
|
||||
{
|
||||
DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
|
||||
try
|
||||
{
|
||||
ApiCallRcList answers = linstorApi.resourceDefinitionDelete(rscDefName);
|
||||
if (answers.hasError())
|
||||
{
|
||||
for (ApiCallRc answer : answers)
|
||||
{
|
||||
s_logger.error(answer.getMessage());
|
||||
}
|
||||
throw new CloudRuntimeException("Linstor: Unable to delete resource definition: " + rscDefName);
|
||||
}
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteSnapshot(@Nonnull DataStore dataStore, @Nonnull String rscDefName, @Nonnull String snapshotName)
|
||||
{
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
|
||||
DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
|
||||
|
||||
try
|
||||
{
|
||||
ApiCallRcList answers = linstorApi.resourceSnapshotDelete(rscDefName, snapshotName);
|
||||
if (answers.hasError())
|
||||
{
|
||||
for (ApiCallRc answer : answers)
|
||||
{
|
||||
s_logger.error(answer.getMessage());
|
||||
}
|
||||
throw new CloudRuntimeException("Linstor: Unable to delete snapshot: " + rscDefName);
|
||||
}
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
private long getCsIdForCloning(long volumeId, String cloneOf) {
|
||||
VolumeDetailVO volumeDetail = _volumeDetailsDao.findDetail(volumeId, cloneOf);
|
||||
|
||||
if (volumeDetail != null && volumeDetail.getValue() != null) {
|
||||
return Long.parseLong(volumeDetail.getValue());
|
||||
}
|
||||
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CommandResult> callback)
|
||||
{
|
||||
s_logger.debug("deleteAsync: " + dataObject.getType() + ";" + dataObject.getUuid());
|
||||
String errMsg = null;
|
||||
|
||||
final long storagePoolId = dataStore.getId();
|
||||
final StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
|
||||
|
||||
switch (dataObject.getType()) {
|
||||
case VOLUME:
|
||||
{
|
||||
final VolumeInfo volumeInfo = (VolumeInfo) dataObject;
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getPath();
|
||||
deleteResourceDefinition(storagePool, rscName);
|
||||
|
||||
long usedBytes = storagePool.getUsedBytes();
|
||||
long capacityIops = storagePool.getCapacityIops();
|
||||
|
||||
usedBytes -= volumeInfo.getSize();
|
||||
if (volumeInfo.getMaxIops() != null)
|
||||
capacityIops += volumeInfo.getMaxIops();
|
||||
|
||||
storagePool.setUsedBytes(Math.max(0, usedBytes));
|
||||
storagePool.setCapacityIops(Math.max(0, capacityIops));
|
||||
|
||||
_storagePoolDao.update(storagePoolId, storagePool);
|
||||
}
|
||||
break;
|
||||
case SNAPSHOT:
|
||||
final SnapshotInfo snapshotInfo = (SnapshotInfo) dataObject;
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + snapshotInfo.getBaseVolume().getPath();
|
||||
deleteSnapshot(dataStore, rscName, getSnapshotName(snapshotInfo.getUuid()));
|
||||
long usedBytes = storagePool.getUsedBytes() - snapshotInfo.getSize();
|
||||
storagePool.setUsedBytes(Math.max(0, usedBytes));
|
||||
_storagePoolDao.update(storagePoolId, storagePool);
|
||||
break;
|
||||
default:
|
||||
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync";
|
||||
s_logger.error(errMsg);
|
||||
}
|
||||
|
||||
if (callback != null) {
|
||||
CommandResult result = new CommandResult();
|
||||
result.setResult(errMsg);
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
private void logLinstorAnswer(@Nonnull ApiCallRc answer) {
|
||||
if (answer.isError()) {
|
||||
s_logger.error(answer.getMessage());
|
||||
} else if (answer.isWarning()) {
|
||||
s_logger.warn(answer.getMessage());
|
||||
} else if (answer.isInfo()) {
|
||||
s_logger.info(answer.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void logLinstorAnswers(@Nonnull ApiCallRcList answers) {
|
||||
answers.forEach(this::logLinstorAnswer);
|
||||
}
|
||||
|
||||
private void checkLinstorAnswersThrow(@Nonnull ApiCallRcList answers) {
|
||||
logLinstorAnswers(answers);
|
||||
if (answers.hasError())
|
||||
{
|
||||
String errMsg = answers.stream()
|
||||
.filter(ApiCallRc::isError)
|
||||
.findFirst()
|
||||
.map(ApiCallRc::getMessage).orElse("Unknown linstor error");
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
}
|
||||
|
||||
private String checkLinstorAnswers(@Nonnull ApiCallRcList answers) {
|
||||
logLinstorAnswers(answers);
|
||||
return answers.stream().filter(ApiCallRc::isError).findFirst().map(ApiCallRc::getMessage).orElse(null);
|
||||
}
|
||||
|
||||
private String getDeviceName(DevelopersApi linstorApi, String rscName) throws ApiException {
|
||||
List<ResourceWithVolumes> resources = linstorApi.viewResources(
|
||||
Collections.emptyList(),
|
||||
Collections.singletonList(rscName),
|
||||
Collections.emptyList(),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
if (!resources.isEmpty() && !resources.get(0).getVolumes().isEmpty())
|
||||
{
|
||||
s_logger.info("Linstor: Created drbd device: " + resources.get(0).getVolumes().get(0).getDevicePath());
|
||||
return resources.get(0).getVolumes().get(0).getDevicePath();
|
||||
} else
|
||||
{
|
||||
s_logger.error("Linstor: viewResources didn't return resources or volumes.");
|
||||
throw new CloudRuntimeException("Linstor: viewResources didn't return resources or volumes.");
|
||||
}
|
||||
}
|
||||
|
||||
private String createResource(VolumeInfo vol, StoragePoolVO storagePoolVO)
|
||||
{
|
||||
DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
|
||||
storagePoolVO.getUserInfo() : "DfltRscGrp";
|
||||
|
||||
ResourceGroupSpawn rscGrpSpawn = new ResourceGroupSpawn();
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + vol.getUuid();
|
||||
rscGrpSpawn.setResourceDefinitionName(rscName);
|
||||
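// CloudStack reports the volume size in bytes; Linstor expects volume sizes in KiB, hence the division by 1024.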
rscGrpSpawn.addVolumeSizesItem(vol.getSize() / 1024);
|
||||
|
||||
try
|
||||
{
|
||||
s_logger.debug("Linstor: Spawn resource " + rscName);
|
||||
ApiCallRcList answers = linstorApi.resourceGroupSpawn(rscGrp, rscGrpSpawn);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
return getDeviceName(linstorApi, rscName);
|
||||
} catch (ApiException apiEx)
|
||||
{
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
private String cloneResource(long csCloneId, VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
|
||||
// get the cached template on this storage
|
||||
VMTemplateStoragePoolVO tmplPoolRef = _vmTemplatePoolDao.findByPoolTemplate(
|
||||
storagePoolVO.getId(), csCloneId, null);
|
||||
|
||||
if (tmplPoolRef != null) {
|
||||
final String cloneRes = LinstorUtil.RSC_PREFIX + tmplPoolRef.getLocalDownloadPath();
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
|
||||
try {
|
||||
s_logger.debug("Clone resource definition " + cloneRes + " to " + rscName);
|
||||
ResourceDefinitionCloneRequest cloneRequest = new ResourceDefinitionCloneRequest();
|
||||
cloneRequest.setName(rscName);
|
||||
ResourceDefinitionCloneStarted cloneStarted = linstorApi.resourceDefinitionClone(
|
||||
cloneRes, cloneRequest);
|
||||
|
||||
checkLinstorAnswersThrow(cloneStarted.getMessages());
|
||||
|
||||
if (!CloneWaiter.waitFor(linstorApi, cloneStarted)) {
|
||||
throw new CloudRuntimeException("Clone for resource " + rscName + " failed.");
|
||||
}
|
||||
|
||||
return getDeviceName(linstorApi, rscName);
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
} else {
|
||||
throw new CloudRuntimeException(
|
||||
"Unable to find Linstor resource for the following template data-object ID: " + csCloneId);
|
||||
}
|
||||
}
|
||||
|
||||
private String createResourceFromSnapshot(long csSnapshotId, String rscName, StoragePoolVO storagePoolVO) {
|
||||
final String rscGrp = storagePoolVO.getUserInfo() != null && !storagePoolVO.getUserInfo().isEmpty() ?
|
||||
storagePoolVO.getUserInfo() : "DfltRscGrp";
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(storagePoolVO.getHostAddress());
|
||||
|
||||
SnapshotVO snapshotVO = _snapshotDao.findById(csSnapshotId);
|
||||
String snapName = LinstorUtil.RSC_PREFIX + snapshotVO.getUuid();
|
||||
VolumeVO volumeVO = _volumeDao.findById(snapshotVO.getVolumeId());
|
||||
String cloneRes = LinstorUtil.RSC_PREFIX + volumeVO.getPath();
|
||||
|
||||
try
|
||||
{
|
||||
s_logger.debug("Create new resource definition: " + rscName);
|
||||
ResourceDefinitionCreate rdCreate = new ResourceDefinitionCreate();
|
||||
ResourceDefinition rd = new ResourceDefinition();
|
||||
rd.setName(rscName);
|
||||
rd.setResourceGroupName(rscGrp);
|
||||
rdCreate.setResourceDefinition(rd);
|
||||
ApiCallRcList answers = linstorApi.resourceDefinitionCreate(rdCreate);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
SnapshotRestore snapshotRestore = new SnapshotRestore();
|
||||
snapshotRestore.toResource(rscName);
|
||||
|
||||
s_logger.debug("Create new volume definition for snapshot: " + cloneRes + ":" + snapName);
|
||||
answers = linstorApi.resourceSnapshotsRestoreVolumeDefinition(cloneRes, snapName, snapshotRestore);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
// restore snapshot to new resource
|
||||
s_logger.debug("Restore resource from snapshot: " + cloneRes + ":" + snapName);
|
||||
answers = linstorApi.resourceSnapshotRestore(cloneRes, snapName, snapshotRestore);
|
||||
checkLinstorAnswersThrow(answers);
|
||||
|
||||
return getDeviceName(linstorApi, rscName);
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx.getBestMessage(), apiEx);
|
||||
}
|
||||
}
|
||||
|
||||
private String createVolume(VolumeInfo volumeInfo, StoragePoolVO storagePoolVO) {
|
||||
long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot");
|
||||
long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate");
|
||||
|
||||
if (csSnapshotId > 0) {
|
||||
return createResourceFromSnapshot(csSnapshotId, LinstorUtil.RSC_PREFIX + volumeInfo.getUuid(), storagePoolVO);
|
||||
} else if (csTemplateId > 0) {
|
||||
return cloneResource(csTemplateId, volumeInfo, storagePoolVO);
|
||||
} else {
|
||||
return createResource(volumeInfo, storagePoolVO);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleSnapshotDetails(long csSnapshotId, String name, String value) {
|
||||
_snapshotDetailsDao.removeDetail(csSnapshotId, name);
|
||||
SnapshotDetailsVO snapshotDetails = new SnapshotDetailsVO(csSnapshotId, name, value, false);
|
||||
_snapshotDetailsDao.persist(snapshotDetails);
|
||||
}
|
||||
|
||||
private void addTempVolumeToDb(long csSnapshotId, String tempVolumeName) {
|
||||
// TEMP_VOLUME_ID is needed to find which temporary resource should be deleted after it has been copied on the agent side
|
||||
handleSnapshotDetails(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID, LinstorUtil.RSC_PREFIX + tempVolumeName);
|
||||
// the IQN detail is used on the agent side as the source to copy from, even though Linstor itself has nothing to do with IQNs
|
||||
handleSnapshotDetails(csSnapshotId, DiskTO.IQN, tempVolumeName);
|
||||
}
|
||||
|
||||
private void removeTempVolumeFromDb(long csSnapshotId) {
|
||||
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID);
|
||||
|
||||
if (snapshotDetails == null || snapshotDetails.getValue() == null) {
|
||||
throw new CloudRuntimeException(
|
||||
"'removeTempVolumeId' should not be invoked unless " + LinstorUtil.TEMP_VOLUME_ID + " exists.");
|
||||
}
|
||||
|
||||
String originalVolumeId = snapshotDetails.getValue();
|
||||
|
||||
handleSnapshotDetails(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID, originalVolumeId);
|
||||
|
||||
_snapshotDetailsDao.remove(snapshotDetails.getId());
|
||||
}
|
||||
|
||||
private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo, StoragePoolVO storagePoolVO) {
|
||||
long csSnapshotId = snapshotInfo.getId();
|
||||
|
||||
SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, "tempVolume");
|
||||
|
||||
if (snapshotDetails != null && snapshotDetails.getValue() != null &&
|
||||
snapshotDetails.getValue().equalsIgnoreCase("create"))
|
||||
{
|
||||
final String csName = "Temp-" + snapshotInfo.getUuid();
|
||||
final String tempRscName = LinstorUtil.RSC_PREFIX + csName;
|
||||
createResourceFromSnapshot(csSnapshotId, tempRscName, storagePoolVO);
|
||||
|
||||
s_logger.debug("Temp resource created: " + tempRscName);
|
||||
addTempVolumeToDb(csSnapshotId, csName);
|
||||
}
|
||||
else if (snapshotDetails != null && snapshotDetails.getValue() != null &&
|
||||
snapshotDetails.getValue().equalsIgnoreCase("delete"))
|
||||
{
|
||||
snapshotDetails = _snapshotDetailsDao.findDetail(csSnapshotId, LinstorUtil.TEMP_VOLUME_ID);
|
||||
|
||||
deleteResourceDefinition(storagePoolVO, snapshotDetails.getValue());
|
||||
|
||||
s_logger.debug("Temp resource deleted: " + snapshotDetails.getValue());
|
||||
removeTempVolumeFromDb(csSnapshotId);
|
||||
}
|
||||
else {
|
||||
throw new CloudRuntimeException("Invalid state in 'createVolumeFromSnapshot(SnapshotInfo, StoragePoolVO)'");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void createAsync(DataStore dataStore, DataObject vol, AsyncCompletionCallback<CreateCmdResult> callback)
|
||||
{
|
||||
String devPath = null;
|
||||
String errMsg = null;
|
||||
StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
|
||||
|
||||
try
|
||||
{
|
||||
switch (vol.getType())
|
||||
{
|
||||
case VOLUME:
|
||||
VolumeInfo volumeInfo = (VolumeInfo) vol;
|
||||
VolumeVO volume = _volumeDao.findById(volumeInfo.getId());
|
||||
s_logger.debug("createAsync - creating volume");
|
||||
devPath = createVolume(volumeInfo, storagePool);
|
||||
volume.setFolder("/dev/");
|
||||
volume.setPoolId(storagePool.getId());
|
||||
volume.setUuid(vol.getUuid());
|
||||
volume.setPath(vol.getUuid());
|
||||
|
||||
_volumeDao.update(volume.getId(), volume);
|
||||
break;
|
||||
case SNAPSHOT:
|
||||
s_logger.debug("createAsync - SNAPSHOT");
|
||||
createVolumeFromSnapshot((SnapshotInfo) vol, storagePool);
|
||||
break;
|
||||
case TEMPLATE:
|
||||
errMsg = "creating template - not supported";
|
||||
s_logger.error("createAsync - " + errMsg);
|
||||
break;
|
||||
default:
|
||||
errMsg = "Invalid DataObjectType (" + vol.getType() + ") passed to createAsync";
|
||||
s_logger.error(errMsg);
|
||||
}
|
||||
} catch (Exception ex)
|
||||
{
|
||||
errMsg = ex.getMessage();
|
||||
|
||||
s_logger.error("createAsync: " + errMsg);
|
||||
if (callback == null)
|
||||
{
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
if (callback != null)
|
||||
{
|
||||
CreateCmdResult result = new CreateCmdResult(devPath, new Answer(null, errMsg == null, errMsg));
|
||||
result.setResult(errMsg);
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void revertSnapshot(
|
||||
SnapshotInfo snapshot,
|
||||
SnapshotInfo snapshotOnPrimaryStore,
|
||||
AsyncCompletionCallback<CommandResult> callback)
|
||||
{
|
||||
s_logger.debug("Linstor: revertSnapshot");
|
||||
final VolumeInfo volumeInfo = snapshot.getBaseVolume();
|
||||
VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
|
||||
if (volumeVO == null || volumeVO.getRemoved() != null) {
|
||||
CommandResult commandResult = new CommandResult();
|
||||
commandResult.setResult("The volume that the snapshot belongs to no longer exists.");
|
||||
callback.complete(commandResult);
|
||||
return;
|
||||
}
|
||||
|
||||
String resultMsg;
|
||||
try {
|
||||
final StoragePool pool = (StoragePool) snapshot.getDataStore();
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeInfo.getUuid();
|
||||
final String snapName = LinstorUtil.RSC_PREFIX + snapshot.getUuid();
|
||||
final DevelopersApi linstorApi = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
|
||||
ApiCallRcList answers = linstorApi.resourceSnapshotRollback(rscName, snapName);
|
||||
resultMsg = checkLinstorAnswers(answers);
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error("Linstor: ApiEx - " + apiEx.getMessage());
|
||||
resultMsg = apiEx.getBestMessage();
|
||||
}
|
||||
|
||||
if (callback != null)
|
||||
{
|
||||
CommandResult result = new CommandResult();
|
||||
result.setResult(resultMsg);
|
||||
callback.complete(result);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canCopy(DataObject srcData, DataObject destData)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback)
|
||||
{
|
||||
// as long as canCopy is false, this isn't called
|
||||
s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback)
|
||||
{
|
||||
// as long as canCopy is false, this isn't called
|
||||
s_logger.debug("Linstor: copyAsync with srcdata: " + srcData.getUuid());
|
||||
}
|
||||
|
||||
private CreateCmdResult notifyResize(
|
||||
DataObject data,
|
||||
long oldSize,
|
||||
ResizeVolumePayload resizeParameter)
|
||||
{
|
||||
VolumeObject vol = (VolumeObject) data;
|
||||
StoragePool pool = (StoragePool) data.getDataStore();
|
||||
|
||||
ResizeVolumeCommand resizeCmd =
|
||||
new ResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), oldSize, resizeParameter.newSize, resizeParameter.shrinkOk,
|
||||
resizeParameter.instanceName);
|
||||
CreateCmdResult result = new CreateCmdResult(null, null);
|
||||
try {
|
||||
ResizeVolumeAnswer answer = (ResizeVolumeAnswer) _storageMgr.sendToPool(pool, resizeParameter.hosts, resizeCmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
s_logger.debug("Resize: notified hosts");
|
||||
} else if (answer != null) {
|
||||
result.setResult(answer.getDetails());
|
||||
} else {
|
||||
s_logger.debug("return a null answer, mark it as failed for unknown reason");
|
||||
result.setResult("return a null answer, mark it as failed for unknown reason");
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("sending resize command failed", e);
|
||||
result.setResult(e.toString());
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> callback)
|
||||
{
|
||||
final VolumeObject vol = (VolumeObject) data;
|
||||
final StoragePool pool = (StoragePool) data.getDataStore();
|
||||
final DevelopersApi api = LinstorUtil.getLinstorAPI(pool.getHostAddress());
|
||||
final ResizeVolumePayload resizeParameter = (ResizeVolumePayload) vol.getpayload();
|
||||
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + vol.getPath();
|
||||
final long oldSize = vol.getSize();
|
||||
|
||||
String errMsg = null;
|
||||
VolumeDefinitionModify dfm = new VolumeDefinitionModify();
|
||||
dfm.setSizeKib(resizeParameter.newSize / 1024);
|
||||
try
|
||||
{
|
||||
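// Volume number 0: the plugin creates a single Linstor volume per resource, so only the first (and only) volume definition is resized.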
ApiCallRcList answers = api.volumeDefinitionModify(rscName, 0, dfm);
|
||||
if (answers.hasError())
|
||||
{
|
||||
s_logger.error("Resize error: " + answers.get(0).getMessage());
|
||||
errMsg = answers.get(0).getMessage();
|
||||
} else
|
||||
{
|
||||
s_logger.info(String.format("Successfully resized %s to %d kib", rscName, dfm.getSizeKib()));
|
||||
vol.setSize(resizeParameter.newSize);
|
||||
vol.update();
|
||||
}
|
||||
|
||||
} catch (ApiException apiExc)
|
||||
{
|
||||
s_logger.error(apiExc);
|
||||
errMsg = apiExc.getBestMessage();
|
||||
}
|
||||
|
||||
CreateCmdResult result;
|
||||
if (errMsg != null)
|
||||
{
|
||||
result = new CreateCmdResult(null, new Answer(null, false, errMsg));
|
||||
result.setResult(errMsg);
|
||||
} else
|
||||
{
|
||||
// notify guests
|
||||
result = notifyResize(data, oldSize, resizeParameter);
|
||||
}
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleQualityOfServiceForVolumeMigration(
|
||||
VolumeInfo volumeInfo,
|
||||
QualityOfServiceState qualityOfServiceState)
|
||||
{
|
||||
s_logger.debug("Linstor: handleQualityOfServiceForVolumeMigration");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback<CreateCmdResult> callback)
|
||||
{
|
||||
s_logger.debug("Linstor: takeSnapshot with snapshot: " + snapshotInfo.getUuid());
|
||||
|
||||
final VolumeInfo volumeInfo = snapshotInfo.getBaseVolume();
|
||||
final VolumeVO volumeVO = _volumeDao.findById(volumeInfo.getId());
|
||||
|
||||
long storagePoolId = volumeVO.getPoolId();
|
||||
final StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);
|
||||
final DevelopersApi api = LinstorUtil.getLinstorAPI(storagePool.getHostAddress());
|
||||
final String rscName = LinstorUtil.RSC_PREFIX + volumeVO.getPath();
|
||||
|
||||
Snapshot snapshot = new Snapshot();
|
||||
snapshot.setName(getSnapshotName(snapshotInfo.getUuid()));
|
||||
|
||||
CreateCmdResult result;
|
||||
try
|
||||
{
|
||||
ApiCallRcList answers = api.resourceSnapshotCreate(rscName, snapshot);
|
||||
|
||||
if (answers.hasError())
|
||||
{
|
||||
final String errMsg = answers.get(0).getMessage();
|
||||
s_logger.error("Snapshot error: " + errMsg);
|
||||
result = new CreateCmdResult(null, new Answer(null, false, errMsg));
|
||||
result.setResult(errMsg);
|
||||
} else
|
||||
{
|
||||
s_logger.info(String.format("Successfully took snapshot from %s", rscName));
|
||||
|
||||
SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO();
|
||||
snapshotObjectTo.setPath(rscName + "-" + snapshotInfo.getName());
|
||||
|
||||
result = new CreateCmdResult(null, new CreateObjectAnswer(snapshotObjectTo));
|
||||
result.setResult(null);
|
||||
}
|
||||
} catch (ApiException apiExc)
|
||||
{
|
||||
s_logger.error(apiExc);
|
||||
result = new CreateCmdResult(null, new Answer(null, false, apiExc.getBestMessage()));
|
||||
result.setResult(apiExc.getBestMessage());
|
||||
}
|
||||
|
||||
callback.complete(result);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canProvideStorageStats() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<Long, Long> getStorageStats(StoragePool storagePool) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canProvideVolumeStats() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Pair<Long, Long> getVolumeStats(StoragePool storagePool, String volumeId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,335 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.lifecycle;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import com.cloud.agent.AgentManager;
|
||||
import com.cloud.agent.api.Answer;
|
||||
import com.cloud.agent.api.CreateStoragePoolCommand;
|
||||
import com.cloud.agent.api.StoragePoolInfo;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
import com.cloud.host.Host;
|
||||
import com.cloud.host.HostVO;
|
||||
import com.cloud.hypervisor.Hypervisor.HypervisorType;
|
||||
import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.storage.Storage;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolAutomation;
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
|
||||
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
|
||||
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
|
||||
import org.apache.cloudstack.storage.datastore.util.LinstorUtil;
|
||||
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
public class LinstorPrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
|
||||
private static final Logger s_logger = Logger.getLogger(LinstorPrimaryDataStoreLifeCycleImpl.class);
|
||||
|
||||
@Inject
|
||||
private ClusterDao clusterDao;
|
||||
@Inject
|
||||
PrimaryDataStoreDao _primaryDataStoreDao;
|
||||
@Inject
|
||||
private ResourceManager resourceMgr;
|
||||
@Inject
|
||||
private StorageManager _storageMgr;
|
||||
@Inject
|
||||
PrimaryDataStoreHelper dataStoreHelper;
|
||||
@Inject
|
||||
private StoragePoolAutomation storagePoolAutomation;
|
||||
@Inject
|
||||
private CapacityManager _capacityMgr;
|
||||
@Inject
|
||||
AgentManager _agentMgr;
|
||||
|
||||
public LinstorPrimaryDataStoreLifeCycleImpl()
|
||||
{
|
||||
}
|
||||
|
||||
private static boolean isSupportedHypervisorType(HypervisorType hypervisorType) {
|
||||
return HypervisorType.KVM.equals(hypervisorType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStore initialize(Map<String, Object> dsInfos) {
|
||||
String url = (String) dsInfos.get("url");
|
||||
Long zoneId = (Long) dsInfos.get("zoneId");
|
||||
Long podId = (Long) dsInfos.get("podId");
|
||||
Long clusterId = (Long) dsInfos.get("clusterId");
|
||||
String storagePoolName = (String) dsInfos.get("name");
|
||||
String providerName = (String) dsInfos.get("providerName");
|
||||
String tags = (String) dsInfos.get("tags");
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
|
||||
|
||||
final String resourceGroup = details.get(LinstorUtil.RSC_GROUP);
|
||||
|
||||
final String uuid = UUID.randomUUID().toString();
|
||||
|
||||
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
|
||||
|
||||
// If the primary data store is cluster-wide, validate the pod and zone IDs and set
|
||||
// the podId and clusterId parameters; the pool UUID itself is always randomly generated above.
|
||||
if (clusterId != null) {
|
||||
if (podId == null) {
|
||||
throw new CloudRuntimeException("The Pod ID must be specified.");
|
||||
}
|
||||
if (zoneId == null) {
|
||||
throw new CloudRuntimeException("The Zone ID must be specified.");
|
||||
}
|
||||
ClusterVO cluster = clusterDao.findById(clusterId);
|
||||
s_logger.info("Linstor: Setting Linstor cluster-wide primary storage uuid to " + uuid);
|
||||
parameters.setPodId(podId);
|
||||
parameters.setClusterId(clusterId);
|
||||
|
||||
HypervisorType hypervisorType = cluster.getHypervisorType();
|
||||
|
||||
if (!isSupportedHypervisorType(hypervisorType)) {
|
||||
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
|
||||
}
|
||||
}
|
||||
|
||||
if (!url.contains("://")) {
|
||||
url = "http://" + url;
|
||||
}
|
||||
|
||||
URL controllerURL;
|
||||
int port = 3370;
|
||||
try
|
||||
{
|
||||
controllerURL = new URL(url);
|
||||
if (!controllerURL.getProtocol().startsWith("http")) {
|
||||
throw new IllegalArgumentException("Linstor controller URL wrong protocol: " + url);
|
||||
}
|
||||
if (!controllerURL.getPath().isEmpty()) {
|
||||
throw new IllegalArgumentException("Linstor controller URL shouldn't have a path: " + url);
|
||||
}
|
||||
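// No explicit port given: fall back to the Linstor controller defaults (3370 for http, 3371 for https).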
if (controllerURL.getPort() == -1) {
|
||||
port = controllerURL.getProtocol().equals("https") ? 3371 : 3370;
|
||||
url += ":" + port;
|
||||
}
|
||||
} catch (MalformedURLException e)
|
||||
{
|
||||
throw new IllegalArgumentException("Linstor controller URL is not valid: " + e);
|
||||
}
|
||||
|
||||
long capacityBytes = LinstorUtil.getCapacityBytes(url, resourceGroup);
|
||||
|
||||
if (capacityBytes <= 0) {
|
||||
throw new IllegalArgumentException("'capacityBytes' must be present and greater than 0.");
|
||||
}
|
||||
|
||||
parameters.setHost(url);
|
||||
parameters.setPort(port);
|
||||
parameters.setPath(resourceGroup);
|
||||
parameters.setType(Storage.StoragePoolType.Linstor);
|
||||
parameters.setUuid(uuid);
|
||||
parameters.setZoneId(zoneId);
|
||||
parameters.setName(storagePoolName);
|
||||
parameters.setProviderName(providerName);
|
||||
parameters.setManaged(false);
|
||||
parameters.setCapacityBytes(capacityBytes);
|
||||
parameters.setUsedBytes(0);
|
||||
parameters.setCapacityIops(0L);
|
||||
parameters.setHypervisorType(HypervisorType.KVM);
|
||||
parameters.setTags(tags);
|
||||
parameters.setDetails(details);
|
||||
parameters.setUserInfo(resourceGroup);
|
||||
|
||||
return dataStoreHelper.createPrimaryDataStore(parameters);
|
||||
}
|
||||
|
||||
protected boolean createStoragePool(long hostId, StoragePool pool) {
|
||||
s_logger.debug("creating pool " + pool.getName() + " on host " + hostId);
|
||||
|
||||
if (pool.getPoolType() != Storage.StoragePoolType.Linstor) {
|
||||
s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType());
|
||||
return false;
|
||||
}
|
||||
CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, pool);
|
||||
final Answer answer = _agentMgr.easySend(hostId, cmd);
|
||||
if (answer != null && answer.getResult()) {
|
||||
return true;
|
||||
} else {
|
||||
_primaryDataStoreDao.expunge(pool.getId());
|
||||
String msg = answer != null ?
|
||||
"Can not create storage pool through host " + hostId + " due to " + answer.getDetails() :
|
||||
"Can not create storage pool through host " + hostId + " due to CreateStoragePoolCommand returns null";
|
||||
s_logger.warn(msg);
|
||||
throw new CloudRuntimeException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
|
||||
final PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore;
|
||||
|
||||
final ClusterVO cluster = clusterDao.findById(primaryDataStoreInfo.getClusterId());
|
||||
final HypervisorType hypervisorType = cluster.getHypervisorType();
|
||||
if (!isSupportedHypervisorType(hypervisorType)) {
|
||||
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
|
||||
}
|
||||
|
||||
// check if there is at least one host up in this cluster
|
||||
List<HostVO> allHosts = resourceMgr.listAllUpAndEnabledHosts(Host.Type.Routing,
|
||||
primaryDataStoreInfo.getClusterId(), primaryDataStoreInfo.getPodId(),
|
||||
primaryDataStoreInfo.getDataCenterId());
|
||||
|
||||
if (allHosts.isEmpty()) {
|
||||
_primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
|
||||
|
||||
throw new CloudRuntimeException(
|
||||
"No host up to associate a storage pool with in cluster " + primaryDataStoreInfo.getClusterId());
|
||||
}
|
||||
|
||||
List<HostVO> poolHosts = new ArrayList<>();
|
||||
for (HostVO host : allHosts) {
|
||||
try {
|
||||
createStoragePool(host.getId(), primaryDataStoreInfo);
|
||||
|
||||
_storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId());
|
||||
|
||||
poolHosts.add(host);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e);
|
||||
}
|
||||
}
|
||||
|
||||
if (poolHosts.isEmpty()) {
|
||||
s_logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '"
|
||||
+ primaryDataStoreInfo.getClusterId() + "'.");
|
||||
|
||||
_primaryDataStoreDao.expunge(primaryDataStoreInfo.getId());
|
||||
|
||||
throw new CloudRuntimeException("Failed to access storage pool");
|
||||
}
|
||||
|
||||
dataStoreHelper.attachCluster(dataStore);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
|
||||
if (!isSupportedHypervisorType(hypervisorType)) {
|
||||
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
|
||||
}
|
||||
|
||||
List<HostVO> hosts = resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType,
|
||||
scope.getScopeId());
|
||||
|
||||
for (HostVO host : hosts) {
|
||||
try {
|
||||
_storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
|
||||
}
|
||||
}
|
||||
|
||||
dataStoreHelper.attachZone(dataStore, hypervisorType);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean maintain(DataStore dataStore) {
|
||||
storagePoolAutomation.maintain(dataStore);
|
||||
dataStoreHelper.maintain(dataStore);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancelMaintain(DataStore store) {
|
||||
dataStoreHelper.cancelMaintain(store);
|
||||
storagePoolAutomation.cancelMaintain(store);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean deleteDataStore(DataStore store) {
|
||||
return dataStoreHelper.deletePrimaryDataStore(store);
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore)
|
||||
*/
|
||||
@Override
|
||||
public boolean migrateToObjectStore(DataStore store) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateStoragePool(StoragePool storagePool, Map<String, String> details) {
|
||||
StoragePoolVO storagePoolVo = _primaryDataStoreDao.findById(storagePool.getId());
|
||||
|
||||
String strCapacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES);
|
||||
Long capacityBytes = strCapacityBytes != null ? Long.parseLong(strCapacityBytes) : null;
|
||||
|
||||
if (capacityBytes != null) {
|
||||
long usedBytes = _capacityMgr.getUsedBytes(storagePoolVo);
|
||||
|
||||
if (capacityBytes < usedBytes) {
|
||||
throw new CloudRuntimeException(
|
||||
"Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes");
|
||||
}
|
||||
}
|
||||
|
||||
String strCapacityIops = details.get(PrimaryDataStoreLifeCycle.CAPACITY_IOPS);
|
||||
Long capacityIops = strCapacityIops != null ? Long.parseLong(strCapacityIops) : null;
|
||||
|
||||
if (capacityIops != null) {
|
||||
long usedIops = _capacityMgr.getUsedIops(storagePoolVo);
|
||||
|
||||
if (capacityIops < usedIops) {
|
||||
throw new CloudRuntimeException(
|
||||
"Cannot reduce the number of IOPS for this storage pool as it would lead to an insufficient number of IOPS");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void enableStoragePool(DataStore store) {
|
||||
dataStoreHelper.enable(store);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void disableStoragePool(DataStore store) {
|
||||
dataStoreHelper.disable(store);
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,73 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.provider;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.cloud.utils.component.ComponentContext;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
|
||||
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
|
||||
import org.apache.cloudstack.storage.datastore.driver.LinstorPrimaryDataStoreDriverImpl;
|
||||
import org.apache.cloudstack.storage.datastore.lifecycle.LinstorPrimaryDataStoreLifeCycleImpl;
|
||||
|
||||
public class LinstorPrimaryDatastoreProviderImpl implements PrimaryDataStoreProvider {
|
||||
private final static String PROVIDER_NAME = "Linstor";
|
||||
protected PrimaryDataStoreDriver driver;
|
||||
protected HypervisorHostListener listener;
|
||||
protected DataStoreLifeCycle lifecycle;
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return PROVIDER_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStoreLifeCycle getDataStoreLifeCycle() {
|
||||
return this.lifecycle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean configure(Map<String, Object> params) {
|
||||
lifecycle = ComponentContext.inject(LinstorPrimaryDataStoreLifeCycleImpl.class);
|
||||
driver = ComponentContext.inject(LinstorPrimaryDataStoreDriverImpl.class);
|
||||
listener = ComponentContext.inject(DefaultHostListener.class);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PrimaryDataStoreDriver getDataStoreDriver() {
|
||||
return this.driver;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HypervisorHostListener getHostListener() {
|
||||
return this.listener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<DataStoreProviderType> getTypes() {
|
||||
Set<DataStoreProviderType> types = new HashSet<>();
|
||||
types.add(DataStoreProviderType.PRIMARY);
|
||||
return types;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@@ -0,0 +1,81 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package org.apache.cloudstack.storage.datastore.util;
|
||||
|
||||
import com.linbit.linstor.api.ApiClient;
|
||||
import com.linbit.linstor.api.ApiException;
|
||||
import com.linbit.linstor.api.Configuration;
|
||||
import com.linbit.linstor.api.DevelopersApi;
|
||||
import com.linbit.linstor.api.model.ProviderKind;
|
||||
import com.linbit.linstor.api.model.ResourceGroup;
|
||||
import com.linbit.linstor.api.model.StoragePool;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import com.cloud.utils.exception.CloudRuntimeException;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
public class LinstorUtil {
|
||||
private static final Logger s_logger = Logger.getLogger(LinstorUtil.class);
|
||||
|
||||
public static final String RSC_PREFIX = "cs-";
|
||||
public static final String RSC_GROUP = "resourceGroup";
|
||||
|
||||
public static final String TEMP_VOLUME_ID = "tempVolumeId";
|
||||
|
||||
public static final String CLUSTER_DEFAULT_MIN_IOPS = "clusterDefaultMinIops";
|
||||
public static final String CLUSTER_DEFAULT_MAX_IOPS = "clusterDefaultMaxIops";
|
||||
|
||||
public static DevelopersApi getLinstorAPI(String linstorUrl) {
|
||||
ApiClient client = Configuration.getDefaultApiClient();
|
||||
client.setBasePath(linstorUrl);
|
||||
return new DevelopersApi(client);
|
||||
}
|
||||
|
||||
public static long getCapacityBytes(String linstorUrl, String rscGroupName) {
|
||||
DevelopersApi linstorApi = getLinstorAPI(linstorUrl);
|
||||
try {
|
||||
List<ResourceGroup> rscGrps = linstorApi.resourceGroupList(
|
||||
Collections.singletonList(rscGroupName),
|
||||
null,
|
||||
null,
|
||||
null);
|
||||
|
||||
if (rscGrps.isEmpty()) {
|
||||
final String errMsg = String.format("Linstor: Resource group '%s' not found", rscGroupName);
|
||||
s_logger.error(errMsg);
|
||||
throw new CloudRuntimeException(errMsg);
|
||||
}
|
||||
|
||||
List<StoragePool> storagePools = linstorApi.viewStoragePools(
|
||||
Collections.emptyList(),
|
||||
rscGrps.get(0).getSelectFilter().getStoragePoolList(),
|
||||
null,
|
||||
null,
|
||||
null
|
||||
);
|
||||
|
||||
return storagePools.stream()
|
||||
.filter(sp -> sp.getProviderKind() != ProviderKind.DISKLESS)
|
||||
.mapToLong(StoragePool::getTotalCapacity).sum() * 1024; // linstor uses kiB
|
||||
} catch (ApiException apiEx) {
|
||||
s_logger.error(apiEx.getMessage());
|
||||
throw new CloudRuntimeException(apiEx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,18 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
name=storage-volume-linstor
|
||||
parent=storage
|
||||
|
|
@@ -0,0 +1,32 @@
|
|||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<beans xmlns="http://www.springframework.org/schema/beans"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns:context="http://www.springframework.org/schema/context"
|
||||
xmlns:aop="http://www.springframework.org/schema/aop"
|
||||
xsi:schemaLocation="http://www.springframework.org/schema/beans
|
||||
http://www.springframework.org/schema/beans/spring-beans.xsd
|
||||
http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop.xsd
|
||||
http://www.springframework.org/schema/context
|
||||
http://www.springframework.org/schema/context/spring-context.xsd"
|
||||
>
|
||||
|
||||
<bean id="linstorPrimaryDataStoreProviderImpl"
|
||||
class="org.apache.cloudstack.storage.datastore.provider.LinstorPrimaryDatastoreProviderImpl" />
|
||||
</beans>
|
||||
pom.xml
|
|
@@ -144,7 +144,7 @@
|
|||
<cs.javax.annotation.version>1.3.2</cs.javax.annotation.version>
|
||||
<cs.jaxb.version>2.3.0</cs.jaxb.version>
|
||||
<cs.jaxws.version>2.3.2-1</cs.jaxws.version>
|
||||
<cs.jersey-bundle.version>1.19.4</cs.jersey-bundle.version>
|
||||
<cs.jersey-client.version>2.26</cs.jersey-client.version>
|
||||
<cs.jetty.version>9.4.36.v20210114</cs.jetty.version>
|
||||
<cs.jetty-maven-plugin.version>9.4.27.v20200227</cs.jetty-maven-plugin.version>
|
||||
<cs.jna.version>5.5.0</cs.jna.version>
|
||||
|
|
@@ -161,6 +161,7 @@
|
|||
<cs.nitro.version>10.1</cs.nitro.version>
|
||||
<cs.opensaml.version>2.6.4</cs.opensaml.version>
|
||||
<cs.rados-java.version>0.6.0</cs.rados-java.version>
|
||||
<cs.java-linstor.version>0.3.0</cs.java-linstor.version>
|
||||
<cs.reflections.version>0.9.12</cs.reflections.version>
|
||||
<cs.servicemix.version>3.4.4_1</cs.servicemix.version>
|
||||
<cs.servlet.version>4.0.1</cs.servlet.version>
|
||||
|
|
@@ -323,11 +324,6 @@
|
|||
<artifactId>amqp-client</artifactId>
|
||||
<version>${cs.amqp-client.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.sun.jersey</groupId>
|
||||
<artifactId>jersey-bundle</artifactId>
|
||||
<version>${cs.jersey-bundle.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.thoughtworks.xstream</groupId>
|
||||
<artifactId>xstream</artifactId>
|
||||
|
|
|
|||
|
|
@@ -1244,7 +1244,10 @@ public class ApiDBUtils {
|
|||
ListIterator<StoragePoolVO> itr = pools.listIterator();
|
||||
while(itr.hasNext()) {
|
||||
StoragePoolVO pool = itr.next();
|
||||
if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.PowerFlex || pool.getPoolType() == StoragePoolType.CLVM) {
|
||||
if(pool.getPoolType() == StoragePoolType.RBD ||
|
||||
pool.getPoolType() == StoragePoolType.PowerFlex ||
|
||||
pool.getPoolType() == StoragePoolType.CLVM ||
|
||||
pool.getPoolType() == StoragePoolType.Linstor) {
|
||||
// This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwise,
|
||||
// If this check is not passed, the hypervisor type will remain OVM.
|
||||
type = HypervisorType.KVM;
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,31 @@
|
|||
# Linstor storage plugin
|
||||
|
||||
This directory contains the basic VM and volume life-cycle tests for the Linstor storage pool (on the KVM hypervisor).
|
||||
|
||||
# Running tests
|
||||
|
||||
To run the basic volume tests, first update the test data below to match your CloudStack environment; a hypothetical example of the Linstor URL format follows the block.
|
||||
|
||||
````
|
||||
TestData.zoneId: <id of zone>
|
||||
TestData.clusterId: <id of cluster>
|
||||
TestData.domainId: <id of domain>
|
||||
TestData.url: <management server IP>
|
||||
TestData.primaryStorage "url": <Linstor storage pool url (see the format below) to use as primary storage>
|
||||
````
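The URL format itself is not spelled out in this excerpt. As a rough guide (an assumption based on the `initialize` method of `LinstorPrimaryDataStoreLifeCycleImpl` in this change), it is the http(s) address of the linstor-controller without a path; when no port is given, 3370 is assumed for http and 3371 for https. A purely hypothetical example:

````
TestData.primaryStorage "url": http://10.10.10.100:3370   # hypothetical controller address
````

The Linstor resource group is supplied through the storage pool `details` (the marvin `StoragePool.create` helper gains a `details` parameter in this change), not as part of the URL.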
|
||||
|
||||
To enable and run the volume migration tests, also update the test data below
|
||||
|
||||
````
|
||||
TestData.migrationTests: True
|
||||
TestData.primaryStorageSameInstance "url": <Linstor url (see the format below) of a pool on the same storage cluster as TestData.primaryStorage>
|
||||
TestData.primaryStorageDistinctInstance "url": <Linstor url (see the format below) of the pool not on the same storage cluster as TestData.primaryStorage>
|
||||
````
|
||||
|
||||
Then run the tests using the Python test runner nosetests:
|
||||
|
||||
````
|
||||
nosetests --with-marvin --marvin-config=<marvin-cfg-file> <cloudstack-dir>/test/integration/plugins/linstor/test_linstor_volumes.py --zone=<zone> --hypervisor=kvm
|
||||
````
|
||||
|
||||
You can also run these tests directly from an IDE such as PyDev or PyCharm.
|
||||
File diff suppressed because it is too large
|
|
@@ -2978,7 +2978,8 @@ class StoragePool:
|
|||
@classmethod
|
||||
def create(cls, apiclient, services, scope=None, clusterid=None,
|
||||
zoneid=None, podid=None, provider=None, tags=None,
|
||||
capacityiops=None, capacitybytes=None, hypervisor=None):
|
||||
capacityiops=None, capacitybytes=None, hypervisor=None,
|
||||
details=None):
|
||||
"""Create Storage pool (Primary Storage)"""
|
||||
|
||||
cmd = createStoragePool.createStoragePoolCmd()
|
||||
|
|
@@ -3030,6 +3031,13 @@ class StoragePool:
|
|||
elif "hypervisor" in services:
|
||||
cmd.hypervisor = services["hypervisor"]
|
||||
|
||||
d = services.get("details", details)
|
||||
if d:
|
||||
count = 1
|
||||
for key, value in d.items():
|
||||
setattr(cmd, "details[{}].{}".format(count, key), value)
|
||||
count = count + 1
|
||||
|
||||
return StoragePool(apiclient.createStoragePool(cmd).__dict__)
|
||||
|
||||
def delete(self, apiclient):
|
||||
|
|
|
|||
|
|
@@ -1896,6 +1896,7 @@
|
|||
"label.routerrequiresupgrade": "Upgrade is required",
|
||||
"label.routertype": "Type",
|
||||
"label.routing.host": "Routing Host",
|
||||
"label.resourcegroup": "Resource Group",
|
||||
"label.rule": "Rule",
|
||||
"label.rule.number": "Rule Number",
|
||||
"label.rules": "Rules",
|
||||
|
|
@@ -2969,6 +2970,7 @@
|
|||
"message.error.zone.for.cluster": "Please select zone for Kubernetes cluster",
|
||||
"message.error.zone.name": "Please enter zone name",
|
||||
"message.error.zone.type": "Please select zone type",
|
||||
"message.error.linstor.resourcegroup": "Please enter the Linstor Resource-Group",
|
||||
"message.fail.to.delete": "Failed to delete.",
|
||||
"message.failed.to.add": "Failed to add",
|
||||
"message.failed.to.assign.vms": "Failed to assign VMs",
|
||||
|
|
@@ -3169,6 +3171,7 @@
|
|||
"message.reset.password.warning.notpasswordenabled": "The template of this instance was created without password enabled",
|
||||
"message.reset.password.warning.notstopped": "Your instance must be stopped before attempting to change its current password",
|
||||
"message.reset.vpn.connection": "Please confirm that you want to reset VPN connection",
|
||||
"message.linstor.resourcegroup.description": "Linstor resource group to use for primary storage",
|
||||
"message.resize.volume.failed": "Failed to resize volume",
|
||||
"message.resource.not.found": "Resource not found",
|
||||
"message.restart.mgmt.server": "Please restart your management server(s) for your new settings to take effect.",
|
||||
|
|
@@ -3196,7 +3199,7 @@
|
|||
"message.select.tier": "Please select a tier",
|
||||
"message.select.zone.description": "Select type of zone basic/advanced",
|
||||
"message.select.zone.hint": "This is the type of zone deployement that you want to use. Basic zone: provides a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). Advanced zone: For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support.",
|
||||
"message.server.description": "NFS, iSCSI, or PreSetup: IP address or DNS name of the storage device. VMWare PreSetup: IP address or DNS name of the vCenter server.",
|
||||
"message.server.description": "NFS, iSCSI, or PreSetup: IP address or DNS name of the storage device. VMWare PreSetup: IP address or DNS name of the vCenter server. Linstor: http(s) url of the linstor-controller.",
|
||||
"message.set.default.nic": "Please confirm that you would like to make this NIC the default for this VM.",
|
||||
"message.set.default.nic.manual": "Please manually update the default NIC on the VM now.",
|
||||
"message.setting.updated": "Setting Updated:",
|
||||
|
|
|
|||
|
|
@@ -136,7 +136,7 @@
|
|||
</a-select>
|
||||
</a-form-item>
|
||||
<div
|
||||
v-if="protocolSelected === 'nfs' || protocolSelected === 'SMB' || protocolSelected === 'iscsi' || protocolSelected === 'vmfs'|| protocolSelected === 'Gluster' ||
|
||||
v-if="protocolSelected === 'nfs' || protocolSelected === 'SMB' || protocolSelected === 'iscsi' || protocolSelected === 'vmfs'|| protocolSelected === 'Gluster' || protocolSelected === 'Linstor' ||
|
||||
(protocolSelected === 'PreSetup' && hypervisorType === 'VMware') || protocolSelected === 'datastorecluster'">
|
||||
<a-form-item>
|
||||
<tooltip-label slot="label" :title="$t('label.server')" :tooltip="$t('message.server.description')"/>
|
||||
|
|
@@ -193,7 +193,19 @@
|
|||
</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
<div v-if="this.providerSelected !== 'DefaultPrimary' && this.providerSelected !== 'PowerFlex'">
|
||||
<div v-if="protocolSelected !== 'Linstor'">
|
||||
<a-form-item>
|
||||
<tooltip-label slot="label" :title="$t('label.providername')" :tooltip="apiParams.provider.description"/>
|
||||
<a-select
|
||||
v-decorator="['provider', { initialValue: providerSelected, rules: [{ required: true, message: `${$t('label.required')}`}] }]"
|
||||
@change="updateProviderAndProtocol">
|
||||
<a-select-option :value="provider" v-for="(provider,idx) in providers" :key="idx">
|
||||
{{ provider }}
|
||||
</a-select-option>
|
||||
</a-select>
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="this.providerSelected !== 'DefaultPrimary' && this.providerSelected !== 'PowerFlex' && this.providerSelected !== 'Linstor'">
|
||||
<a-form-item>
|
||||
<tooltip-label slot="label" :title="$t('label.ismanaged')" :tooltip="apiParams.managed.description"/>
|
||||
<a-checkbox-group v-decorator="['managed']" >
|
||||
|
|
@@ -254,6 +266,17 @@
|
|||
<a-input v-decorator="['volume']" />
|
||||
</a-form-item>
|
||||
</div>
|
||||
<div v-if="protocolSelected === 'Linstor'">
|
||||
<a-form-item>
|
||||
<span slot="label">
|
||||
{{ $t('label.resourcegroup') }}
|
||||
<a-tooltip :title="$t('message.linstor.resourcegroup.description')">
|
||||
<a-icon type="info-circle" style="color: rgba(0,0,0,.45)" />
|
||||
</a-tooltip>
|
||||
</span>
|
||||
<a-input v-decorator="['resourcegroup', { rules: [{ required: true, message: `${$t('label.required')}` }] }]" />
|
||||
</a-form-item>
|
||||
</div>
|
||||
<a-form-item>
|
||||
<tooltip-label slot="label" :title="$t('label.storagetags')" :tooltip="apiParams.tags.description"/>
|
||||
<a-select
|
||||
|
|
@@ -408,7 +431,7 @@ export default {
|
|||
const cluster = this.clusters.find(cluster => cluster.id === this.clusterSelected)
|
||||
this.hypervisorType = cluster.hypervisortype
|
||||
if (this.hypervisorType === 'KVM') {
|
||||
this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'Gluster', 'custom']
|
||||
this.protocols = ['nfs', 'SharedMountPoint', 'RBD', 'CLVM', 'Gluster', 'Linstor', 'custom']
|
||||
} else if (this.hypervisorType === 'XenServer') {
|
||||
this.protocols = ['nfs', 'PreSetup', 'iscsi', 'custom']
|
||||
} else if (this.hypervisorType === 'VMware') {
|
||||
|
|
@@ -562,6 +585,16 @@ export default {
|
|||
closeModal () {
|
||||
this.$parent.$parent.close()
|
||||
},
|
||||
linstorURL (server) {
|
||||
var url
|
||||
if (server.indexOf('://') === -1) {
|
||||
url = 'http://' + server
|
||||
} else {
|
||||
url = server
|
||||
}
|
||||
|
||||
return url
|
||||
},
|
||||
handleSubmit (e) {
|
||||
e.preventDefault()
|
||||
if (this.loading) return
|
||||
|
|
@@ -650,6 +683,11 @@ export default {
|
|||
}
|
||||
var lun = values.lun
|
||||
url = this.iscsiURL(server, iqn, lun)
|
||||
} else if (values.protocol === 'Linstor') {
|
||||
url = this.linstorURL(server)
|
||||
params.provider = 'Linstor'
|
||||
values.managed = false
|
||||
params['details[0].resourceGroup'] = values.resourcegroup
|
||||
}
|
||||
params.url = url
|
||||
if (values.provider !== 'DefaultPrimary' && values.provider !== 'PowerFlex') {
|
||||
|
|
|
|||
|
|
@@ -350,7 +350,7 @@ export default {
|
|||
placeHolder: 'message.error.server',
|
||||
required: true,
|
||||
display: {
|
||||
primaryStorageProtocol: ['nfs', 'iscsi', 'gluster', 'SMB']
|
||||
primaryStorageProtocol: ['nfs', 'iscsi', 'gluster', 'SMB', 'Linstor']
|
||||
}
|
||||
},
|
||||
{
|
||||
|
|
@@ -489,6 +489,15 @@ export default {
|
|||
primaryStorageProtocol: 'vmfs'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.resourcegroup',
|
||||
key: 'primaryStorageLinstorResourceGroup',
|
||||
placeHolder: 'message.error.linstor.resourcegroup',
|
||||
required: true,
|
||||
display: {
|
||||
primaryStorageProtocol: 'Linstor'
|
||||
}
|
||||
},
|
||||
{
|
||||
title: 'label.storage.tags',
|
||||
key: 'primaryStorageTags',
|
||||
|
|
@@ -826,6 +835,10 @@ export default {
|
|||
id: 'gluster',
|
||||
description: 'Gluster'
|
||||
})
|
||||
protocols.push({
|
||||
id: 'Linstor',
|
||||
description: 'Linstor'
|
||||
})
|
||||
} else if (hypervisor === 'XenServer') {
|
||||
protocols.push({
|
||||
id: 'nfs',
|
||||
|
|
|
|||
|
|
@@ -1345,6 +1345,9 @@ export default {
|
|||
}
|
||||
path += '/' + this.prefillContent.primaryStorageVmfsDatastore?.value || ''
|
||||
url = this.vmfsURL('dummy', path)
|
||||
} else if (protocol === 'Linstor') {
|
||||
url = this.linstorURL(server)
|
||||
params['details[0].resourceGroup'] = this.prefillContent.primaryStorageLinstorResourceGroup.value
|
||||
} else {
|
||||
let iqn = this.prefillContent.primaryStorageTargetIQN?.value || ''
|
||||
if (iqn.substring(0, 1) !== '/') {
|
||||
|
|
@@ -2125,6 +2128,15 @@ export default {
|
|||
}
|
||||
return url
|
||||
},
|
||||
linstorURL (server) {
|
||||
var url
|
||||
if (server.indexOf('://') === -1) {
|
||||
url = 'http://' + server
|
||||
} else {
|
||||
url = server
|
||||
}
|
||||
return url
|
||||
},
|
||||
iscsiURL (server, iqn, lun) {
|
||||
let url = ''
|
||||
if (server.indexOf('://') === -1) {
|
||||
|
|
|
|||