mirror of https://github.com/apache/cloudstack.git
Merge abf6aae675 into bce3e54a7e
This commit is contained in:
commit
758461f5e9
|
|
@ -106,7 +106,7 @@
|
||||||
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen")
|
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen")
|
||||||
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm")
|
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm")
|
||||||
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv")
|
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv")
|
||||||
File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt")
|
File file = new File("./engine/schema/dist/systemvm-templates/sha512sum.txt")
|
||||||
def lines = file.readLines()
|
def lines = file.readLines()
|
||||||
for (template in templateList) {
|
for (template in templateList) {
|
||||||
def data = lines.findAll { it.contains(template) }
|
def data = lines.findAll { it.contains(template) }
|
||||||
|
|
@ -135,7 +135,7 @@
|
||||||
<goal>wget</goal>
|
<goal>wget</goal>
|
||||||
</goals>
|
</goals>
|
||||||
<configuration>
|
<configuration>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/md5sum.txt</url>
|
<url>${project.systemvm.template.location}/${cs.version}/sha512sum.txt</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<skipCache>true</skipCache>
|
<skipCache>true</skipCache>
|
||||||
<overwrite>true</overwrite>
|
<overwrite>true</overwrite>
|
||||||
|
|
@ -205,7 +205,7 @@
|
||||||
<checkSignature>true</checkSignature>
|
<checkSignature>true</checkSignature>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-kvm.qcow2.bz2</url>
|
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-kvm.qcow2.bz2</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<md5>${kvm.checksum}</md5>
|
<sha512>${kvm.checksum}</sha512>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
</executions>
|
</executions>
|
||||||
|
|
@ -241,7 +241,7 @@
|
||||||
<checkSignature>true</checkSignature>
|
<checkSignature>true</checkSignature>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-vmware.ova</url>
|
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-vmware.ova</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<md5>${vmware.checksum}</md5>
|
<sha512>${vmware.checksum}</sha512>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
</executions>
|
</executions>
|
||||||
|
|
@ -277,7 +277,7 @@
|
||||||
<checkSignature>true</checkSignature>
|
<checkSignature>true</checkSignature>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-xen.vhd.bz2</url>
|
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-xen.vhd.bz2</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<md5>${xen.checksum}</md5>
|
<sha512>${xen.checksum}</sha512>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
</executions>
|
</executions>
|
||||||
|
|
@ -313,7 +313,7 @@
|
||||||
<checkSignature>true</checkSignature>
|
<checkSignature>true</checkSignature>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-ovm.raw.bz2</url>
|
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-ovm.raw.bz2</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<md5>${ovm.checksum}</md5>
|
<sha512>${ovm.checksum}</sha512>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
</executions>
|
</executions>
|
||||||
|
|
@ -349,7 +349,7 @@
|
||||||
<checkSignature>true</checkSignature>
|
<checkSignature>true</checkSignature>
|
||||||
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip</url>
|
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip</url>
|
||||||
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
|
||||||
<md5>${hyperv.checksum}</md5>
|
<sha512>${hyperv.checksum}</sha512>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
</executions>
|
</executions>
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,53 @@
|
||||||
|
// Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
// or more contributor license agreements. See the NOTICE file
|
||||||
|
// distributed with this work for additional information
|
||||||
|
// regarding copyright ownership. The ASF licenses this file
|
||||||
|
// to you under the Apache License, Version 2.0 (the
|
||||||
|
// "License"); you may not use this file except in compliance
|
||||||
|
// with the License. You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing,
|
||||||
|
// software distributed under the License is distributed on an
|
||||||
|
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
// KIND, either express or implied. See the License for the
|
||||||
|
// specific language governing permissions and limitations
|
||||||
|
// under the License.
|
||||||
|
package com.cloud.hypervisor.kvm.storage;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Decorator for StorageAdapters that implement asynchronous physical disk connections to improve
|
||||||
|
* performance on VM starts with large numbers of disks.
|
||||||
|
*/
|
||||||
|
public interface AsyncPhysicalDiskConnectorDecorator {
|
||||||
|
/**
|
||||||
|
* Initiates a connection attempt (may or may not complete it depending on implementation)
|
||||||
|
* @param path
|
||||||
|
* @param pool
|
||||||
|
* @param details
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
public boolean startConnectPhysicalDisk(String path, KVMStoragePool pool, Map<String,String> details);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests if the physical disk is connected
|
||||||
|
* @param path
|
||||||
|
* @param pool
|
||||||
|
* @param details
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
public boolean isConnected(String path, KVMStoragePool pool, Map<String,String> details);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Completes a connection attempt after isConnected returns true;
|
||||||
|
* @param path
|
||||||
|
* @param pool
|
||||||
|
* @param details
|
||||||
|
* @return
|
||||||
|
* @throws Exception
|
||||||
|
*/
|
||||||
|
public boolean finishConnectPhysicalDisk(String path, KVMStoragePool pool, Map<String,String> details) throws Exception;
|
||||||
|
}
|
||||||
|
|
@ -20,6 +20,7 @@ import java.lang.reflect.Constructor;
|
||||||
import java.lang.reflect.Modifier;
|
import java.lang.reflect.Modifier;
|
||||||
import java.net.URI;
|
import java.net.URI;
|
||||||
import java.net.URISyntaxException;
|
import java.net.URISyntaxException;
|
||||||
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
@ -42,9 +43,11 @@ import com.cloud.hypervisor.kvm.resource.KVMHABase;
|
||||||
import com.cloud.hypervisor.kvm.resource.KVMHABase.PoolType;
|
import com.cloud.hypervisor.kvm.resource.KVMHABase.PoolType;
|
||||||
import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
|
import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
|
||||||
import com.cloud.storage.Storage;
|
import com.cloud.storage.Storage;
|
||||||
|
import com.cloud.storage.StorageManager;
|
||||||
import com.cloud.storage.Storage.StoragePoolType;
|
import com.cloud.storage.Storage.StoragePoolType;
|
||||||
import com.cloud.storage.StorageLayer;
|
import com.cloud.storage.StorageLayer;
|
||||||
import com.cloud.storage.Volume;
|
import com.cloud.storage.Volume;
|
||||||
|
import com.cloud.utils.StringUtils;
|
||||||
import com.cloud.utils.Pair;
|
import com.cloud.utils.Pair;
|
||||||
import com.cloud.utils.Ternary;
|
import com.cloud.utils.Ternary;
|
||||||
import com.cloud.utils.exception.CloudRuntimeException;
|
import com.cloud.utils.exception.CloudRuntimeException;
|
||||||
|
|
@ -164,6 +167,19 @@ public class KVMStoragePoolManager {
|
||||||
return adaptor.connectPhysicalDisk(volPath, pool, details, false);
|
return adaptor.connectPhysicalDisk(volPath, pool, details, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static class ConnectingDiskInfo {
|
||||||
|
ConnectingDiskInfo(VolumeObjectTO volume, StorageAdaptor adaptor, KVMStoragePool pool, Map<String, String> details) {
|
||||||
|
this.volume = volume;
|
||||||
|
this.adapter = adaptor;
|
||||||
|
this.pool = pool;
|
||||||
|
this.details = details;
|
||||||
|
}
|
||||||
|
VolumeObjectTO volume;
|
||||||
|
KVMStoragePool pool = null;
|
||||||
|
StorageAdaptor adapter = null;
|
||||||
|
Map<String,String> details = null;
|
||||||
|
}
|
||||||
|
|
||||||
public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean isVMMigrate) {
|
public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean isVMMigrate) {
|
||||||
boolean result = false;
|
boolean result = false;
|
||||||
|
|
||||||
|
|
@ -171,6 +187,10 @@ public class KVMStoragePoolManager {
|
||||||
|
|
||||||
List<DiskTO> disks = Arrays.asList(vmSpec.getDisks());
|
List<DiskTO> disks = Arrays.asList(vmSpec.getDisks());
|
||||||
|
|
||||||
|
|
||||||
|
// disks that connect in background
|
||||||
|
List<ConnectingDiskInfo> connectingDisks = new ArrayList<>();
|
||||||
|
|
||||||
for (DiskTO disk : disks) {
|
for (DiskTO disk : disks) {
|
||||||
if (disk.getType() == Volume.Type.ISO) {
|
if (disk.getType() == Volume.Type.ISO) {
|
||||||
result = true;
|
result = true;
|
||||||
|
|
@ -187,17 +207,79 @@ public class KVMStoragePoolManager {
|
||||||
KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid());
|
KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid());
|
||||||
StorageAdaptor adaptor = getStorageAdaptor(pool.getType());
|
StorageAdaptor adaptor = getStorageAdaptor(pool.getType());
|
||||||
|
|
||||||
result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails(), isVMMigrate);
|
if (adaptor instanceof AsyncPhysicalDiskConnectorDecorator) {
|
||||||
|
// If the adaptor supports async disk connection, we can start the connection
|
||||||
|
// and return immediately, allowing the connection to complete in the background.
|
||||||
|
result = ((AsyncPhysicalDiskConnectorDecorator) adaptor).startConnectPhysicalDisk(vol.getPath(), pool, disk.getDetails());
|
||||||
|
if (!result) {
|
||||||
|
logger.error("Failed to start connecting disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
if (!result) {
|
// add disk to list of disks to check later
|
||||||
logger.error("Failed to connect disks via Instance spec for Instance: " + vmName + " volume:" + vol.toString());
|
connectingDisks.add(new ConnectingDiskInfo(vol, adaptor, pool, disk.getDetails()));
|
||||||
return result;
|
} else {
|
||||||
|
result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails(), isVMMigrate);
|
||||||
|
|
||||||
|
if (!result) {
|
||||||
|
logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// if we have any connecting disks to check, wait for them to connect or timeout
|
||||||
|
if (!connectingDisks.isEmpty()) {
|
||||||
|
for (ConnectingDiskInfo connectingDisk : connectingDisks) {
|
||||||
|
StorageAdaptor adaptor = connectingDisk.adapter;
|
||||||
|
KVMStoragePool pool = connectingDisk.pool;
|
||||||
|
VolumeObjectTO volume = connectingDisk.volume;
|
||||||
|
Map<String, String> details = connectingDisk.details;
|
||||||
|
long diskWaitTimeMillis = getDiskWaitTimeMillis(details);
|
||||||
|
|
||||||
|
// wait for the disk to connect
|
||||||
|
long startTime = System.currentTimeMillis();
|
||||||
|
while (System.currentTimeMillis() - startTime < diskWaitTimeMillis) {
|
||||||
|
if (((AsyncPhysicalDiskConnectorDecorator) adaptor).isConnected(volume.getPath(), pool, details)) {
|
||||||
|
logger.debug(String.format("Disk %s connected successfully for VM %s", volume.getPath(), vmName));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
sleep(1000); // wait for 1 second before checking again
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private long getDiskWaitTimeMillis(Map<String,String> details) {
|
||||||
|
int waitTimeInSec = 60; // default wait time in seconds
|
||||||
|
if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
|
||||||
|
String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
|
||||||
|
if (StringUtils.isNotEmpty(waitTime)) {
|
||||||
|
waitTimeInSec = Integer.valueOf(waitTime).intValue();
|
||||||
|
logger.debug(String.format("%s set to %s", StorageManager.STORAGE_POOL_DISK_WAIT.toString(), waitTimeInSec));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// wait at least 60 seconds even if input was lower
|
||||||
|
if (waitTimeInSec < 60) {
|
||||||
|
logger.debug(String.format("%s was less than 60s. Increasing to 60s default.", StorageManager.STORAGE_POOL_DISK_WAIT.toString()));
|
||||||
|
waitTimeInSec = 60;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return waitTimeInSec * 1000; // convert to milliseconds
|
||||||
|
}
|
||||||
|
|
||||||
|
private boolean sleep(long millis) {
|
||||||
|
try {
|
||||||
|
Thread.sleep(millis);
|
||||||
|
return true;
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
|
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
|
||||||
logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString()));
|
logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString()));
|
||||||
if (MapUtils.isEmpty(volumeToDisconnect)) {
|
if (MapUtils.isEmpty(volumeToDisconnect)) {
|
||||||
|
|
|
||||||
|
|
@ -1607,8 +1607,20 @@ public class KVMStorageProcessor implements StorageProcessor {
|
||||||
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
|
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
|
||||||
|
|
||||||
return new DettachAnswer(disk);
|
return new DettachAnswer(disk);
|
||||||
} catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) {
|
} catch (final LibvirtException e) {
|
||||||
logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e);
|
// check if the error was related to an already unplugged event - we can safely ignore
|
||||||
|
if (e.getMessage() != null && e.getMessage().contains("is already in the process of unplug")) {
|
||||||
|
logger.debug("Volume: " + vol.getPath() + " is already unplugged, ignoring the error");
|
||||||
|
return new DettachAnswer(disk);
|
||||||
|
} else {
|
||||||
|
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
|
||||||
|
return new DettachAnswer(e.toString());
|
||||||
|
}
|
||||||
|
} catch (final InternalErrorException e) {
|
||||||
|
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
|
||||||
|
return new DettachAnswer(e.toString());
|
||||||
|
} catch (final CloudRuntimeException e) {
|
||||||
|
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
|
||||||
return new DettachAnswer(e.toString());
|
return new DettachAnswer(e.toString());
|
||||||
} finally {
|
} finally {
|
||||||
vol.clearPassphrase();
|
vol.clearPassphrase();
|
||||||
|
|
|
||||||
|
|
@ -65,10 +65,12 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
* Property keys and defaults
|
* Property keys and defaults
|
||||||
*/
|
*/
|
||||||
static final Property<Integer> CLEANUP_FREQUENCY_SECS = new Property<Integer>("multimap.cleanup.frequency.secs", 60);
|
static final Property<Integer> CLEANUP_FREQUENCY_SECS = new Property<Integer>("multimap.cleanup.frequency.secs", 60);
|
||||||
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 4);
|
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 600);
|
||||||
static final Property<Boolean> CLEANUP_ENABLED = new Property<Boolean>("multimap.cleanup.enabled", true);
|
static final Property<Boolean> CLEANUP_ENABLED = new Property<Boolean>("multimap.cleanup.enabled", true);
|
||||||
static final Property<String> CLEANUP_SCRIPT = new Property<String>("multimap.cleanup.script", "cleanStaleMaps.sh");
|
static final Property<String> CLEANUP_SCRIPT = new Property<String>("multimap.cleanup.script", "cleanStaleMaps.sh");
|
||||||
static final Property<String> CONNECT_SCRIPT = new Property<String>("multimap.connect.script", "connectVolume.sh");
|
static final Property<String> CONNECT_SCRIPT = new Property<String>("multimap.connect.script", "connectVolume.sh");
|
||||||
|
static final Property<String> START_CONNECT_SCRIPT = new Property<String>("multimap.startconnect.script", "startConnectVolume.sh");
|
||||||
|
static final Property<String> FINISH_CONNECT_SCRIPT = new Property<String>("multimap.finishconnect.script", "finishConnectVolume.sh");
|
||||||
static final Property<String> COPY_SCRIPT = new Property<String>("multimap.copy.script", "copyVolume.sh");
|
static final Property<String> COPY_SCRIPT = new Property<String>("multimap.copy.script", "copyVolume.sh");
|
||||||
static final Property<String> DISCONNECT_SCRIPT = new Property<String>("multimap.disconnect.script", "disconnectVolume.sh");
|
static final Property<String> DISCONNECT_SCRIPT = new Property<String>("multimap.disconnect.script", "disconnectVolume.sh");
|
||||||
static final Property<String> RESIZE_SCRIPT = new Property<String>("multimap.resize.script", "resizeVolume.sh");
|
static final Property<String> RESIZE_SCRIPT = new Property<String>("multimap.resize.script", "resizeVolume.sh");
|
||||||
|
|
@ -78,6 +80,8 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
static Timer cleanupTimer = new Timer();
|
static Timer cleanupTimer = new Timer();
|
||||||
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
|
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
|
||||||
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
|
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
|
||||||
|
private static String startConnectScript = START_CONNECT_SCRIPT.getFinalValue();
|
||||||
|
private static String finishConnectScript = FINISH_CONNECT_SCRIPT.getFinalValue();
|
||||||
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
|
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
|
||||||
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
|
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
|
||||||
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
|
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
|
||||||
|
|
@ -98,6 +102,16 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
throw new Error("Unable to find the connectVolume.sh script");
|
throw new Error("Unable to find the connectVolume.sh script");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
startConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), startConnectScript);
|
||||||
|
if (startConnectScript == null) {
|
||||||
|
throw new Error("Unable to find the startConnectVolume.sh script");
|
||||||
|
}
|
||||||
|
|
||||||
|
finishConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), finishConnectScript);
|
||||||
|
if (finishConnectScript == null) {
|
||||||
|
throw new Error("Unable to find the finishConnectVolume.sh script");
|
||||||
|
}
|
||||||
|
|
||||||
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
|
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
|
||||||
if (disconnectScript == null) {
|
if (disconnectScript == null) {
|
||||||
throw new Error("Unable to find the disconnectVolume.sh script");
|
throw new Error("Unable to find the disconnectVolume.sh script");
|
||||||
|
|
@ -164,9 +178,11 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
|
|
||||||
// validate we have a connection, if not we need to connect first.
|
// validate we have a connection, if not we need to connect first.
|
||||||
if (!isConnected(address.getPath())) {
|
if (!isConnected(address.getPath())) {
|
||||||
if (!connectPhysicalDisk(address, pool, null)) {
|
LOGGER.warn("Physical disk " + address.getPath() + " is not connected, a request to connectPhysicalDisk must be made before it can be used.");
|
||||||
throw new CloudRuntimeException("Unable to connect to volume " + address.getPath());
|
return null;
|
||||||
}
|
} else {
|
||||||
|
LOGGER.debug("Physical disk " + address.getPath() + " is connected, proceeding to get its size.");
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
long diskSize = getPhysicalDiskSize(address.getPath());
|
long diskSize = getPhysicalDiskSize(address.getPath());
|
||||||
|
|
@ -222,8 +238,91 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
if (StringUtils.isNotEmpty(waitTime)) {
|
if (StringUtils.isNotEmpty(waitTime)) {
|
||||||
waitTimeInSec = Integer.valueOf(waitTime).intValue();
|
waitTimeInSec = Integer.valueOf(waitTime).intValue();
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// wait at least 60 seconds even if input was lower
|
||||||
|
if (waitTimeInSec < 60) {
|
||||||
|
LOGGER.debug(String.format("multimap.disk.wait.secs was less than 60. Increasing to 60"));
|
||||||
|
waitTimeInSec = 60;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec);
|
|
||||||
|
if (!startConnect(address, pool, waitTimeInSec)) {
|
||||||
|
LOGGER.error("Failed to trigger connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOGGER.debug("Waiting for disk to become available after connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
|
||||||
|
|
||||||
|
// loop through and call isConnected() until true or the waitTimeInSec is exceeded
|
||||||
|
long startTime = System.currentTimeMillis();
|
||||||
|
while (System.currentTimeMillis() - startTime < TimeUnit.SECONDS.toMillis(waitTimeInSec)) {
|
||||||
|
if (isConnected(address.getPath())) {
|
||||||
|
LOGGER.info("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is connected");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
Thread.sleep(1000); // wait 1 second before checking again
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
Thread.currentThread().interrupt();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOGGER.error("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is not connected after waiting for " + waitTimeInSec + " seconds");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean startConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String,String> details) {
|
||||||
|
LOGGER.info("startConnectPhysicalDisk called for [" + volumePath + "]");
|
||||||
|
|
||||||
|
if (StringUtils.isEmpty(volumePath)) {
|
||||||
|
LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pool == null) {
|
||||||
|
LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// we expect WWN values in the volumePath so need to convert it to an actual physical path
|
||||||
|
AddressInfo address = this.parseAndValidatePath(volumePath);
|
||||||
|
|
||||||
|
return startConnect(address, pool, diskWaitTimeSecs);
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean finishConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String,String> details) throws Exception {
|
||||||
|
LOGGER.info("finishConnectPhysicalDisk called for [" + volumePath + "]");
|
||||||
|
|
||||||
|
if (StringUtils.isEmpty(volumePath)) {
|
||||||
|
LOGGER.error("Unable to finish connect physical disk due to insufficient data - volume path is undefined");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pool == null) {
|
||||||
|
LOGGER.error("Unable to finish connect physical disk due to insufficient data - pool is not set");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// we expect WWN values in the volumePath so need to convert it to an actual physical path
|
||||||
|
AddressInfo address = this.parseAndValidatePath(volumePath);
|
||||||
|
|
||||||
|
return finishConnect(address, pool, diskWaitTimeSecs);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tests if the physical disk is connected
|
||||||
|
*/
|
||||||
|
public boolean isConnected(String path, KVMStoragePool pool, Map<String,String> details) {
|
||||||
|
AddressInfo address = this.parseAndValidatePath(path);
|
||||||
|
if (address.getAddress() == null) {
|
||||||
|
LOGGER.debug(String.format("isConnected(path,pool) returning FALSE, volume path has no address field: %s", path));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (isConnected(address.getPath())) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
@ -441,24 +540,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
|
/**
|
||||||
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
|
* Trigger (but does not wait for success) a LUN connect operation for the given address and storage pool.
|
||||||
|
* @param address
|
||||||
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
|
* @param pool
|
||||||
long maxTries = 10; // how many max retries to attempt the script
|
* @param waitTimeInSec
|
||||||
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
|
* @return
|
||||||
int timeBetweenTries = 1000; // how long to sleep between tries
|
*/
|
||||||
// wait at least 60 seconds even if input was lower
|
boolean startConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
|
||||||
if (waitTimeInSec < 60) {
|
LOGGER.debug("Triggering connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
|
||||||
waitTimeInSec = 60;
|
long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for each script execution to run
|
||||||
}
|
Process p = null;
|
||||||
KVMPhysicalDisk physicalDisk = null;
|
try {
|
||||||
|
|
||||||
// Rescan before checking for the physical disk
|
|
||||||
int tries = 0;
|
|
||||||
while (waitTimeInMillis > 0 && tries < maxTries) {
|
|
||||||
tries++;
|
|
||||||
long start = System.currentTimeMillis();
|
|
||||||
String lun;
|
String lun;
|
||||||
if (address.getConnectionId() == null) {
|
if (address.getConnectionId() == null) {
|
||||||
lun = "-";
|
lun = "-";
|
||||||
|
|
@ -466,59 +559,97 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
|
||||||
lun = address.getConnectionId();
|
lun = address.getConnectionId();
|
||||||
}
|
}
|
||||||
|
|
||||||
Process p = null;
|
ProcessBuilder builder = new ProcessBuilder(startConnectScript, lun, address.getAddress());
|
||||||
try {
|
p = builder.start();
|
||||||
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
|
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
|
||||||
p = builder.start();
|
int rc = p.exitValue();
|
||||||
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
|
StringBuffer output = new StringBuffer();
|
||||||
int rc = p.exitValue();
|
if (rc == 0) {
|
||||||
StringBuffer output = new StringBuffer();
|
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||||
if (rc == 0) {
|
String line = null;
|
||||||
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
while ((line = input.readLine()) != null) {
|
||||||
String line = null;
|
output.append(line);
|
||||||
while ((line = input.readLine()) != null) {
|
output.append(" ");
|
||||||
output.append(line);
|
|
||||||
output.append(" ");
|
|
||||||
}
|
|
||||||
|
|
||||||
physicalDisk = getPhysicalDisk(address, pool);
|
|
||||||
if (physicalDisk != null && physicalDisk.getSize() > 0) {
|
|
||||||
LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
break;
|
|
||||||
} else {
|
|
||||||
LOGGER.warn("Failure discovering LUN via " + connectScript);
|
|
||||||
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
|
|
||||||
String line = null;
|
|
||||||
while ((line = error.readLine()) != null) {
|
|
||||||
LOGGER.warn("error --> " + line);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
LOGGER.debug("LUN discovery triggered for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
|
||||||
} else {
|
} else {
|
||||||
LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
|
LOGGER.warn("Failure triggering LUN discovery via " + startConnectScript);
|
||||||
}
|
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
|
||||||
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
|
String line = null;
|
||||||
LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
|
while ((line = error.readLine()) != null) {
|
||||||
} finally {
|
LOGGER.warn("error --> " + line);
|
||||||
if (p != null && p.isAlive()) {
|
}
|
||||||
p.destroyForcibly();
|
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, startConnectScript));
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
|
||||||
long elapsed = System.currentTimeMillis() - start;
|
LOGGER.warn("Problem performing LUN discovery for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
|
||||||
waitTimeInMillis = waitTimeInMillis - elapsed;
|
return false;
|
||||||
|
} finally {
|
||||||
try {
|
if (p != null && p.isAlive()) {
|
||||||
Thread.sleep(timeBetweenTries);
|
p.destroyForcibly();
|
||||||
} catch (Exception ex) {
|
|
||||||
// don't do anything
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
|
return true;
|
||||||
return false;
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Trigger (but does not wait for success) a LUN connect operation for the given address and storage pool.
|
||||||
|
* @param address
|
||||||
|
* @param pool
|
||||||
|
* @param waitTimeInSec
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
boolean finishConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
|
||||||
|
LOGGER.debug("Triggering connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
|
||||||
|
long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for each script execution to run
|
||||||
|
Process p = null;
|
||||||
|
try {
|
||||||
|
String lun;
|
||||||
|
if (address.getConnectionId() == null) {
|
||||||
|
lun = "-";
|
||||||
|
} else {
|
||||||
|
lun = address.getConnectionId();
|
||||||
|
}
|
||||||
|
|
||||||
|
ProcessBuilder builder = new ProcessBuilder(finishConnectScript, lun, address.getAddress());
|
||||||
|
p = builder.start();
|
||||||
|
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
|
||||||
|
int rc = p.exitValue();
|
||||||
|
StringBuffer output = new StringBuffer();
|
||||||
|
if (rc == 0) {
|
||||||
|
BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
|
||||||
|
String line = null;
|
||||||
|
while ((line = input.readLine()) != null) {
|
||||||
|
output.append(line);
|
||||||
|
output.append(" ");
|
||||||
|
}
|
||||||
|
LOGGER.debug("LUN discovery triggered for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
|
||||||
|
} else {
|
||||||
|
LOGGER.warn("Failure triggering LUN discovery via " + finishConnectScript);
|
||||||
|
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
|
||||||
|
String line = null;
|
||||||
|
while ((line = error.readLine()) != null) {
|
||||||
|
LOGGER.warn("error --> " + line);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, finishConnectScript));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
|
||||||
|
LOGGER.warn("Problem performing LUN discovery for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
if (p != null && p.isAlive()) {
|
||||||
|
p.destroyForcibly();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
boolean isConnected(String path) {
|
boolean isConnected(String path) {
|
||||||
|
|
|
||||||
|
|
@ -269,7 +269,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
dataIn.setExternalUuid(volume.getExternalUuid());
|
dataIn.setExternalUuid(volume.getExternalUuid());
|
||||||
|
|
||||||
// update the cloudstack metadata about the volume
|
// update the cloudstack metadata about the volume
|
||||||
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
|
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null, volume.getAllocatedSizeInBytes());
|
||||||
|
|
||||||
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
|
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
|
||||||
result.setSuccess(true);
|
result.setSuccess(true);
|
||||||
|
|
@ -346,14 +346,17 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
|
|
||||||
// if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size
|
// if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size
|
||||||
// we won't, however, shrink a volume if its smaller.
|
// we won't, however, shrink a volume if its smaller.
|
||||||
|
long size = destdata.getSize();
|
||||||
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
|
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
|
||||||
logger.info("Resizing volume {} to requested target volume size of {}", destdata, destdata.getSize());
|
logger.info("Resizing volume {} to requested target volume size of {}", destdata.getUuid(), destdata.getSize());
|
||||||
api.resize(context, destIn, destdata.getSize());
|
api.resize(context, destIn, destdata.getSize());
|
||||||
|
} else if (outVolume.getAllocatedSizeInBytes() > destdata.getSize()) {
|
||||||
|
size = outVolume.getAllocatedSizeInBytes();
|
||||||
}
|
}
|
||||||
|
|
||||||
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
|
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
|
||||||
String finalPath = generatePathInfo(outVolume, null);
|
String finalPath = generatePathInfo(outVolume, null);
|
||||||
persistVolumeData(storagePool, details, destdata, outVolume, null);
|
persistVolumeData(storagePool, details, destdata, outVolume, null, size);
|
||||||
logger.info("Copy completed from [{}] to [{}]", srcdata, destdata);
|
logger.info("Copy completed from [{}] to [{}]", srcdata, destdata);
|
||||||
|
|
||||||
VolumeObjectTO voto = new VolumeObjectTO();
|
VolumeObjectTO voto = new VolumeObjectTO();
|
||||||
|
|
@ -384,15 +387,11 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]",
|
logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]",
|
||||||
srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore());
|
srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore());
|
||||||
try {
|
try {
|
||||||
if (!isSameProvider(srcData)) {
|
if (!srcData.getDataStore().getUuid().equals(destData.getDataStore().getUuid())) {
|
||||||
logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
|
logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!isSameProvider(destData)) {
|
|
||||||
logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
|
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
|
||||||
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
|
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
|
||||||
|
|
@ -500,7 +499,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
ProviderVolume vol = api.getVolume(context, sourceIn);
|
ProviderVolume vol = api.getVolume(context, sourceIn);
|
||||||
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
||||||
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
||||||
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
|
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
|
||||||
|
|
||||||
|
|
||||||
logger.info("Granted host {} access to volume {}", host, dataObject);
|
logger.info("Granted host {} access to volume {}", host, dataObject);
|
||||||
|
|
@ -534,7 +533,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
ProviderVolume vol = api.getVolume(context, sourceIn);
|
ProviderVolume vol = api.getVolume(context, sourceIn);
|
||||||
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
|
||||||
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
|
||||||
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
|
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
|
||||||
|
|
||||||
logger.info("Revoked access for host {} to volume {}", host, dataObject);
|
logger.info("Revoked access for host {} to volume {}", host, dataObject);
|
||||||
} catch (Throwable e) {
|
} catch (Throwable e) {
|
||||||
|
|
@ -725,6 +724,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||||
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
|
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
|
||||||
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
|
||||||
|
mapCapabilities.put("CAN_CLONE_VOLUME_FROM_TEMPLATE", Boolean.TRUE.toString());
|
||||||
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
|
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
|
||||||
if (factory != null) {
|
if (factory != null) {
|
||||||
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
|
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
|
||||||
|
|
@ -840,55 +840,96 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
}
|
}
|
||||||
|
|
||||||
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
|
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
|
||||||
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap) {
|
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap, Long size) {
|
||||||
if (dataObject.getType() == DataObjectType.VOLUME) {
|
if (dataObject.getType() == DataObjectType.VOLUME) {
|
||||||
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
|
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
|
||||||
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
|
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
|
||||||
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
|
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
||||||
ProviderVolume managedVolume, Map<String,String> connIdMap) {
|
ProviderVolume managedVolume, Map<String,String> connIdMap, Long size) {
|
||||||
|
|
||||||
|
// Get the volume by dataObject id
|
||||||
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
|
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
|
||||||
|
long volumeId = volumeVO.getId();
|
||||||
|
|
||||||
|
// Generate path for volume and details
|
||||||
String finalPath = generatePathInfo(managedVolume, connIdMap);
|
String finalPath = generatePathInfo(managedVolume, connIdMap);
|
||||||
volumeVO.setPath(finalPath);
|
|
||||||
volumeVO.setFormat(ImageFormat.RAW);
|
|
||||||
volumeVO.setPoolId(storagePool.getId());
|
|
||||||
volumeVO.setExternalUuid(managedVolume.getExternalUuid());
|
|
||||||
volumeVO.setDisplay(true);
|
|
||||||
volumeVO.setDisplayVolume(true);
|
|
||||||
_volumeDao.update(volumeVO.getId(), volumeVO);
|
|
||||||
|
|
||||||
volumeVO = _volumeDao.findById(volumeVO.getId());
|
try {
|
||||||
|
if (finalPath != null) {
|
||||||
|
volumeVO.setPath(finalPath);
|
||||||
|
}
|
||||||
|
volumeVO.setFormat(ImageFormat.RAW);
|
||||||
|
volumeVO.setPoolId(storagePool.getId());
|
||||||
|
volumeVO.setExternalUuid(managedVolume.getExternalUuid());
|
||||||
|
volumeVO.setDisplay(true);
|
||||||
|
volumeVO.setDisplayVolume(true);
|
||||||
|
// the size may have been adjusted by the storage provider
|
||||||
|
if (size != null) {
|
||||||
|
volumeVO.setSize(size);
|
||||||
|
}
|
||||||
|
_volumeDao.update(volumeVO.getId(), volumeVO);
|
||||||
|
} catch (Throwable e) {
|
||||||
|
logger.error("Failed to persist volume path", e);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
|
||||||
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
|
// PATH
|
||||||
DiskTO.PATH, finalPath, true);
|
try {
|
||||||
_volumeDetailsDao.persist(volumeDetailVO);
|
// If volume_detail exist
|
||||||
|
_volumeDetailsDao.removeDetail(volumeId, DiskTO.PATH);
|
||||||
|
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, DiskTO.PATH, finalPath, true);
|
||||||
|
_volumeDetailsDao.persist(volumeDetailVO);
|
||||||
|
} catch (Exception e) {
|
||||||
|
logger.error("Failed to persist volume path", e);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
|
||||||
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
|
// EXTERNAL_NAME
|
||||||
ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
|
try {
|
||||||
_volumeDetailsDao.persist(volumeDetailVO);
|
_volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_NAME);
|
||||||
|
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
|
||||||
|
_volumeDetailsDao.persist(volumeDetailVO);
|
||||||
|
} catch (Exception e) {
|
||||||
|
logger.error("Failed to persist volume external name", e);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
|
||||||
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
|
// EXTERNAL_UUID
|
||||||
ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
|
try {
|
||||||
_volumeDetailsDao.persist(volumeDetailVO);
|
_volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_UUID);
|
||||||
|
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
|
||||||
|
_volumeDetailsDao.persist(volumeDetailVO);
|
||||||
|
} catch (Exception e) {
|
||||||
|
logger.error("Failed to persist volume external uuid", e);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
|
||||||
ProviderVolume volume, Map<String,String> connIdMap) {
|
ProviderVolume volume, Map<String,String> connIdMap, Long size) {
|
||||||
TemplateInfo templateInfo = (TemplateInfo) dataObject;
|
TemplateInfo templateInfo = (TemplateInfo) dataObject;
|
||||||
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
|
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
|
||||||
templateInfo.getId(), null);
|
templateInfo.getId(), null);
|
||||||
|
|
||||||
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
|
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
|
||||||
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
|
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
|
||||||
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
|
if (size == null) {
|
||||||
|
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
|
||||||
|
} else {
|
||||||
|
templatePoolRef.setTemplateSize(size);
|
||||||
|
}
|
||||||
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
|
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
|
||||||
}
|
}
|
||||||
|
|
||||||
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
|
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
|
||||||
|
if (volume == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
|
String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
|
||||||
volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid());
|
volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid());
|
||||||
|
|
||||||
|
|
@ -938,15 +979,6 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
return ctx;
|
return ctx;
|
||||||
}
|
}
|
||||||
|
|
||||||
boolean isSameProvider(DataObject obj) {
|
|
||||||
StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId());
|
|
||||||
if (storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName())) {
|
|
||||||
return true;
|
|
||||||
} else {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
|
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
|
||||||
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
|
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
|
||||||
if (data instanceof VolumeInfo) {
|
if (data instanceof VolumeInfo) {
|
||||||
|
|
@ -1002,4 +1034,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
|
||||||
public boolean volumesRequireGrantAccessWhenUsed() {
|
public boolean volumesRequireGrantAccessWhenUsed() {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public boolean zoneWideVolumesAvailableWithoutClusterMotion() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -80,6 +80,7 @@
|
||||||
</dependencies>
|
</dependencies>
|
||||||
<executions>
|
<executions>
|
||||||
<execution>
|
<execution>
|
||||||
|
<?m2e execute onConfiguration,onIncremental?>
|
||||||
<id>set-properties</id>
|
<id>set-properties</id>
|
||||||
<phase>validate</phase>
|
<phase>validate</phase>
|
||||||
<goals>
|
<goals>
|
||||||
|
|
|
||||||
|
|
@ -52,6 +52,7 @@
|
||||||
</dependencies>
|
</dependencies>
|
||||||
<executions>
|
<executions>
|
||||||
<execution>
|
<execution>
|
||||||
|
<?m2e execute onConfiguration,onIncremental?>
|
||||||
<goals>
|
<goals>
|
||||||
<goal>compile</goal>
|
<goal>compile</goal>
|
||||||
<goal>compileTests</goal>
|
<goal>compileTests</goal>
|
||||||
|
|
@ -114,11 +115,10 @@
|
||||||
<version>${groovy.version}</version>
|
<version>${groovy.version}</version>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- Optional dependencies for using Spock -->
|
<dependency>
|
||||||
<dependency> <!-- enables mocking of classes (in addition to interfaces) -->
|
|
||||||
<groupId>cglib</groupId>
|
<groupId>cglib</groupId>
|
||||||
<artifactId>cglib-nodep</artifactId>
|
<artifactId>cglib-nodep</artifactId>
|
||||||
<scope>test</scope>
|
<version>${cs.cglib.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.zapodot</groupId>
|
<groupId>org.zapodot</groupId>
|
||||||
|
|
|
||||||
|
|
@ -29,102 +29,39 @@ WWID=${2:?"WWID required"}
|
||||||
|
|
||||||
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
|
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
|
||||||
|
|
||||||
systemctl is-active multipathd || systemctl restart multipathd || {
|
START_CONNECT=$(dirname $0)/startConnectVolume.sh
|
||||||
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
|
if [ -x "${START_CONNECT}" ]; then
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
|
echo "$(date): Starting connect process for ${WWID} on lun ${LUN}"
|
||||||
|
${START_CONNECT} ${LUN} ${WWID}
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "$(date): Failed to start connect process for ${WWID} on lun ${LUN}"
|
||||||
|
logger -t "CS_SCSI_VOL_FIND" "${WWID} failed to start connect process on lun ${LUN}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "$(date): Unable to find startConnect.sh script!"
|
||||||
exit 1
|
exit 1
|
||||||
}
|
|
||||||
|
|
||||||
echo "$(date): Looking for ${WWID} on lun ${LUN}"
|
|
||||||
|
|
||||||
# get vendor OUI. we will only delete a device on the designated lun if it matches the
|
|
||||||
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
|
|
||||||
# host on different fiber channel hosts with the same LUN
|
|
||||||
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
|
|
||||||
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
|
|
||||||
|
|
||||||
# first we need to check if any stray references are left from a previous use of this lun
|
|
||||||
for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
|
|
||||||
lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
|
|
||||||
|
|
||||||
if [ ! -z "${lingering_devs}" ]; then
|
|
||||||
for dev in ${lingering_devs}; do
|
|
||||||
LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
|
|
||||||
FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
|
|
||||||
if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
|
|
||||||
continue;
|
|
||||||
fi
|
|
||||||
dev=$(echo $dev | awk -F: '{ print $1}')
|
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
|
|
||||||
MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
|
|
||||||
MP_WWID=${MP_WWID:1} # strip first character (3) off
|
|
||||||
# don't do this if the WWID passed in matches the WWID from multipath
|
|
||||||
if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
|
|
||||||
# run full removal again so all devices and multimap are cleared
|
|
||||||
$(dirname $0)/disconnectVolume.sh ${MP_WWID}
|
|
||||||
# we don't have a multimap but we may still have some stranded devices to clean up
|
|
||||||
elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
|
|
||||||
echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
sleep 3
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
|
|
||||||
|
|
||||||
# wait for multipath to map the new lun to the WWID
|
|
||||||
echo "$(date): Waiting for multipath entry to show up for the WWID"
|
|
||||||
while true; do
|
|
||||||
ls /dev/mapper/3${WWID} >/dev/null 2>&1
|
|
||||||
if [ $? == 0 ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
|
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan"
|
|
||||||
|
|
||||||
# instruct bus to scan for new lun
|
|
||||||
for fchost in $(ls /sys/class/fc_host); do
|
|
||||||
echo " --> Scanning ${fchost}"
|
|
||||||
echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
|
|
||||||
done
|
|
||||||
|
|
||||||
multipath -v2 2>/dev/null
|
|
||||||
|
|
||||||
ls /dev/mapper/3${WWID} >/dev/null 2>&1
|
|
||||||
if [ $? == 0 ]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
|
|
||||||
sleep 5
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "$(date): Doing a recan to make sure we have proper current size locally"
|
|
||||||
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
|
|
||||||
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
|
|
||||||
done
|
|
||||||
|
|
||||||
sleep 3
|
|
||||||
|
|
||||||
multipathd reconfigure
|
|
||||||
|
|
||||||
sleep 3
|
|
||||||
|
|
||||||
# cleanup any old/faulty paths
|
|
||||||
delete_needed=false
|
|
||||||
multipath -l 3${WWID}
|
|
||||||
for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
|
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing"
|
|
||||||
echo 1 > /sys/block/${dev}/device/delete;
|
|
||||||
delete_needed=true
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ "${delete_needed}" == "true" ]; then
|
|
||||||
sleep 10
|
|
||||||
multipath -v2 >/dev/null
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
multipath -l 3${WWID}
|
# wait for the device path to show up
|
||||||
|
while [ ! -e /dev/mapper/3${WWID} ]; do
|
||||||
|
echo "$(date): Waiting for /dev/mapper/3${WWID} to appear"
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
FINISH_CONNECT=$(dirname $0)/finishConnectVolume.sh
|
||||||
|
if [ -x "${FINISH_CONNECT}" ]; then
|
||||||
|
echo "$(date): Starting post-connect validation for ${WWID} on lun ${LUN}"
|
||||||
|
${FINISH_CONNECT} ${LUN} ${WWID}
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "$(date): Failed to finish connect process for ${WWID} on lun ${LUN}"
|
||||||
|
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} failed to finish connect process on lun ${LUN}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "$(date): Unable to find finishConnect.sh script!"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available"
|
logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,14 @@
|
||||||
#########################################################################################
|
#########################################################################################
|
||||||
|
|
||||||
WWID=${1:?"WWID required"}
|
WWID=${1:?"WWID required"}
|
||||||
|
BACKGROUND="${2}"
|
||||||
|
|
||||||
|
# move the script to run in the background, no need to block other flows for this to complete
|
||||||
|
if [ -z "${BACKGROUND}" ]; then
|
||||||
|
nohup "$0" "${WWID}" --background &
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
|
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
|
||||||
|
|
||||||
echo "$(date): Removing ${WWID}"
|
echo "$(date): Removing ${WWID}"
|
||||||
|
|
@ -36,6 +44,9 @@ systemctl is-active multipathd || systemctl restart multipathd || {
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Remove any active IO on the device so it can be removed.
|
||||||
|
multipathd disablequeueing map 3${WWID}
|
||||||
|
|
||||||
# first get dm- name
|
# first get dm- name
|
||||||
DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}')
|
DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}')
|
||||||
SLAVE_DEVS=""
|
SLAVE_DEVS=""
|
||||||
|
|
@ -66,9 +77,6 @@ fi
|
||||||
|
|
||||||
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
|
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
|
||||||
|
|
||||||
# Added to give time for the event to be fired to the server
|
|
||||||
sleep 10
|
|
||||||
|
|
||||||
echo "$(date): ${WWID} removed"
|
echo "$(date): ${WWID} removed"
|
||||||
|
|
||||||
exit 0
|
exit 0
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,79 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
#####################################################################################
|
||||||
|
#
|
||||||
|
# Given a lun # and a WWID for a volume provisioned externally, find the volume
|
||||||
|
# through the SCSI bus and make sure its visible via multipath
|
||||||
|
#
|
||||||
|
#####################################################################################
|
||||||
|
|
||||||
|
|
||||||
|
LUN=${1:?"LUN required"}
|
||||||
|
WWID=${2:?"WWID required"}
|
||||||
|
|
||||||
|
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
|
||||||
|
|
||||||
|
systemctl is-active multipathd || systemctl restart multipathd || {
|
||||||
|
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
|
||||||
|
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
echo "$(date): Doing post-connect validation for ${WWID} on lun ${LUN}"
|
||||||
|
|
||||||
|
# get vendor OUI. we will only delete a device on the designated lun if it matches the
|
||||||
|
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
|
||||||
|
# host on different fiber channel hosts with the same LUN
|
||||||
|
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
|
||||||
|
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
|
||||||
|
|
||||||
|
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} looking for disk path at /dev/mapper/3${WWID}"
|
||||||
|
|
||||||
|
echo "$(date): Doing a recan to make sure we have proper current size locally"
|
||||||
|
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
|
||||||
|
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
|
||||||
|
done
|
||||||
|
|
||||||
|
sleep 3
|
||||||
|
|
||||||
|
multipathd reconfigure
|
||||||
|
|
||||||
|
sleep 3
|
||||||
|
|
||||||
|
# cleanup any old/faulty paths
|
||||||
|
delete_needed=false
|
||||||
|
multipath -l 3${WWID}
|
||||||
|
for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
|
||||||
|
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} multipath contains faulty path ${dev}, removing"
|
||||||
|
echo 1 > /sys/block/${dev}/device/delete;
|
||||||
|
delete_needed=true
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "${delete_needed}" == "true" ]; then
|
||||||
|
sleep 10
|
||||||
|
multipath -v2 >/dev/null
|
||||||
|
fi
|
||||||
|
|
||||||
|
multipath -l 3${WWID}
|
||||||
|
|
||||||
|
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} successfully discovered and available"
|
||||||
|
|
||||||
|
echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}"
|
||||||
|
|
||||||
|
exit 0
|
||||||
|
|
@ -0,0 +1,101 @@
|
||||||
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

#####################################################################################
#
# Given a lun # and a WWID for a volume provisioned externally, find the volume
# through the SCSI bus and make sure its visible via multipath
#
#####################################################################################

LUN=${1:?"LUN required"}
WWID=${2:?"WWID required"}

# multipath maps are keyed on the lowercase form of the WWID
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')

# multipathd must be running before the volume can be surfaced on this host
systemctl is-active multipathd || systemctl restart multipathd || {
   echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
   logger -t "CS_SCSI_VOL_CONN_START" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
   exit 1
}

echo "$(date): Looking for ${WWID} on lun ${LUN}"

# get vendor OUI. we will only delete a device on the designated lun if it matches the
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
# host on different fiber channel hosts with the same LUN
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
echo "$(date): Incoming OUI: ${INCOMING_OUI}"

# first we need to check if any stray references are left from a previous use of this lun
for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
   # lsscsi -w prints ... <wwn> <devpath>; emit "devpath:wwn" pairs, rewriting the
   # leading 0x of the WWN to the "3" prefix used by multipath map names
   lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')

   if [ ! -z "${lingering_devs}" ]; then
      for dev in ${lingering_devs}; do
         LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}')
         # chars 3-8 of the 3-prefixed WWID line up with chars 2-7 of the bare WWID
         FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
         # skip devices belonging to a different storage array (different OUI) on this lun
         if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
            continue;
         fi
         dev=$(echo $dev | awk -F: '{ print $1}')
         logger -t "CS_SCSI_VOL_CONN_START" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
         MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
         MP_WWID=${MP_WWID:1} # strip first character (3) off
         # don't do this if the WWID passed in matches the WWID from multipath
         if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
            # run full removal again so all devices and multimap are cleared
            $(dirname $0)/disconnectVolume.sh ${MP_WWID}
         # we don't have a multimap but we may still have some stranded devices to clean up.
         # BUG FIX: LSSCSI_WWID carries the leading "3" added by the sed above while WWID
         # does not, so the old bare comparison was always unequal and could delete the very
         # device being connected; compare against the 3-prefixed form instead
         elif [ "${LSSCSI_WWID}" != "3${WWID}" ]; then
            echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
         fi
      done
      sleep 3
   fi
done

logger -t "CS_SCSI_VOL_CONN_START" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"

# wait for multipath to map the new lun to the WWID
echo "$(date): Triggering discovery for multipath WWID ${WWID} on LUN ${LUN}"
if ls /dev/mapper/3${WWID} >/dev/null 2>&1; then
   logger -t "CS_SCSI_VOL_CONN_START" "${WWID} already available at /dev/mapper/3${WWID}, no need to trigger a scan"
   # BUG FIX: the original used "break" here, which is invalid outside a loop;
   # the device is already mapped so the script's work is done
   exit 0
fi

# instruct bus to scan for new lun
for fchost in $(ls /sys/class/fc_host); do
   echo " --> Scanning ${fchost}"
   echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
done

multipath -v2 2>/dev/null

if ls /dev/mapper/3${WWID} >/dev/null 2>&1; then
   logger -t "CS_SCSI_VOL_CONN_START" "${WWID} scan triggered and device immediately became visible at /dev/mapper/3${WWID}"
fi

logger -t "CS_SCSI_VOL_CONN_START" "${WWID} successfully triggered discovery"

echo "$(date): Complete - Triggered discovery of ${WWID}, watch for device at /dev/mapper/3${WWID}"

exit 0
|
||||||
|
|
@ -39,6 +39,12 @@ public interface AutoScaleManager extends AutoScaleService {
|
||||||
"The Number of worker threads to scan the autoscale vm groups.",
|
"The Number of worker threads to scan the autoscale vm groups.",
|
||||||
false);
|
false);
|
||||||
|
|
||||||
|
// Global (advanced) setting: when true, generated autoscale VM hostnames are
// prefixed with the autoscale VM group name; when false, the legacy
// name+sequence scheme is used. Defaults to "true"; dynamic (last arg true).
ConfigKey<Boolean> UseAutoscaleVmHostnamePrefixEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class,
        "autoscale.vm.hostname.prefixenabled",
        "true",
        "If true, the auto scale vm group name will be used as a prefix for the auto scale vm hostnames.",
        true);
|
||||||
|
|
||||||
ConfigKey<Integer> AutoScaleErroredInstanceThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
|
ConfigKey<Integer> AutoScaleErroredInstanceThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
|
||||||
"autoscale.errored.instance.threshold",
|
"autoscale.errored.instance.threshold",
|
||||||
"10",
|
"10",
|
||||||
|
|
|
||||||
|
|
@ -294,6 +294,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
|
||||||
PARAM_OVERRIDE_DISK_OFFERING_ID, PARAM_SSH_KEYPAIRS, PARAM_AFFINITY_GROUP_IDS, PARAM_NETWORK_IDS);
|
PARAM_OVERRIDE_DISK_OFFERING_ID, PARAM_SSH_KEYPAIRS, PARAM_AFFINITY_GROUP_IDS, PARAM_NETWORK_IDS);
|
||||||
|
|
||||||
protected static final String VM_HOSTNAME_PREFIX = "autoScaleVm-";
|
protected static final String VM_HOSTNAME_PREFIX = "autoScaleVm-";
|
||||||
|
|
||||||
protected static final int VM_HOSTNAME_RANDOM_SUFFIX_LENGTH = 6;
|
protected static final int VM_HOSTNAME_RANDOM_SUFFIX_LENGTH = 6;
|
||||||
|
|
||||||
private static final Long DEFAULT_HOST_ID = -1L;
|
private static final Long DEFAULT_HOST_ID = -1L;
|
||||||
|
|
@ -1952,6 +1953,19 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String getNextVmHostName(AutoScaleVmGroupVO asGroup) {
|
public String getNextVmHostName(AutoScaleVmGroupVO asGroup) {
|
||||||
|
if (UseAutoscaleVmHostnamePrefixEnabled.value()) {
|
||||||
|
return getNextVmHostNameWithPrefix(asGroup);
|
||||||
|
} else {
|
||||||
|
return getNextVmHostNameWithoutPrefix(asGroup);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getNextVmHostNameWithoutPrefix(AutoScaleVmGroupVO asGroup) {
|
||||||
|
int subStringLength = Math.min(asGroup.getName().length(), 63 - Long.toString(asGroup.getNextVmSeq()).length());
|
||||||
|
return asGroup.getName().substring(0, subStringLength) + Long.toString(asGroup.getNextVmSeq());
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getNextVmHostNameWithPrefix(AutoScaleVmGroupVO asGroup) {
|
||||||
String vmHostNameSuffix = "-" + asGroup.getNextVmSeq() + "-" +
|
String vmHostNameSuffix = "-" + asGroup.getNextVmSeq() + "-" +
|
||||||
RandomStringUtils.random(VM_HOSTNAME_RANDOM_SUFFIX_LENGTH, 0, 0, true, false, (char[])null, new SecureRandom()).toLowerCase();
|
RandomStringUtils.random(VM_HOSTNAME_RANDOM_SUFFIX_LENGTH, 0, 0, true, false, (char[])null, new SecureRandom()).toLowerCase();
|
||||||
// Truncate vm group name because max length of vm name is 63
|
// Truncate vm group name because max length of vm name is 63
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue