Compare commits

...

31 Commits

Author SHA1 Message Date
Rene Glover 758461f5e9
Merge abf6aae675 into bce3e54a7e 2026-01-22 15:14:14 +01:00
Daman Arora bce3e54a7e
improve error handling for template upload notifications (#12412)
Co-authored-by: Daman Arora <daman.arora@shapeblue.com>
2026-01-22 15:02:46 +01:00
Nicolas Vazquez 6a9835904c
Fix for zoneids parameters length on updateAPIs (#12440) 2026-01-22 14:57:46 +01:00
Nicolas Vazquez 6846619a6f
Fix update network offering domainids size limitation (#12431) 2026-01-22 14:32:46 +01:00
Vishesh d1eb2822d9
Remove redundant Exceptions from logs for vm schedules (#12428) 2026-01-22 14:29:35 +01:00
Glover, Rene (rg9975) abf6aae675 remove basedir from build/replace.properties reference in root pom.xml 2025-12-24 13:48:27 -06:00
Rene Glover ff818ab187
Update engine/schema/pom.xml
Co-authored-by: dahn <daan.hoogland@gmail.com>
2025-12-24 13:24:29 -06:00
Glover, Rene (rg9975) d45188bd97 revert engine/schema/pom.xml changes 2025-12-24 11:44:06 -06:00
Rene Glover ce3e5318bc
Merge branch '4.20' into 4.20-fiberchannel-patches 2025-12-24 11:35:57 -06:00
Rene Glover 4fb4f1b380
Merge branch '4.20' into 4.20-fiberchannel-patches 2025-12-22 11:00:33 -06:00
Rene Glover f962978466 update to multipath scripts and code/error logging 2025-11-14 17:36:43 +00:00
Rene Glover 8e9316a5e5 fix absolute path for md5sum.txt in pom.xml 2025-11-14 17:21:24 +00:00
Rene Glover 9c2d9275e0
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:47:29 -06:00
Rene Glover 36396e4424
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:47:07 -06:00
Rene Glover e0416397bc
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:45:42 -06:00
Rene Glover 7256bded9a
Update scripts/storage/multipath/startConnectVolume.sh
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:44:40 -06:00
Rene Glover 705ef8fa75
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:35:34 -06:00
Rene Glover 6105c2fe98
Update engine/schema/pom.xml
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:35:08 -06:00
Rene Glover cd69e1a8e4
Update server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 07:33:39 -06:00
Rene Glover e6208687c5
Update plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java
Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com>
2025-11-14 07:33:29 -06:00
Rene Glover 5cfbb8b183
Update scripts/storage/multipath/connectVolume.sh
Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com>
2025-11-14 07:33:10 -06:00
Rene Glover fdfc05108f
Merge branch '4.20' into 4.20-fiberchannel-patches 2025-11-13 15:30:29 -06:00
Rene Glover 017a3325c3
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
Co-authored-by: Vishesh <vishesh92@gmail.com>
2025-09-09 07:11:51 -05:00
Rene Glover 86eff0e092
Merge branch '4.20' into 4.20-fiberchannel-patches 2025-09-03 07:18:58 -05:00
Glover, Rene (rg9975) 6a97d69a75 end of line resizeVolume.sh 2025-06-23 10:59:04 -05:00
Rene Glover f83481e532
Update plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-23 10:22:02 -05:00
Rene Glover 2a80c9629e
Update plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-23 10:21:48 -05:00
Rene Glover 6353507bef
Update scripts/storage/multipath/connectVolume.sh
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-23 10:21:21 -05:00
Rene Glover 7be9bebdcf
Update scripts/storage/multipath/finishConnectVolume.sh
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-23 10:19:09 -05:00
Rene Glover 1bf169b4ea
Update scripts/storage/multipath/startConnectVolume.sh
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-23 10:18:56 -05:00
Glover, Rene (rg9975) e55d016418 various fixes for fiberchannel and autoscale prefix option 2025-06-22 21:01:23 -05:00
23 changed files with 719 additions and 235 deletions

View File

@ -78,6 +78,7 @@ public class UpdateNetworkOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.DOMAIN_ID,
type = CommandType.STRING,
length = 4096,
description = "The ID of the containing domain(s) as comma separated string, public for public offerings")
private String domainIds;

View File

@ -75,6 +75,7 @@ public class UpdateDiskOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -69,6 +69,7 @@ public class UpdateServiceOfferingCmd extends BaseCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -65,6 +65,7 @@ public class UpdateVPCOfferingCmd extends BaseAsyncCmd {
@Parameter(name = ApiConstants.ZONE_ID,
type = CommandType.STRING,
description = "The ID of the containing zone(s) as comma separated string, all for all zones offerings",
length = 4096,
since = "4.13")
private String zoneIds;

View File

@ -106,7 +106,7 @@
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv")
File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt")
File file = new File("./engine/schema/dist/systemvm-templates/sha512sum.txt")
def lines = file.readLines()
for (template in templateList) {
def data = lines.findAll { it.contains(template) }
@ -135,7 +135,7 @@
<goal>wget</goal>
</goals>
<configuration>
<url>${project.systemvm.template.location}/${cs.version}/md5sum.txt</url>
<url>${project.systemvm.template.location}/${cs.version}/sha512sum.txt</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<skipCache>true</skipCache>
<overwrite>true</overwrite>
@ -205,7 +205,7 @@
<checkSignature>true</checkSignature>
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-kvm.qcow2.bz2</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${kvm.checksum}</md5>
<sha512>${kvm.checksum}</sha512>
</configuration>
</execution>
</executions>
@ -241,7 +241,7 @@
<checkSignature>true</checkSignature>
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-vmware.ova</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${vmware.checksum}</md5>
<sha512>${vmware.checksum}</sha512>
</configuration>
</execution>
</executions>
@ -277,7 +277,7 @@
<checkSignature>true</checkSignature>
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-xen.vhd.bz2</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${xen.checksum}</md5>
<sha512>${xen.checksum}</sha512>
</configuration>
</execution>
</executions>
@ -313,7 +313,7 @@
<checkSignature>true</checkSignature>
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-ovm.raw.bz2</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${ovm.checksum}</md5>
<sha512>${ovm.checksum}</sha512>
</configuration>
</execution>
</executions>
@ -349,7 +349,7 @@
<checkSignature>true</checkSignature>
<url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip</url>
<outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
<md5>${hyperv.checksum}</md5>
<sha512>${hyperv.checksum}</sha512>
</configuration>
</execution>
</executions>

View File

@ -31,4 +31,6 @@ public interface VMScheduledJobDao extends GenericDao<VMScheduledJobVO, Long> {
int expungeJobsForSchedules(List<Long> scheduleId, Date dateAfter);
int expungeJobsBefore(Date currentTimestamp);
VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp);
}

View File

@ -39,6 +39,8 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
private final SearchBuilder<VMScheduledJobVO> expungeJobForScheduleSearch;
private final SearchBuilder<VMScheduledJobVO> scheduleAndTimestampSearch;
static final String SCHEDULED_TIMESTAMP = "scheduled_timestamp";
static final String VM_SCHEDULE_ID = "vm_schedule_id";
@ -58,6 +60,11 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
expungeJobForScheduleSearch.and(VM_SCHEDULE_ID, expungeJobForScheduleSearch.entity().getVmScheduleId(), SearchCriteria.Op.IN);
expungeJobForScheduleSearch.and(SCHEDULED_TIMESTAMP, expungeJobForScheduleSearch.entity().getScheduledTime(), SearchCriteria.Op.GTEQ);
expungeJobForScheduleSearch.done();
scheduleAndTimestampSearch = createSearchBuilder();
scheduleAndTimestampSearch.and(VM_SCHEDULE_ID, scheduleAndTimestampSearch.entity().getVmScheduleId(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.and(SCHEDULED_TIMESTAMP, scheduleAndTimestampSearch.entity().getScheduledTime(), SearchCriteria.Op.EQ);
scheduleAndTimestampSearch.done();
}
/**
@ -92,4 +99,12 @@ public class VMScheduledJobDaoImpl extends GenericDaoBase<VMScheduledJobVO, Long
sc.setParameters(SCHEDULED_TIMESTAMP, date);
return expunge(sc);
}
@Override
public VMScheduledJobVO findByScheduleAndTimestamp(long scheduleId, Date scheduledTimestamp) {
    // Look up the single scheduled-job row matching both the schedule id and the exact scheduled time.
    final SearchCriteria<VMScheduledJobVO> criteria = scheduleAndTimestampSearch.create();
    criteria.setParameters(VM_SCHEDULE_ID, scheduleId);
    criteria.setParameters(SCHEDULED_TIMESTAMP, scheduledTimestamp);
    return findOneBy(criteria);
}
}

View File

@ -0,0 +1,53 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.storage;
import java.util.Map;
/**
 * Decorator for StorageAdapters that implement asynchronous physical disk connections to improve
 * performance on VM starts with large numbers of disks.
 *
 * Usage protocol (as exercised by callers): invoke {@link #startConnectPhysicalDisk}, poll
 * {@link #isConnected} until it returns true, then invoke {@link #finishConnectPhysicalDisk}
 * to complete the connection.
 */
public interface AsyncPhysicalDiskConnectorDecorator {
    /**
     * Initiates a connection attempt (may or may not complete it depending on implementation).
     * @param path volume path identifying the disk to connect
     * @param pool storage pool the disk belongs to
     * @param details implementation-specific connection details (may be null)
     * @return true if the connection attempt was started successfully
     */
    public boolean startConnectPhysicalDisk(String path, KVMStoragePool pool, Map<String,String> details);
    /**
     * Tests if the physical disk is connected.
     * @param path volume path identifying the disk
     * @param pool storage pool the disk belongs to
     * @param details implementation-specific connection details (may be null)
     * @return true if the disk is currently connected
     */
    public boolean isConnected(String path, KVMStoragePool pool, Map<String,String> details);
    /**
     * Completes a connection attempt after isConnected returns true.
     * @param path volume path identifying the disk
     * @param pool storage pool the disk belongs to
     * @param details implementation-specific connection details (may be null)
     * @return true if the connection was completed successfully
     * @throws Exception if finishing the connection fails irrecoverably
     */
    public boolean finishConnectPhysicalDisk(String path, KVMStoragePool pool, Map<String,String> details) throws Exception;
}

View File

@ -20,6 +20,7 @@ import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@ -42,9 +43,11 @@ import com.cloud.hypervisor.kvm.resource.KVMHABase;
import com.cloud.hypervisor.kvm.resource.KVMHABase.PoolType;
import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageLayer;
import com.cloud.storage.Volume;
import com.cloud.utils.StringUtils;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.CloudRuntimeException;
@ -164,6 +167,19 @@ public class KVMStoragePoolManager {
return adaptor.connectPhysicalDisk(volPath, pool, details, false);
}
/**
 * Immutable holder tying a volume to the adaptor, pool and details needed to poll for
 * completion of an asynchronously started disk connection.
 * Fields are final (they were mutable with redundant null initializers before); they are
 * only ever assigned in the constructor.
 */
private static class ConnectingDiskInfo {
    final VolumeObjectTO volume;
    final KVMStoragePool pool;
    final StorageAdaptor adapter;
    final Map<String, String> details;

    ConnectingDiskInfo(VolumeObjectTO volume, StorageAdaptor adaptor, KVMStoragePool pool, Map<String, String> details) {
        this.volume = volume;
        this.adapter = adaptor;
        this.pool = pool;
        this.details = details;
    }
}
public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean isVMMigrate) {
boolean result = false;
@ -171,6 +187,10 @@ public class KVMStoragePoolManager {
List<DiskTO> disks = Arrays.asList(vmSpec.getDisks());
// disks that connect in background
List<ConnectingDiskInfo> connectingDisks = new ArrayList<>();
for (DiskTO disk : disks) {
if (disk.getType() == Volume.Type.ISO) {
result = true;
@ -187,17 +207,79 @@ public class KVMStoragePoolManager {
KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid());
StorageAdaptor adaptor = getStorageAdaptor(pool.getType());
if (adaptor instanceof AsyncPhysicalDiskConnectorDecorator) {
// If the adaptor supports async disk connection, we can start the connection
// and return immediately, allowing the connection to complete in the background.
result = ((AsyncPhysicalDiskConnectorDecorator) adaptor).startConnectPhysicalDisk(vol.getPath(), pool, disk.getDetails());
if (!result) {
logger.error("Failed to start connecting disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
return false;
}
// add disk to list of disks to check later
connectingDisks.add(new ConnectingDiskInfo(vol, adaptor, pool, disk.getDetails()));
} else {
result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails(), isVMMigrate);
if (!result) {
logger.error("Failed to connect disks via Instance spec for Instance: " + vmName + " volume:" + vol.toString());
logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
return result;
}
}
}
// if we have any connecting disks to check, wait for them to connect or timeout
if (!connectingDisks.isEmpty()) {
for (ConnectingDiskInfo connectingDisk : connectingDisks) {
StorageAdaptor adaptor = connectingDisk.adapter;
KVMStoragePool pool = connectingDisk.pool;
VolumeObjectTO volume = connectingDisk.volume;
Map<String, String> details = connectingDisk.details;
long diskWaitTimeMillis = getDiskWaitTimeMillis(details);
// wait for the disk to connect
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < diskWaitTimeMillis) {
if (((AsyncPhysicalDiskConnectorDecorator) adaptor).isConnected(volume.getPath(), pool, details)) {
logger.debug(String.format("Disk %s connected successfully for VM %s", volume.getPath(), vmName));
break;
}
sleep(1000); // wait for 1 second before checking again
}
}
}
return result;
}
/**
 * Resolves how long (in milliseconds) to wait for an asynchronously connecting disk,
 * honoring a per-disk override from the details map while enforcing a 60 second floor.
 * @param details disk detail map, may be null; read for STORAGE_POOL_DISK_WAIT
 * @return wait time in milliseconds, never less than 60000
 */
private long getDiskWaitTimeMillis(Map<String,String> details) {
    int waitTimeInSec = 60; // default wait time in seconds
    if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
        String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
        if (StringUtils.isNotEmpty(waitTime)) {
            try {
                waitTimeInSec = Integer.parseInt(waitTime.trim());
                logger.debug(String.format("%s set to %s", StorageManager.STORAGE_POOL_DISK_WAIT.toString(), waitTimeInSec));
            } catch (NumberFormatException e) {
                // previously an unparseable value would propagate a NumberFormatException; keep the default instead
                logger.warn(String.format("Ignoring non-numeric %s value [%s], using default of %ss",
                        StorageManager.STORAGE_POOL_DISK_WAIT.toString(), waitTime, waitTimeInSec));
            }
        }
    }
    // Enforce the 60s floor on the configured value as well. In the original this clamp lived
    // in the else branch, where waitTimeInSec was always the 60s default — i.e. dead code — so
    // a configured value below 60 was never actually raised, contradicting the stated intent.
    if (waitTimeInSec < 60) {
        logger.debug(String.format("%s was less than 60s. Increasing to 60s default.", StorageManager.STORAGE_POOL_DISK_WAIT.toString()));
        waitTimeInSec = 60;
    }
    return waitTimeInSec * 1000L; // convert to milliseconds (long arithmetic to avoid int overflow)
}
/**
 * Sleeps for the given number of milliseconds.
 * @param millis duration to sleep
 * @return true if the sleep completed, false if it was interrupted
 */
private boolean sleep(long millis) {
    try {
        Thread.sleep(millis);
        return true;
    } catch (InterruptedException e) {
        // Restore the interrupt flag so code further up the stack can observe the
        // interruption; the original swallowed it, silently clearing the flag.
        Thread.currentThread().interrupt();
        return false;
    }
}
public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) {
logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString()));
if (MapUtils.isEmpty(volumeToDisconnect)) {

View File

@ -1607,8 +1607,20 @@ public class KVMStorageProcessor implements StorageProcessor {
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
return new DettachAnswer(disk);
} catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) {
logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e);
} catch (final LibvirtException e) {
// check if the error was related to an already unplugged event - we can safely ignore
if (e.getMessage() != null && e.getMessage().contains("is already in the process of unplug")) {
logger.debug("Volume: " + vol.getPath() + " is already unplugged, ignoring the error");
return new DettachAnswer(disk);
} else {
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
return new DettachAnswer(e.toString());
}
} catch (final InternalErrorException e) {
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
return new DettachAnswer(e.toString());
} catch (final CloudRuntimeException e) {
logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
return new DettachAnswer(e.toString());
} finally {
vol.clearPassphrase();

View File

@ -65,10 +65,12 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
* Property keys and defaults
*/
static final Property<Integer> CLEANUP_FREQUENCY_SECS = new Property<Integer>("multimap.cleanup.frequency.secs", 60);
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 4);
static final Property<Integer> CLEANUP_TIMEOUT_SECS = new Property<Integer>("multimap.cleanup.timeout.secs", 600);
static final Property<Boolean> CLEANUP_ENABLED = new Property<Boolean>("multimap.cleanup.enabled", true);
static final Property<String> CLEANUP_SCRIPT = new Property<String>("multimap.cleanup.script", "cleanStaleMaps.sh");
static final Property<String> CONNECT_SCRIPT = new Property<String>("multimap.connect.script", "connectVolume.sh");
static final Property<String> START_CONNECT_SCRIPT = new Property<String>("multimap.startconnect.script", "startConnectVolume.sh");
static final Property<String> FINISH_CONNECT_SCRIPT = new Property<String>("multimap.finishconnect.script", "finishConnectVolume.sh");
static final Property<String> COPY_SCRIPT = new Property<String>("multimap.copy.script", "copyVolume.sh");
static final Property<String> DISCONNECT_SCRIPT = new Property<String>("multimap.disconnect.script", "disconnectVolume.sh");
static final Property<String> RESIZE_SCRIPT = new Property<String>("multimap.resize.script", "resizeVolume.sh");
@ -78,6 +80,8 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
static Timer cleanupTimer = new Timer();
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
private static String startConnectScript = START_CONNECT_SCRIPT.getFinalValue();
private static String finishConnectScript = FINISH_CONNECT_SCRIPT.getFinalValue();
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
@ -98,6 +102,16 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new Error("Unable to find the connectVolume.sh script");
}
startConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), startConnectScript);
if (startConnectScript == null) {
throw new Error("Unable to find the startConnectVolume.sh script");
}
finishConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), finishConnectScript);
if (finishConnectScript == null) {
throw new Error("Unable to find the finishConnectVolume.sh script");
}
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
if (disconnectScript == null) {
throw new Error("Unable to find the disconnectVolume.sh script");
@ -164,9 +178,11 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
// validate we have a connection, if not we need to connect first.
if (!isConnected(address.getPath())) {
if (!connectPhysicalDisk(address, pool, null)) {
throw new CloudRuntimeException("Unable to connect to volume " + address.getPath());
}
LOGGER.warn("Physical disk " + address.getPath() + " is not connected, a request to connectPhysicalDisk must be made before it can be used.");
return null;
} else {
LOGGER.debug("Physical disk " + address.getPath() + " is connected, proceeding to get its size.");
}
long diskSize = getPhysicalDiskSize(address.getPath());
@ -222,8 +238,91 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
if (StringUtils.isNotEmpty(waitTime)) {
waitTimeInSec = Integer.valueOf(waitTime).intValue();
}
} else {
// wait at least 60 seconds even if input was lower
if (waitTimeInSec < 60) {
LOGGER.debug(String.format("multimap.disk.wait.secs was less than 60. Increasing to 60"));
waitTimeInSec = 60;
}
return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec);
}
if (!startConnect(address, pool, waitTimeInSec)) {
LOGGER.error("Failed to trigger connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
return false;
}
LOGGER.debug("Waiting for disk to become available after connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
// loop through and call isConnected() until true or the waitTimeInSec is exceeded
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < TimeUnit.SECONDS.toMillis(waitTimeInSec)) {
if (isConnected(address.getPath())) {
LOGGER.info("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is connected");
return true;
}
try {
Thread.sleep(1000); // wait 1 second before checking again
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
LOGGER.error("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is not connected after waiting for " + waitTimeInSec + " seconds");
return false;
}
/**
 * Kicks off (without waiting on) a connect of the physical disk identified by the
 * WWN-based volume path against the given storage pool.
 */
public boolean startConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String,String> details) {
    LOGGER.info("startConnectPhysicalDisk called for [" + volumePath + "]");

    // Guard clauses: both a volume path and a pool are required before triggering a connect.
    if (StringUtils.isEmpty(volumePath)) {
        LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
        return false;
    }

    if (pool == null) {
        LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
        return false;
    }

    // The volume path encodes WWN values; translate it into a concrete physical address first.
    final AddressInfo parsedAddress = parseAndValidatePath(volumePath);
    return startConnect(parsedAddress, pool, diskWaitTimeSecs);
}
/**
 * Completes a previously started connect of the physical disk identified by the
 * WWN-based volume path against the given storage pool.
 */
public boolean finishConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String,String> details) throws Exception {
    LOGGER.info("finishConnectPhysicalDisk called for [" + volumePath + "]");

    // Guard clauses: both a volume path and a pool are required before finishing a connect.
    if (StringUtils.isEmpty(volumePath)) {
        LOGGER.error("Unable to finish connect physical disk due to insufficient data - volume path is undefined");
        return false;
    }

    if (pool == null) {
        LOGGER.error("Unable to finish connect physical disk due to insufficient data - pool is not set");
        return false;
    }

    // The volume path encodes WWN values; translate it into a concrete physical address first.
    final AddressInfo parsedAddress = parseAndValidatePath(volumePath);
    return finishConnect(parsedAddress, pool, diskWaitTimeSecs);
}
/**
 * Tests if the physical disk is connected.
 * @param path volume path containing the WWN-encoded address
 * @param pool storage pool the disk belongs to (unused here; part of the decorator contract)
 * @param details connection details (unused here; part of the decorator contract)
 * @return true if the disk at the parsed address is currently connected
 */
public boolean isConnected(String path, KVMStoragePool pool, Map<String,String> details) {
    AddressInfo address = this.parseAndValidatePath(path);
    if (address.getAddress() == null) {
        LOGGER.debug(String.format("isConnected(path,pool) returning FALSE, volume path has no address field: %s", path));
        return false;
    }
    // Delegate to the single-argument overload; collapses the redundant
    // "if (x) return true; return false;" of the original.
    return isConnected(address.getPath());
}
@Override
@ -441,24 +540,18 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
}
}
boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
long maxTries = 10; // how many max retries to attempt the script
long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
int timeBetweenTries = 1000; // how long to sleep between tries
// wait at least 60 seconds even if input was lower
if (waitTimeInSec < 60) {
waitTimeInSec = 60;
}
KVMPhysicalDisk physicalDisk = null;
// Rescan before checking for the physical disk
int tries = 0;
while (waitTimeInMillis > 0 && tries < maxTries) {
tries++;
long start = System.currentTimeMillis();
/**
* Trigger (but does not wait for success) a LUN connect operation for the given address and storage pool.
* @param address
* @param pool
* @param waitTimeInSec
* @return
*/
boolean startConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
LOGGER.debug("Triggering connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for each script execution to run
Process p = null;
try {
String lun;
if (address.getConnectionId() == null) {
lun = "-";
@ -466,9 +559,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
lun = address.getConnectionId();
}
Process p = null;
try {
ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
ProcessBuilder builder = new ProcessBuilder(startConnectScript, lun, address.getAddress());
p = builder.start();
if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
int rc = p.exitValue();
@ -480,16 +571,9 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
output.append(line);
output.append(" ");
}
physicalDisk = getPhysicalDisk(address, pool);
if (physicalDisk != null && physicalDisk.getSize() > 0) {
LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
return true;
}
break;
LOGGER.debug("LUN discovery triggered for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
} else {
LOGGER.warn("Failure discovering LUN via " + connectScript);
LOGGER.warn("Failure triggering LUN discovery via " + startConnectScript);
BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
String line = null;
while ((line = error.readLine()) != null) {
@ -497,28 +581,75 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
}
}
} else {
LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, startConnectScript));
return false;
}
} catch (IOException | InterruptedException | IllegalThreadStateException e) {
LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
LOGGER.warn("Problem performing LUN discovery for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
return false;
} finally {
if (p != null && p.isAlive()) {
p.destroyForcibly();
}
}
long elapsed = System.currentTimeMillis() - start;
waitTimeInMillis = waitTimeInMillis - elapsed;
return true;
}
/**
 * Completes a previously started LUN connect operation for the given address and storage
 * pool by running the finish-connect script and waiting (bounded) for it to exit.
 * The original javadoc here was a copy-paste of startConnect's ("Trigger (but does not
 * wait for success)..."), which described the wrong phase.
 * @param address parsed WWN/connection address of the volume
 * @param pool storage pool the volume belongs to
 * @param waitTimeInSec overall time budget; the script is allowed waitTimeInSec - 1 seconds
 * @return true if the script ran to completion within the timeout (NOTE(review): a non-zero
 *         exit code is logged but still returns true, matching prior behavior — confirm intent)
 */
boolean finishConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
    LOGGER.debug("Finishing connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
    long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for the script execution to run
    Process p = null;
    try {
        String lun;
        if (address.getConnectionId() == null) {
            lun = "-";
        } else {
            lun = address.getConnectionId();
        }
        ProcessBuilder builder = new ProcessBuilder(finishConnectScript, lun, address.getAddress());
        p = builder.start();
        if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
            int rc = p.exitValue();
            StringBuilder output = new StringBuilder(); // StringBuilder: no need for StringBuffer's synchronization here
            if (rc == 0) {
                BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
                String line = null;
                while ((line = input.readLine()) != null) {
                    output.append(line);
                    output.append(" ");
                }
                // message corrected: this phase finishes the connect, it does not trigger discovery
                LOGGER.debug("LUN connect finished for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
            } else {
                LOGGER.warn("Failure finishing LUN connect via " + finishConnectScript);
                BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
                String line = null;
                while ((line = error.readLine()) != null) {
                    LOGGER.warn("error --> " + line);
                }
            }
        } else {
            LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, finishConnectScript));
            return false;
        }
    } catch (IOException | InterruptedException | IllegalThreadStateException e) {
        LOGGER.warn("Problem finishing LUN connect for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
        return false;
    } finally {
        // guarantee the child process does not outlive this call
        if (p != null && p.isAlive()) {
            p.destroyForcibly();
        }
    }
    return true;
}
boolean isConnected(String path) {

View File

@ -269,7 +269,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
dataIn.setExternalUuid(volume.getExternalUuid());
// update the cloudstack metadata about the volume
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null, volume.getAllocatedSizeInBytes());
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true);
@ -346,14 +346,17 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
// if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size
// we won't, however, shrink a volume if its smaller.
long size = destdata.getSize();
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
logger.info("Resizing volume {} to requested target volume size of {}", destdata, destdata.getSize());
logger.info("Resizing volume {} to requested target volume size of {}", destdata.getUuid(), destdata.getSize());
api.resize(context, destIn, destdata.getSize());
} else if (outVolume.getAllocatedSizeInBytes() > destdata.getSize()) {
size = outVolume.getAllocatedSizeInBytes();
}
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
String finalPath = generatePathInfo(outVolume, null);
persistVolumeData(storagePool, details, destdata, outVolume, null);
persistVolumeData(storagePool, details, destdata, outVolume, null, size);
logger.info("Copy completed from [{}] to [{}]", srcdata, destdata);
VolumeObjectTO voto = new VolumeObjectTO();
@ -384,15 +387,11 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]",
srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore());
try {
if (!isSameProvider(srcData)) {
if (!srcData.getDataStore().getUuid().equals(destData.getDataStore().getUuid())) {
logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
return false;
}
if (!isSameProvider(destData)) {
logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
return false;
}
logger.debug(
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
@ -500,7 +499,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
ProviderVolume vol = api.getVolume(context, sourceIn);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
logger.info("Granted host {} access to volume {}", host, dataObject);
@ -534,7 +533,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
ProviderVolume vol = api.getVolume(context, sourceIn);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
Map<String,String> connIdMap = api.getConnectionIdMap(dataIn);
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
logger.info("Revoked access for host {} to volume {}", host, dataObject);
} catch (Throwable e) {
@ -725,6 +724,7 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put("CAN_CLONE_VOLUME_FROM_TEMPLATE", Boolean.TRUE.toString());
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
if (factory != null) {
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
@ -840,55 +840,96 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
}
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map<String, String> storagePoolDetails,
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap) {
DataObject dataObject, ProviderVolume volume, Map<String,String> connIdMap, Long size) {
if (dataObject.getType() == DataObjectType.VOLUME) {
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
}
}
void persistVolumeData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume managedVolume, Map<String,String> connIdMap) {
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
ProviderVolume managedVolume, Map<String,String> connIdMap, Long size) {
// Get the volume by dataObject id
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
long volumeId = volumeVO.getId();
// Generate path for volume and details
String finalPath = generatePathInfo(managedVolume, connIdMap);
try {
if (finalPath != null) {
volumeVO.setPath(finalPath);
}
volumeVO.setFormat(ImageFormat.RAW);
volumeVO.setPoolId(storagePool.getId());
volumeVO.setExternalUuid(managedVolume.getExternalUuid());
volumeVO.setDisplay(true);
volumeVO.setDisplayVolume(true);
// the size may have been adjusted by the storage provider
if (size != null) {
volumeVO.setSize(size);
}
_volumeDao.update(volumeVO.getId(), volumeVO);
} catch (Throwable e) {
logger.error("Failed to persist volume path", e);
throw e;
}
volumeVO = _volumeDao.findById(volumeVO.getId());
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
DiskTO.PATH, finalPath, true);
// PATH
try {
// If volume_detail exist
_volumeDetailsDao.removeDetail(volumeId, DiskTO.PATH);
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, DiskTO.PATH, finalPath, true);
_volumeDetailsDao.persist(volumeDetailVO);
} catch (Exception e) {
logger.error("Failed to persist volume path", e);
throw e;
}
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
// EXTERNAL_NAME
try {
_volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_NAME);
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
_volumeDetailsDao.persist(volumeDetailVO);
} catch (Exception e) {
logger.error("Failed to persist volume external name", e);
throw e;
}
volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
// EXTERNAL_UUID
try {
_volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_UUID);
VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
_volumeDetailsDao.persist(volumeDetailVO);
} catch (Exception e) {
logger.error("Failed to persist volume external uuid", e);
throw e;
}
}
void persistTemplateData(StoragePoolVO storagePool, Map<String, String> details, DataObject dataObject,
ProviderVolume volume, Map<String,String> connIdMap) {
ProviderVolume volume, Map<String,String> connIdMap, Long size) {
TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null);
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
if (size == null) {
templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
} else {
templatePoolRef.setTemplateSize(size);
}
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
String generatePathInfo(ProviderVolume volume, Map<String,String> connIdMap) {
if (volume == null) {
return null;
}
String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid());
@ -938,15 +979,6 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
return ctx;
}
/**
 * Checks whether the storage pool backing the given data object is served by the
 * same storage provider as this driver.
 *
 * @param obj the data object whose backing pool is examined
 * @return true if the pool exists and its provider name matches this driver's provider name
 */
boolean isSameProvider(DataObject obj) {
    StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId());
    // return the comparison directly instead of if/else over boolean literals
    return storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName());
}
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
if (data instanceof VolumeInfo) {
@ -1002,4 +1034,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive
/**
 * Indicates that volumes managed by this driver must be explicitly granted
 * host access (via grantAccess) before a host may use them.
 *
 * @return always true for this driver
 */
public boolean volumesRequireGrantAccessWhenUsed() {
return true;
}
/**
 * Indicates that zone-wide volumes on this storage can be reached from any
 * cluster without requiring cluster-to-cluster storage motion.
 *
 * @return always true for this driver
 */
public boolean zoneWideVolumesAvailableWithoutClusterMotion() {
return true;
}
}

View File

@ -80,6 +80,7 @@
</dependencies>
<executions>
<execution>
<?m2e execute onConfiguration,onIncremental?>
<id>set-properties</id>
<phase>validate</phase>
<goals>

View File

@ -52,6 +52,7 @@
</dependencies>
<executions>
<execution>
<?m2e execute onConfiguration,onIncremental?>
<goals>
<goal>compile</goal>
<goal>compileTests</goal>
@ -114,11 +115,10 @@
<version>${groovy.version}</version>
<scope>test</scope>
</dependency>
<!-- Optional dependencies for using Spock -->
<dependency> <!-- enables mocking of classes (in addition to interfaces) -->
<dependency>
<groupId>cglib</groupId>
<artifactId>cglib-nodep</artifactId>
<scope>test</scope>
<version>${cs.cglib.version}</version>
</dependency>
<dependency>
<groupId>org.zapodot</groupId>

View File

@ -29,102 +29,39 @@ WWID=${2:?"WWID required"}
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
START_CONNECT=$(dirname $0)/startConnectVolume.sh
if [ -x "${START_CONNECT}" ]; then
echo "$(date): Starting connect process for ${WWID} on lun ${LUN}"
${START_CONNECT} ${LUN} ${WWID}
if [ $? -ne 0 ]; then
echo "$(date): Failed to start connect process for ${WWID} on lun ${LUN}"
logger -t "CS_SCSI_VOL_FIND" "${WWID} failed to start connect process on lun ${LUN}"
exit 1
}
echo "$(date): Looking for ${WWID} on lun ${LUN}"
# get vendor OUI. we will only delete a device on the designated lun if it matches the
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
# host on different fiber channel hosts with the same LUN
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
# first we need to check if any stray references are left from a previous use of this lun
for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
if [ ! -z "${lingering_devs}" ]; then
for dev in ${lingering_devs}; do
LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
continue;
fi
dev=$(echo $dev | awk -F: '{ print $1}')
logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
MP_WWID=${MP_WWID:1} # strip first character (3) off
# don't do this if the WWID passed in matches the WWID from multipath
if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
# run full removal again so all devices and multimap are cleared
$(dirname $0)/disconnectVolume.sh ${MP_WWID}
# we don't have a multimap but we may still have some stranded devices to clean up
elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
fi
done
sleep 3
fi
done
logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
# wait for multipath to map the new lun to the WWID
echo "$(date): Waiting for multipath entry to show up for the WWID"
while true; do
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
break
fi
logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan"
# instruct bus to scan for new lun
for fchost in $(ls /sys/class/fc_host); do
echo " --> Scanning ${fchost}"
echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
done
multipath -v2 2>/dev/null
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
break
fi
sleep 5
done
echo "$(date): Doing a recan to make sure we have proper current size locally"
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
done
sleep 3
multipathd reconfigure
sleep 3
# cleanup any old/faulty paths
delete_needed=false
multipath -l 3${WWID}
for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing"
echo 1 > /sys/block/${dev}/device/delete;
delete_needed=true
done
if [ "${delete_needed}" == "true" ]; then
sleep 10
multipath -v2 >/dev/null
else
echo "$(date): Unable to find startConnect.sh script!"
exit 1
fi
multipath -l 3${WWID}
# wait for the device path to show up
while [ ! -e /dev/mapper/3${WWID} ]; do
echo "$(date): Waiting for /dev/mapper/3${WWID} to appear"
sleep 1
done
FINISH_CONNECT=$(dirname $0)/finishConnectVolume.sh
if [ -x "${FINISH_CONNECT}" ]; then
echo "$(date): Starting post-connect validation for ${WWID} on lun ${LUN}"
${FINISH_CONNECT} ${LUN} ${WWID}
if [ $? -ne 0 ]; then
echo "$(date): Failed to finish connect process for ${WWID} on lun ${LUN}"
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} failed to finish connect process on lun ${LUN}"
exit 1
fi
else
echo "$(date): Unable to find finishConnect.sh script!"
exit 1
fi
logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available"

View File

@ -26,6 +26,14 @@
#########################################################################################
WWID=${1:?"WWID required"}
BACKGROUND="${2}"
# move the script to run in the background, no need to block other flows for this to complete
if [ -z "${BACKGROUND}" ]; then
nohup "$0" "${WWID}" --background &
exit 0
fi
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
echo "$(date): Removing ${WWID}"
@ -36,6 +44,9 @@ systemctl is-active multipathd || systemctl restart multipathd || {
exit 1
}
# Remove any active IO on the device so it can be removed.
multipathd disablequeueing map 3${WWID}
# first get dm- name
DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}')
SLAVE_DEVS=""
@ -66,9 +77,6 @@ fi
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
# Added to give time for the event to be fired to the server
sleep 10
echo "$(date): ${WWID} removed"
exit 0

View File

@ -0,0 +1,79 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#####################################################################################
#
# Given a lun # and a WWID for a volume provisioned externally, find the volume
# through the SCSI bus and make sure its visible via multipath
#
#####################################################################################
LUN=${1:?"LUN required"}
WWID=${2:?"WWID required"}
# normalize to lowercase to match device-mapper naming (/dev/mapper/3<wwid>)
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
# multipathd must be running for any of the validation below to work
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
exit 1
}
echo "$(date): Doing post-connect validation for ${WWID} on lun ${LUN}"
# get vendor OUI. we will only delete a device on the designated lun if it matches the
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
# host on different fiber channel hosts with the same LUN
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} looking for disk path at /dev/mapper/3${WWID}"
# rescan each underlying SCSI path of the multipath map so a size change on the
# array is reflected locally before the map is reloaded
echo "$(date): Doing a recan to make sure we have proper current size locally"
for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
done
sleep 3
multipathd reconfigure
sleep 3
# cleanup any old/faulty paths
delete_needed=false
multipath -l 3${WWID}
for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} multipath contains faulty path ${dev}, removing"
echo 1 > /sys/block/${dev}/device/delete;
delete_needed=true
done
# if any path was deleted, give the kernel time to settle then rebuild maps
if [ "${delete_needed}" == "true" ]; then
sleep 10
multipath -v2 >/dev/null
fi
multipath -l 3${WWID}
logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} successfully discovered and available"
echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}"
exit 0

View File

@ -0,0 +1,101 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#####################################################################################
#
# Given a lun # and a WWID for a volume provisioned externally, find the volume
# through the SCSI bus and make sure its visible via multipath
#
#####################################################################################
LUN=${1:?"LUN required"}
WWID=${2:?"WWID required"}
# normalize to lowercase to match device-mapper naming (/dev/mapper/3<wwid>)
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
# multipathd must be running for discovery to produce a usable map
systemctl is-active multipathd || systemctl restart multipathd || {
echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
logger -t "CS_SCSI_VOL_CONN_START" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
exit 1
}
echo "$(date): Looking for ${WWID} on lun ${LUN}"
# get vendor OUI. we will only delete a device on the designated lun if it matches the
# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
# host on different fiber channel hosts with the same LUN
INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
echo "$(date): Incoming OUI: ${INCOMING_OUI}"
# first we need to check if any stray references are left from a previous use of this lun
for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
    lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
    if [ ! -z "${lingering_devs}" ]; then
        for dev in ${lingering_devs}; do
            LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
            FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
            if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
                continue;
            fi
            dev=$(echo $dev | awk -F: '{ print $1}')
            logger -t "CS_SCSI_VOL_CONN_START" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
            MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
            MP_WWID=${MP_WWID:1} # strip first character (3) off
            # don't do this if the WWID passed in matches the WWID from multipath
            if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
                # run full removal again so all devices and multimap are cleared
                $(dirname $0)/disconnectVolume.sh ${MP_WWID}
            # we don't have a multimap but we may still have some stranded devices to clean up
            elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
                echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
            fi
        done
        sleep 3
    fi
done
logger -t "CS_SCSI_VOL_CONN_START" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
# wait for multipath to map the new lun to the WWID
echo "$(date): Triggering discovery for multipath WWID ${WWID} on LUN ${LUN}"
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
    logger -t "CS_SCSI_VOL_CONN_START" "${WWID} already available at /dev/mapper/3${WWID}, no need to trigger a scan"
    # fix: was 'break', which is invalid outside a loop (the former retry loop was
    # removed); exit successfully instead of falling through to a needless rescan
    exit 0
fi
# instruct bus to scan for new lun
for fchost in $(ls /sys/class/fc_host); do
    echo " --> Scanning ${fchost}"
    echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
done
multipath -v2 2>/dev/null
ls /dev/mapper/3${WWID} >/dev/null 2>&1
if [ $? == 0 ]; then
    logger -t "CS_SCSI_VOL_CONN_START" "${WWID} scan triggered and device immediately became visible at /dev/mapper/3${WWID}"
fi
logger -t "CS_SCSI_VOL_CONN_START" "${WWID} successfully triggered discovery"
echo "$(date): Complete - Triggered discovery of ${WWID}, watch for device at /dev/mapper/3${WWID}"
exit 0

View File

@ -39,6 +39,12 @@ public interface AutoScaleManager extends AutoScaleService {
"The Number of worker threads to scan the autoscale vm groups.",
false);
ConfigKey<Boolean> UseAutoscaleVmHostnamePrefixEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class,
"autoscale.vm.hostname.prefixenabled",
"true",
"If true, the auto scale vm group name will be used as a prefix for the auto scale vm hostnames.",
true);
ConfigKey<Integer> AutoScaleErroredInstanceThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
"autoscale.errored.instance.threshold",
"10",

View File

@ -294,6 +294,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
PARAM_OVERRIDE_DISK_OFFERING_ID, PARAM_SSH_KEYPAIRS, PARAM_AFFINITY_GROUP_IDS, PARAM_NETWORK_IDS);
protected static final String VM_HOSTNAME_PREFIX = "autoScaleVm-";
protected static final int VM_HOSTNAME_RANDOM_SUFFIX_LENGTH = 6;
private static final Long DEFAULT_HOST_ID = -1L;
@ -1952,6 +1953,19 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
@Override
public String getNextVmHostName(AutoScaleVmGroupVO asGroup) {
    // Hostname scheme is controlled by the global setting: prefixed names when
    // enabled, plain group-name-based names otherwise.
    return UseAutoscaleVmHostnamePrefixEnabled.value()
            ? getNextVmHostNameWithPrefix(asGroup)
            : getNextVmHostNameWithoutPrefix(asGroup);
}
// Builds the next hostname as <truncated group name><next sequence number>,
// truncating the group name so the combined result never exceeds 63 characters.
private String getNextVmHostNameWithoutPrefix(AutoScaleVmGroupVO asGroup) {
    final String groupName = asGroup.getName();
    final String sequence = Long.toString(asGroup.getNextVmSeq());
    final int keep = Math.min(groupName.length(), 63 - sequence.length());
    return groupName.substring(0, keep) + sequence;
}
private String getNextVmHostNameWithPrefix(AutoScaleVmGroupVO asGroup) {
String vmHostNameSuffix = "-" + asGroup.getNextVmSeq() + "-" +
RandomStringUtils.random(VM_HOSTNAME_RANDOM_SUFFIX_LENGTH, 0, 0, true, false, (char[])null, new SecureRandom()).toLowerCase();
// Truncate vm group name because max length of vm name is 63

View File

@ -162,7 +162,13 @@ public class VMSchedulerImpl extends ManagerBase implements VMScheduler, Configu
}
Date scheduledDateTime = Date.from(ts.toInstant());
VMScheduledJobVO scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
VMScheduledJobVO scheduledJob = vmScheduledJobDao.findByScheduleAndTimestamp(vmSchedule.getId(), scheduledDateTime);
if (scheduledJob != null) {
logger.trace("Job is already scheduled for schedule {} at {}", vmSchedule, scheduledDateTime);
return scheduledDateTime;
}
scheduledJob = new VMScheduledJobVO(vmSchedule.getVmId(), vmSchedule.getId(), vmSchedule.getAction(), scheduledDateTime);
try {
vmScheduledJobDao.persist(scheduledJob);
ActionEventUtils.onScheduledActionEvent(User.UID_SYSTEM, vm.getAccountId(), actionEventMap.get(vmSchedule.getAction()),

View File

@ -218,18 +218,19 @@ export const notifierPlugin = {
if (error.response.status) {
msg = `${i18n.global.t('message.request.failed')} (${error.response.status})`
}
if (error.message) {
desc = error.message
}
if (error.response.headers && 'x-description' in error.response.headers) {
if (error.response.headers?.['x-description']) {
desc = error.response.headers['x-description']
}
if (desc === '' && error.response.data) {
} else if (error.response.data) {
const responseKey = _.findKey(error.response.data, 'errortext')
if (responseKey) {
desc = error.response.data[responseKey].errortext
} else if (typeof error.response.data === 'string') {
desc = error.response.data
}
}
if (!desc && error.message) {
desc = error.message
}
}
let countNotify = store.getters.countNotify
countNotify++

View File

@ -638,11 +638,7 @@ export default {
this.$emit('refresh-data')
this.closeAction()
}).catch(e => {
this.$notification.error({
message: this.$t('message.upload.failed'),
description: `${this.$t('message.upload.template.failed.description')} - ${e}`,
duration: 0
})
this.$notifyError(e)
})
},
fetchCustomHypervisorName () {